diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml
index 3aafd2fad..566bd3ff6 100644
--- a/.github/workflows/go.yml
+++ b/.github/workflows/go.yml
@@ -23,7 +23,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v2
with:
- go-version: 1.15
+ go-version: 1.16
- name: install
run: GO111MODULE=off go get golang.org/x/tools/cmd/goimports honnef.co/go/tools/cmd/staticcheck
diff --git a/Makefile b/Makefile
index 15c70746f..6e817aff5 100644
--- a/Makefile
+++ b/Makefile
@@ -24,7 +24,7 @@ imports:
.PHONY: test
test:
- GO111MODULE=on $(GO) test -mod=vendor -tags netgo,builtinassets -v ./...
+ GO111MODULE=on $(GO) test -mod=vendor -tags netgo,builtinassets ./...
.PHONY: release
release:
diff --git a/cmd/promxy/main.go b/cmd/promxy/main.go
index 30d9ed6ff..a82118da3 100644
--- a/cmd/promxy/main.go
+++ b/cmd/promxy/main.go
@@ -33,7 +33,6 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/client_golang/prometheus/promhttp"
- "github.com/prometheus/common/log"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promlog"
"github.com/prometheus/common/version"
@@ -319,7 +318,7 @@ func main() {
}
files = append(files, fs...)
}
- if err := ruleManager.Update(time.Duration(cfg.GlobalConfig.EvaluationInterval), files, cfg.GlobalConfig.ExternalLabels); err != nil {
+ if err := ruleManager.Update(time.Duration(cfg.GlobalConfig.EvaluationInterval), files, cfg.GlobalConfig.ExternalLabels, externalUrl.String()); err != nil {
return err
}
@@ -341,7 +340,7 @@ func main() {
}}))
// We need an empty scrape manager, simply to make the API not panic and error out
- scrapeManager := scrape.NewManager(kitlog.With(logger, "component", "scrape manager"), nil)
+ scrapeManager := scrape.NewManager(nil, kitlog.With(logger, "component", "scrape manager"), nil)
webOptions := &web.Options{
Registerer: prometheus.DefaultRegisterer,
@@ -442,9 +441,9 @@ func main() {
for {
select {
case rc := <-webHandler.Reload():
- log.Infof("Reloading config")
+ logrus.Infof("Reloading config")
if err := reloadConfig(noStepSubqueryInterval, reloadables...); err != nil {
- log.Errorf("Error reloading config: %s", err)
+ logrus.Errorf("Error reloading config: %s", err)
rc <- err
} else {
rc <- nil
@@ -452,12 +451,12 @@ func main() {
case sig := <-sigs:
switch sig {
case syscall.SIGHUP:
- log.Infof("Reloading config")
+ logrus.Infof("Reloading config")
if err := reloadConfig(noStepSubqueryInterval, reloadables...); err != nil {
- log.Errorf("Error reloading config: %s", err)
+ logrus.Errorf("Error reloading config: %s", err)
}
case syscall.SIGTERM, syscall.SIGINT:
- log.Info("promxy received exit signal, starting graceful shutdown")
+ logrus.Info("promxy received exit signal, starting graceful shutdown")
// Stop all services we are running
stopping = true // start failing healthchecks
@@ -465,10 +464,10 @@ func main() {
ruleManager.Stop() // Stop rule manager
if opts.ShutdownDelay > 0 {
- log.Infof("promxy delaying shutdown by %v", opts.ShutdownDelay)
+ logrus.Infof("promxy delaying shutdown by %v", opts.ShutdownDelay)
time.Sleep(opts.ShutdownDelay)
}
- log.Infof("promxy exiting with timeout: %v", opts.ShutdownTimeout)
+ logrus.Infof("promxy exiting with timeout: %v", opts.ShutdownTimeout)
defer cancel()
if opts.ShutdownTimeout > 0 {
ctx, cancel = context.WithTimeout(ctx, opts.ShutdownTimeout)
@@ -477,7 +476,7 @@ func main() {
srv.Shutdown(ctx)
return
default:
- log.Errorf("Uncaught signal: %v", sig)
+ logrus.Errorf("Uncaught signal: %v", sig)
}
}
diff --git a/go.mod b/go.mod
index a03c00b7d..5fe2c0b64 100644
--- a/go.mod
+++ b/go.mod
@@ -3,28 +3,28 @@ module github.com/jacksontj/promxy
go 1.13
require (
- github.com/Azure/go-autorest/autorest v0.11.15
+ github.com/Azure/go-autorest/autorest v0.11.20
github.com/go-kit/kit v0.10.0
- github.com/gogo/protobuf v1.3.1
+ github.com/gogo/protobuf v1.3.2
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
- github.com/golang/snappy v0.0.2
- github.com/jessevdk/go-flags v1.4.0
+ github.com/golang/snappy v0.0.4
+ github.com/jessevdk/go-flags v1.5.0
github.com/julienschmidt/httprouter v1.3.0
github.com/pkg/errors v0.9.1
- github.com/prometheus/client_golang v1.9.0
+ github.com/prometheus/client_golang v1.11.1-0.20210607165600-196536534fbb
github.com/prometheus/client_model v0.2.0
- github.com/prometheus/common v0.15.0
- github.com/prometheus/exporter-toolkit v0.5.1
- github.com/prometheus/prometheus v1.8.1-0.20200513230854-c784807932c2
- github.com/sirupsen/logrus v1.6.0
- github.com/stretchr/testify v1.6.1
- go.uber.org/atomic v1.7.0
- golang.org/x/time v0.0.0-20201208040808-7e3f01d25324
+ github.com/prometheus/common v0.30.0
+ github.com/prometheus/exporter-toolkit v0.6.1
+ github.com/prometheus/prometheus v1.8.2-0.20210707132820-dc8f50559534
+ github.com/sirupsen/logrus v1.7.0
+ github.com/stretchr/testify v1.7.0
+ go.uber.org/atomic v1.9.0
+ golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac
gopkg.in/yaml.v2 v2.4.0
k8s.io/klog v1.0.0
)
-replace github.com/prometheus/prometheus => github.com/jacksontj/prometheus v1.8.1-0.20210816231554-929ddf93a7db
+replace github.com/prometheus/prometheus => github.com/jacksontj/prometheus v1.8.1-0.20210917180120-0c8fa0a92ce8
replace github.com/golang/glog => github.com/kubermatic/glog-gokit v0.0.0-20181129151237-8ab7e4c2d352
diff --git a/go.sum b/go.sum
index 3deef393c..a0527aa75 100644
--- a/go.sum
+++ b/go.sum
@@ -1,13 +1,12 @@
+bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
-cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw=
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
@@ -15,15 +14,24 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go v0.72.0 h1:eWRCuwubtDrCJG0oSUMgnsbD4CmPFQF2ei4OFbXvwww=
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
+cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
+cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
+cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
+cloud.google.com/go v0.93.3 h1:wPBktZFzYBcCZVARvwVKqH1uEj+aLXofJEtrb4oOsio=
+cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o=
+cloud.google.com/go/bigtable v1.3.0/go.mod h1:z5EyKrPE8OQmeg4h5MNdKvuSnI9CCT49Ki3f23aBzio=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
@@ -37,48 +45,81 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/Azure/azure-sdk-for-go v49.2.0+incompatible h1:23a1GeBzTLeT53StH9NDJyCMhxCH3awTZaw9ZYBcq78=
-github.com/Azure/azure-sdk-for-go v49.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v57.1.0+incompatible h1:TKQ3ieyB0vVKkF6t9dsWbMjq56O1xU3eh3Ec09v6ajM=
+github.com/Azure/azure-sdk-for-go v57.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
+github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0=
+github.com/Azure/go-autorest/autorest v0.10.1/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
-github.com/Azure/go-autorest/autorest v0.11.15 h1:S5SDFpmgoVyvMEOcULyEDlYFrdPmu6Wl0Ic+shkEwzg=
-github.com/Azure/go-autorest/autorest v0.11.15/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
+github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
+github.com/Azure/go-autorest/autorest v0.11.20 h1:s8H1PbCZSqg/DH7JMlOz6YMig6htWLNPsjDdlLqCx3M=
+github.com/Azure/go-autorest/autorest v0.11.20/go.mod h1:o3tqFY+QR40VOlk+pV4d77mORO64jOXSgEnPQgLK6JY=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
+github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
+github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
+github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
+github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
-github.com/Azure/go-autorest/autorest/adal v0.9.10 h1:r6fZHMaHD8B6LDCn0o5vyBFHIHrM6Ywwx7mb49lPItI=
-github.com/Azure/go-autorest/autorest/adal v0.9.10/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
+github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
+github.com/Azure/go-autorest/autorest/adal v0.9.15 h1:X+p2GF0GWyOiSmqohIaEeuNFNDY4I4EOlVuUQvFdWMk=
+github.com/Azure/go-autorest/autorest/adal v0.9.15/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A=
+github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM=
+github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
+github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
-github.com/Azure/go-autorest/autorest/to v0.3.0 h1:zebkZaadz7+wIQYgC7GXaz3Wb28yKYfVkkBKwc38VF8=
-github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA=
-github.com/Azure/go-autorest/autorest/validation v0.2.0 h1:15vMO4y76dehZSq7pAaOLQxC6dZYsSrj2GQpflyM/L4=
-github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI=
+github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk=
+github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
+github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac=
+github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
-github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE=
github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
+github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
-github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
-github.com/HdrHistogram/hdrhistogram-go v0.9.0 h1:dpujRju0R4M/QZzcnR1LH1qm+TVG3UzkWdp5tH1WMcg=
-github.com/HdrHistogram/hdrhistogram-go v0.9.0/go.mod h1:nxrse8/Tzg2tg3DZcZjm6qEclQKK70g0KxO61gFFZD4=
+github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
+github.com/HdrHistogram/hdrhistogram-go v1.1.0 h1:6dpdDPTRoo78HxAJ6T1HfMiKSnqhgRRqzCuPshRkQ7I=
+github.com/HdrHistogram/hdrhistogram-go v1.1.0/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
-github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU=
+github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
+github.com/Masterminds/sprig v2.16.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
+github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
+github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
+github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
+github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
+github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w=
+github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
+github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
+github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
+github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
+github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
+github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
+github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
+github.com/Microsoft/hcsshim v0.8.18/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
+github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
+github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
@@ -86,6 +127,8 @@ github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tN
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/SAP/go-hdb v0.14.1/go.mod h1:7fdQLVC2lER3urZLjZCm0AuMQfApof92n3aylBPEkMo=
+github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
@@ -93,22 +136,23 @@ github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia
github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
-github.com/alecthomas/units v0.0.0-20201120081800-1786d5ef83d4 h1:EBTWhcAX7rNQ80RLwLCpHZBBrJuzallFHnF+yMXo928=
-github.com/alecthomas/units v0.0.0-20201120081800-1786d5ef83d4/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
+github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15 h1:AUNCr9CiJuwrRYS3XieqF+Z9B9gNxo/eANAJCF2eiN4=
+github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
+github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0=
+github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ=
+github.com/apache/arrow/go/arrow v0.0.0-20200923215132-ac86123a3f01/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
-github.com/armon/go-metrics v0.3.3 h1:a9F4rlj7EWWrbj7BYw8J8+x+ZZkJeqzNyRk8hdPF+ro=
-github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
@@ -119,69 +163,212 @@ github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:o
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef h1:46PFijGLmAjMPwCCCo7Jf0W6f9slllCkkv7vyc1yOSg=
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
+github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.29.16/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg=
github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
-github.com/aws/aws-sdk-go v1.36.15 h1:nGqgPlXegCKPZOKXvWnYCLvLPJPRoSOHHn9d0N0DG7Y=
-github.com/aws/aws-sdk-go v1.36.15/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
+github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
+github.com/aws/aws-sdk-go v1.40.11/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
+github.com/aws/aws-sdk-go v1.40.37 h1:I+Q6cLctkFyMMrKukcDnj+i2kjrQ37LGiOM6xmsxC48=
+github.com/aws/aws-sdk-go v1.40.37/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
+github.com/benbjohnson/immutable v0.2.1/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI=
+github.com/benbjohnson/tmpl v1.0.0/go.mod h1:igT620JFIi44B6awvU9IsDhR77IXWtFigTLil/RPdps=
+github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
+github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
+github.com/bonitoo-io/go-sql-bigquery v0.3.4-1.4.0/go.mod h1:J4Y6YJm0qTWB9aFziB7cPeSyc6dOZFyJdteSeybVpXQ=
+github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
+github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
+github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
+github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
+github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34=
+github.com/cactus/go-statsd-client/statsd v0.0.0-20191106001114-12b4e2b38748/go.mod h1:l/bIBLeOl9eX+wxJAzxS4TveKRtAqlyDpHjhkfO0MEI=
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
-github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg=
+github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
-github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
+github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
+github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
+github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
+github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed h1:OZmjad4L3H8ncOIR8rnb5MREYqG8ixi5+WbeUsquF0c=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
-github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
-github.com/containerd/containerd v1.3.4 h1:3o0smo5SKY7H6AJCmJhsnCjR2/V2T8VmiHt7seN2/kI=
-github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
+github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
+github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
+github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
+github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
+github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
+github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
+github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
+github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
+github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
+github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
+github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
+github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
+github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
+github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
+github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
+github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
+github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
+github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
+github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
+github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
+github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
+github.com/containerd/containerd v1.5.4 h1:uPF0og3ByFzDnaStfiQj3fVGTEtaSNyU+bW7GR/nqGA=
+github.com/containerd/containerd v1.5.4/go.mod h1:sx18RgvW6ABJ4iYUw7Q5x7bgFOAB9B6G7+yO0XBc4zw=
+github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
+github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
+github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
+github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
+github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
+github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
+github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
+github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
+github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
+github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
+github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
+github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
+github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
+github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
+github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
+github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
+github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
+github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0=
+github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
+github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
+github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
+github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
+github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
+github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
+github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
+github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
+github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
+github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
+github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
+github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw=
+github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y=
+github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
+github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
+github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
+github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
+github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
+github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
+github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
+github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
+github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
+github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
+github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M=
+github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
+github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
+github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
+github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
+github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1/go.mod h1:+hnT3ywWDTAFrW5aE+u2Sa/wT555ZqwoCS+pk3p6ry4=
github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ=
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/digitalocean/godo v1.54.0 h1:KP0Nv87pgViR8k/7De3VrmflCL5pJqXbNnkcw0bwG10=
-github.com/digitalocean/godo v1.54.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU=
+github.com/digitalocean/godo v1.65.0 h1:3SywGJBC18HaYtPQF+T36jYzXBi+a6eIMonSjDll7TA=
+github.com/digitalocean/godo v1.65.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU=
+github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
+github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY=
+github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
+github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
+github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v20.10.1+incompatible h1:u0HIBLwOJdemyBdTCkoBX34u3lb5KyBo0rQE3a5Yg+E=
-github.com/docker/docker v20.10.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.8+incompatible h1:RVqD337BgQicVCzYrrlhLDWhq6OAD2PJDUg2LsEUvKM=
+github.com/docker/docker v20.10.8+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
+github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
@@ -190,44 +377,66 @@ github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/go-control-plane v0.9.9 h1:vQLjymTobffN2R0F8eTqw6q7iozfRO5Z0m+/4Vw+/uA=
+github.com/envoyproxy/go-control-plane v0.9.9/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses=
+github.com/envoyproxy/protoc-gen-validate v0.6.1 h1:4CF52PCseTFt4bE+Yk3dIpdVi7XWuPVMhPtm4FaIJPM=
+github.com/envoyproxy/protoc-gen-validate v0.6.1/go.mod h1:txg5va2Qkip90uYoSKH+nkAAmXrb2j3iq4FLwdrCbXQ=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs=
+github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
-github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/foxcpp/go-mockdns v0.0.0-20201212160233-ede2f9158d15/go.mod h1:tPg4cp4nseejPd+UKxtCVQ2hUxNTZ7qQZJa7CLriIeo=
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
+github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
+github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
+github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
+github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do=
github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
-github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
-github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
+github.com/go-chi/chi v4.1.0+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
+github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo=
github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
+github.com/go-kit/log v0.1.0 h1:DGJh0Sm43HbOeYDNnVZFl8BvcYVvjD5bqYJvp0REbwQ=
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
+github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
-github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc=
+github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
@@ -235,17 +444,19 @@ github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9sn
github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk=
github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU=
github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ=
-github.com/go-openapi/analysis v0.19.14 h1:OPuUe8ApFeu59GeTsZtb0mLWHu5MipU4fDvxNLsG3bw=
-github.com/go-openapi/analysis v0.19.14/go.mod h1:zN0kY6i38wo2LQOwltVyMk61bqlqOm86n1/Iszo8F8Y=
+github.com/go-openapi/analysis v0.19.16/go.mod h1:GLInF007N83Ad3m8a/CbQ5TPzdnGT7workfHwuVjNVk=
+github.com/go-openapi/analysis v0.20.0 h1:UN09o0kNhleunxW7LR+KnltD0YrJ8FF03pSqvAN3Vro=
+github.com/go-openapi/analysis v0.20.0/go.mod h1:BMchjvaHDykmRMsK40iPtvyOfFdMMxlOmQr9FBZk+Og=
github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
-github.com/go-openapi/errors v0.19.4/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
github.com/go-openapi/errors v0.19.6/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
github.com/go-openapi/errors v0.19.7/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
-github.com/go-openapi/errors v0.19.8 h1:doM+tQdZbUm9gydV9yR+iQNmztbjj7I3sW4sIcAwIzc=
github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/errors v0.20.0 h1:Sxpo9PjEHDzhs3FbnGNonvDgWcMW2U7wGTcDDSFSceM=
+github.com/go-openapi/errors v0.20.0/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
@@ -256,32 +467,38 @@ github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3Hfo
github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
-github.com/go-openapi/jsonreference v0.19.4 h1:3Vw+rh13uq2JFNxgnMTGE1rnoieU9FmyE1gvnyylsYg=
-github.com/go-openapi/jsonreference v0.19.4/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
+github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM=
+github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs=
github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI=
-github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk=
github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY=
-github.com/go-openapi/loads v0.19.6 h1:6IAtnx22MNSjPocZZ2sV7EjgF6wW5rDC9r6ZkNxjiN8=
github.com/go-openapi/loads v0.19.6/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc=
+github.com/go-openapi/loads v0.19.7/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc=
+github.com/go-openapi/loads v0.20.0/go.mod h1:2LhKquiE513rN5xC6Aan6lYOSddlL8Mp20AW9kpviM4=
+github.com/go-openapi/loads v0.20.2 h1:z5p5Xf5wujMxS1y8aP+vxwW5qYT2zdJBbXKmQUG3lcc=
+github.com/go-openapi/loads v0.20.2/go.mod h1:hTVUotJ+UonAMMZsvakEgmWKgtulweO9vYP2bQYKA/o=
github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA=
github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64=
github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4=
github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo=
github.com/go-openapi/runtime v0.19.16/go.mod h1:5P9104EJgYcizotuXhEuUrzVc+j1RiSjahULvYmlv98=
-github.com/go-openapi/runtime v0.19.24 h1:TqagMVlRAOTwllE/7hNKx6rQ10O6T8ZzeJdMjSTKaD4=
github.com/go-openapi/runtime v0.19.24/go.mod h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk=
+github.com/go-openapi/runtime v0.19.29 h1:5IIvCaIDbxetN674vX9eOxvoZ9mYGQ16fV1Q0VSG+NA=
+github.com/go-openapi/runtime v0.19.29/go.mod h1:BvrQtn6iVb2QmiVXRsFAm6ZCAZBpbVKFfN6QWCp582M=
github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY=
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=
github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=
-github.com/go-openapi/spec v0.19.14 h1:r4fbYFo6N4ZelmSX8G6p+cv/hZRXzcuqQIADGT1iNKM=
-github.com/go-openapi/spec v0.19.14/go.mod h1:gwrgJS15eCUgjLpMjBJmbZezCsw88LmgeEip0M63doA=
+github.com/go-openapi/spec v0.19.15/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU=
+github.com/go-openapi/spec v0.20.0/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU=
+github.com/go-openapi/spec v0.20.1/go.mod h1:93x7oh+d+FQsmsieroS4cmR3u0p/ywH649a3qwC9OsQ=
+github.com/go-openapi/spec v0.20.3 h1:uH9RQ6vdyPSs2pSy9fL8QPspDF2AMIMPtmK5coSSjtQ=
+github.com/go-openapi/spec v0.20.3/go.mod h1:gG4F8wdEDN+YPBMVnzE85Rbhf+Th2DTvA9nFPQ5AYEg=
github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY=
@@ -289,30 +506,41 @@ github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6
github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU=
github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk=
github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk=
-github.com/go-openapi/strfmt v0.19.11 h1:0+YvbNh05rmBkgztd6zHp4OCFn7Mtu30bn46NQo2ZRw=
github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc=
+github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc=
+github.com/go-openapi/strfmt v0.20.1/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk=
+github.com/go-openapi/strfmt v0.20.2 h1:6XZL+fF4VZYFxKQGLAUB358hOrRh/wS51uWEtlONADE=
+github.com/go-openapi/strfmt v0.20.2/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk=
github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY=
github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY=
-github.com/go-openapi/swag v0.19.11/go.mod h1:Uc0gKkdR+ojzsEpjh39QChyu92vPgIr72POcgHMAgSY=
-github.com/go-openapi/swag v0.19.12 h1:Bc0bnY2c3AoF7Gc+IMIAQQsD8fLHjHpc19wXvYuayQI=
github.com/go-openapi/swag v0.19.12/go.mod h1:eFdyEBkTdoAf/9RXBvj4cr1nH7GD8Kzo5HTt47gr72M=
+github.com/go-openapi/swag v0.19.13/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM=
+github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo=
-github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
github.com/go-openapi/validate v0.19.10/go.mod h1:RKEZTUWDkxKQxN2jDT7ZnZi2bhZlbNMAuKvKB+IaGx8=
github.com/go-openapi/validate v0.19.12/go.mod h1:Rzou8hA/CBw8donlS6WNEUQupNvUZ0waH08tGe6kAQ4=
-github.com/go-openapi/validate v0.19.14 h1:G/lAG8al3droiEQyYdK0IP71au/V63x4LWF3TFv9n/k=
-github.com/go-openapi/validate v0.19.14/go.mod h1:PdGrHe0rp6MG3A1SrAY/rIHATqzJEEhohGE1atLkBEQ=
+github.com/go-openapi/validate v0.19.15/go.mod h1:tbn/fdOwYHgrhPBzidZfJC2MIVvs9GA7monOmWBbeCI=
+github.com/go-openapi/validate v0.20.1/go.mod h1:b60iJT+xNNLfaQJUqLI7946tYiFEOuE9E4k54HpKcJ0=
+github.com/go-openapi/validate v0.20.2 h1:AhqDegYV3J3iQkMPJSXkvzymHKMTw0BST3RK3hTT4ts=
+github.com/go-openapi/validate v0.20.2/go.mod h1:e7OJoKNgd0twXZwIn0A43tHbvIcr/rZIVCbJBpTUoY0=
+github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM=
+github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY=
+github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48 h1:JVrqSeQfdhYRFk24TvhTZWU0q8lfCojxZQFi3Ou7+uY=
+github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48/go.mod h1:dZGr0i9PLlaaTD4H/hoZIDjQ+r6xq8mgbRzHZf7f2J8=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
-github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-zookeeper/zk v1.0.2 h1:4mx0EYENAdX/B/rbunjlt5+4RTA/a9SMHBRuSKdGxPM=
+github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
@@ -337,20 +565,35 @@ github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWe
github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
+github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
+github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
+github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
+github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
+github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
+github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
+github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt/v4 v4.0.0 h1:RAqyYixv1p7uEnocuy8P1nru5wprCh/MH2BIlW5z5/o=
+github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
@@ -358,6 +601,8 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -371,16 +616,23 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
+github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.2 h1:aeE13tS0IiQgFjYdoL8qN3K1N2bXXtI6Vi51/y7BpMw=
-github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
+github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
+github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -389,8 +641,11 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -399,6 +654,7 @@ github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
@@ -407,53 +663,68 @@ github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210827144239-02619b876842/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
+github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I=
+github.com/googleapis/gax-go/v2 v2.1.0 h1:6DWmvNpomjL1+3liNSZbVns3zsYzzCjm6pRBO1tLeso=
+github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
-github.com/gophercloud/gophercloud v0.15.0 h1:jQeAWj0s1p83+TrUXhJhEOK4oe2g6YcBcFwEyMNIjEk=
-github.com/gophercloud/gophercloud v0.15.0/go.mod h1:VX0Ibx85B60B5XOrZr6kaNwrmPUzcmMpwxvQ1WQIIWM=
+github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
+github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw=
+github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
+github.com/gophercloud/gophercloud v0.20.0 h1:1+4jrsjVhdX5omlAo4jkmFc6ftLbuXLzgFo4i6lH+Gk=
+github.com/gophercloud/gophercloud v0.20.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
+github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
+github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
+github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
-github.com/hashicorp/consul/api v1.8.1 h1:BOEQaMWoGMhmQ29fC26bi0qb7/rId9JzZP2V0Xmx7m8=
-github.com/hashicorp/consul/api v1.8.1/go.mod h1:sDjTOq0yUyv5G4h+BqSea7Fn6BU+XbolEz1952UB+mk=
+github.com/hashicorp/consul/api v1.10.1 h1:MwZJp86nlnL+6+W1Zly4JUuVn9YHhMggBirMpHGD7kw=
+github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/consul/sdk v0.7.0 h1:H6R9d008jDcHPQPAqPNuydAshJ4v5/8URdFnUvK/+sc=
-github.com/hashicorp/consul/sdk v0.7.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM=
+github.com/hashicorp/consul/sdk v0.8.0 h1:OJtKBtEjboEZvG6AOUdh4Z1Zbyu0WcxQ0qatRrZHTVU=
+github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
+github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-hclog v0.12.0 h1:d4QkX8FRTYaKaCZBoXYY8zJX2BXjWxurN/GA2tkrmZM=
github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
-github.com/hashicorp/go-hclog v0.12.2 h1:F1fdYblUEsxKiailtkhCCG2g4bipEgaHiDc8vffNpD4=
-github.com/hashicorp/go-hclog v0.12.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-immutable-radix v1.2.0 h1:l6UW37iCXwZkZoAbEYnptSHVE/cQ5bOTPYG5W3vf9+8=
-github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
-github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
@@ -467,40 +738,53 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
-github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/memberlist v0.2.2 h1:5+RffWKwqJ71YPu9mWsF7ZOscZmwfasdA8kbdC7AO2g=
github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
+github.com/hashicorp/memberlist v0.2.4 h1:OOhYzSvFnkFQXm1ysE8RjXTHsqSRDyP4emusC9K7DYg=
+github.com/hashicorp/memberlist v0.2.4/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/serf v0.9.5 h1:EBWvyu9tcRszt3Bxp3KNssBMP1KuHWyO51lz9+786iM=
github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
-github.com/hetznercloud/hcloud-go v1.23.1 h1:SkYdCa6x458cMSDz5GI18iPz5j2hicACiDP6J/s/bTs=
-github.com/hetznercloud/hcloud-go v1.23.1/go.mod h1:xng8lbDUg+xM1dgc0yGHX5EeqbwIq7UYlMWMTx3SQVg=
-github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
+github.com/hetznercloud/hcloud-go v1.32.0 h1:7zyN2V7hMlhm3HZdxOarmOtvzKvkcYKjM0hcwYMQZz0=
+github.com/hetznercloud/hcloud-go v1.32.0/go.mod h1:XX/TQub3ge0yWR2yHWmnDVIrB+MQbda1pHxkUmDlUME=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
+github.com/iancoleman/strcase v0.0.0-20180726023541-3605ed457bf7/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA=
+github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY=
-github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI=
+github.com/influxdata/flux v0.120.1/go.mod h1:pGSAvyAA5d3et7SSzajaYShWYXmnRnJJq2qWi+WWZ2I=
+github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69/go.mod h1:pwymjR6SrP3gD3pRj9RJwdl1j5s3doEEV8gS4X9qSzA=
+github.com/influxdata/influxdb v1.9.3/go.mod h1:xD4ZjAgEJQO9/bX3NhFrssKtdNPi+ki1kjrttJRDhGc=
+github.com/influxdata/influxdb-client-go/v2 v2.3.1-0.20210518120617-5d1fff431040/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
-github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk=
-github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE=
+github.com/influxdata/influxql v1.1.1-0.20210223160523-b6ab99450c93/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk=
+github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
+github.com/influxdata/pkg-config v0.2.7/go.mod h1:EMS7Ll0S4qkzDk53XS3Z72/egBsPInt+BeRxb0WeSwk=
github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8=
github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE=
-github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
+github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b/go.mod h1:Z0kXnxzbTC2qrx4NaIzYkE1k66+6oEDQTvL95hQFh5Y=
github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
-github.com/jacksontj/prometheus v1.8.1-0.20210816231554-929ddf93a7db h1:zX9G0b3LGxdL2CQFx+b0563gM9V8dwv14KkKVKcxiMw=
-github.com/jacksontj/prometheus v1.8.1-0.20210816231554-929ddf93a7db/go.mod h1:pZyryEk2SoMVjRI6XFqZLW7B9vPevv8lqwESVYjP1WA=
-github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA=
-github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
+github.com/jacksontj/prometheus v1.8.1-0.20210917180120-0c8fa0a92ce8 h1:eLfuNbcHuyoE9A2B3JRLpYMlqRxfaX5IOm11mbzFqrg=
+github.com/jacksontj/prometheus v1.8.1-0.20210917180120-0c8fa0a92ce8/go.mod h1:02eURgmH1YsgJ2TtWNUGMQMCnLxmtHH9nOgvYxIjGAo=
+github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc=
+github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
+github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
@@ -516,11 +800,12 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o=
+github.com/jsternberg/zap-logfmt v1.2.0/go.mod h1:kz+1CUmCutPWABnNkOu9hOHKdT2q3TDYCcsFy9hpqb0=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
@@ -531,16 +816,16 @@ github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaR
github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
-github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
+github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
@@ -553,10 +838,17 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kubermatic/glog-gokit v0.0.0-20181129151237-8ab7e4c2d352 h1:IwAUUBomA7q2TedTHQ3ebjwXVp8aHwi453m7AFn0vCk=
github.com/kubermatic/glog-gokit v0.0.0-20181129151237-8ab7e4c2d352/go.mod h1:WUD00rCUd6ntFOOuzJbT/MRrM+6X0Zufa6wWYBcf4zY=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg=
+github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
+github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
+github.com/linode/linodego v0.32.0 h1:IK04cx2b/IwAAd6XLruf1Dl/n3dRXj87Uw/5qo6afVU=
+github.com/linode/linodego v0.32.0/go.mod h1:BR0gVkCJffEdIGJSl6bHR80Ty+Uvg/2jkjmrWaFectM=
+github.com/lyft/protoc-gen-star v0.5.1/go.mod h1:9toiA3cC7z5uVbODF7kEQ91Xn7XNFkVUl+SrEe+ZORU=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@@ -567,27 +859,39 @@ github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
+github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
+github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
+github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
-github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/miekg/dns v1.1.22/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
-github.com/miekg/dns v1.1.35 h1:oTfOaDH+mZkdcgdIjH6yBajRGtIwcwcaR+rt23ZSrJs=
-github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
+github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg=
+github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
+github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/mileusna/useragent v0.0.0-20190129205925-3e331f0949a5/go.mod h1:JWhYAp2EXqUtsxTKdeGlY8Wp44M7VxThC9FEoNGi2IE=
+github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
@@ -601,8 +905,17 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/mapstructure v1.3.3 h1:SzB1nHZ2Xi+17FP0zVQBHIZqvwRN9408fJO8h+eeNA8=
github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.4.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
+github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
+github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
+github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
+github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
+github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk=
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -614,8 +927,9 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
-github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
+github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
@@ -627,34 +941,63 @@ github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzE
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
+github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw=
+github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
+github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
+github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.10.3 h1:gph6h/qe9GSUw1NhH1gp+qb+h8rXD8Cy60Z32Qw3ELA=
+github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
+github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
+github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
+github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
+github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w=
github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU=
github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
-github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
@@ -663,40 +1006,47 @@ github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJ
github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
-github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
+github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
-github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
+github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
-github.com/prometheus/alertmanager v0.21.0 h1:qK51JcUR9l/unhawGA9F9B64OCYfcGewhPNprem/Acc=
-github.com/prometheus/alertmanager v0.21.0/go.mod h1:h7tJ81NA0VLWvWEayi1QltevFkLF3KxmC/malTcT8Go=
+github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
+github.com/prometheus/alertmanager v0.23.0 h1:KIb9IChC3kg+1CC388qfr7bsT+tARpQqdsCMoatdObA=
+github.com/prometheus/alertmanager v0.23.0/go.mod h1:0MLTrjQI8EuVmvykEhcfr/7X0xmaDAZrqMgxIq3OXHk=
+github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
+github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
-github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
-github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4=
+github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_golang v1.9.0 h1:Rrch9mh17XcxvEu9D9DEpb4isxjGBtcevQjKvxPRQIU=
-github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU=
+github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_golang v1.11.1-0.20210607165600-196536534fbb h1:NDugDiOx6TQAZ0EUR+omAScbg74fyQ58fD4vqdpRoZ4=
+github.com/prometheus/client_golang v1.11.1-0.20210607165600-196536534fbb/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -704,24 +1054,37 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/common v0.15.0 h1:4fgOnadei3EZvgRwxJ7RMpG1k1pOZth5Pc13tyspaKM=
-github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
-github.com/prometheus/exporter-toolkit v0.5.1 h1:9eqgis5er9xN613ZSADjypCJaDGj9ZlcWBvsIHa8/3c=
-github.com/prometheus/exporter-toolkit v0.5.1/go.mod h1:OCkM4805mmisBhLmVFw858QYi3v0wKdY6/UxrT0pZVg=
+github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
+github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/common v0.30.0 h1:JEkYlQnpzrzQFxi6gnukFPdQ+ac82oRhzMcIduJu/Ug=
+github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
+github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
+github.com/prometheus/exporter-toolkit v0.6.1 h1:Aqk75wQD92N9CqmTlZwjKwq6272nOGrWIbc8Z7+xQO0=
+github.com/prometheus/exporter-toolkit v0.6.1/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g=
+github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
-github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4=
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
@@ -729,91 +1092,150 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
+github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
-github.com/samuel/go-zookeeper v0.0.0-20201211165307-7117e9ea2414 h1:AJNDS0kP60X8wwWFvbLPwDuojxubj9pbfK7pjHw0vKg=
-github.com/samuel/go-zookeeper v0.0.0-20201211165307-7117e9ea2414/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44 h1:3egqo0Vut6daANFm7tOXdNAa8v5/uLU+sgCJrc88Meo=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44/go.mod h1:CJJ5VAbozOl0yEw7nHB9+7BXTJbIn6h7W+f6Gau5IP8=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
-github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk=
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 h1:pXY9qYc/MP5zdvqWEUH6SjNiu7VhSjuVFTFiTcphaLU=
github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
github.com/simonpasquier/klog-gokit v0.1.0 h1:l3GGzgwlUF4vC1ApCOEsMsV+6nJPM01VoVCUCZgOIUw=
github.com/simonpasquier/klog-gokit v0.1.0/go.mod h1:4lorAA0CyDox4KO34BrvNAJk8J2Ma/M9Q2BDkR38vSI=
+github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/snowflakedb/gosnowflake v1.3.4/go.mod h1:NsRq2QeiMUuoNUJhp5Q6xGC4uBrsS9g6LwZVEkTWgsE=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
+github.com/spf13/afero v1.3.4/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
+github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
+github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
+github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
-github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
+github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
-github.com/uber/jaeger-client-go v2.25.0+incompatible h1:IxcNZ7WRY1Y3G4poYlx24szfsn/3LvK9QHCq9oQw8+U=
-github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
-github.com/uber/jaeger-lib v2.4.0+incompatible h1:fY7QsGQWiCt8pajv4r7JEvmATdCVaWxXbjwyYwsNaLQ=
-github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/uber-go/tally v3.3.15+incompatible/go.mod h1:YDTIBxdXyOU/sCWilKB4bgyufu1cEi0jdVnRdxvjnmU=
+github.com/uber/athenadriver v1.1.4/go.mod h1:tQjho4NzXw55LGfSZEcETuYydpY1vtmixUabHkC1K/E=
+github.com/uber/jaeger-client-go v2.28.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
+github.com/uber/jaeger-client-go v2.29.1+incompatible h1:R9ec3zO3sGpzs0abd43Y+fBZRJ9uiH6lXyR/+u6brW4=
+github.com/uber/jaeger-client-go v2.29.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
+github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg=
+github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
+github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
+github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
+github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
+github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
+github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
+github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
-github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
+github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
+github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
+github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
+github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
+github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
+github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
+github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
+github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
+github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
+github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
+github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
+github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
+github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
-github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg=
+github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
+github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
+github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
+go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
+go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
-go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE=
go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE=
-go.mongodb.org/mongo-driver v1.4.3 h1:moga+uhicpVshTyaqY9L23E6QqwcHRUv1sqyOsoyOO8=
go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc=
+go.mongodb.org/mongo-driver v1.4.4/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc=
+go.mongodb.org/mongo-driver v1.4.6/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc=
+go.mongodb.org/mongo-driver v1.5.1 h1:9nOVLGDfOaZ9R0tBumx/BcuqkbFpyTCU2r/Po7A2azI=
+go.mongodb.org/mongo-driver v1.5.1/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw=
+go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
@@ -821,24 +1243,37 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.5 h1:dntmOdLpSpHlVqbW5Eay97DelsZHe+55D+xC6i0dDS0=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
-go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
+go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
+go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
+go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
+go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
+go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
+golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20180505025534-4ec37c66abab/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -846,13 +1281,20 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9 h1:sYNJzB4J8toYPQTM6pAkcmBRgw9SnQKP9oXCHfgy604=
-golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e h1:gsTQYXdTw2Gq7RBsWvlQ91b+aEQ6bXFUngBGuR8sPpI=
+golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -865,6 +1307,7 @@ golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
@@ -878,8 +1321,10 @@ golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
@@ -887,12 +1332,16 @@ golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -905,14 +1354,17 @@ golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -923,16 +1375,30 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201224014010-6772e930b67b h1:iFwSg7t5GZmB/Q5TjiEAsdoLDrdJRC1RiF2WhuV29Qw=
+golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210903162142-ad29c8ab022f h1:w6wWR0H+nyVpbSAQbzVEIACVyr/h8l/BEkY6Sokc7Eg=
+golang.org/x/net v0.0.0-20210903162142-ad29c8ab022f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -940,8 +1406,15 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5 h1:Lm4OryKCca1vehdsWogr9N4t7NfZxLbJoc/H0w4K4S4=
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw=
+golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -952,8 +1425,9 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -972,68 +1446,114 @@ golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201223074533-0d417f636930 h1:vRgIt+nup/B/BwIS0g2oC0haq0iqbV3ZA+u6+0TlNCo=
-golang.org/x/sys v0.0.0-20201223074533-0d417f636930/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34 h1:GkvMjFtXUmahfDtashnc1mnrCtuBVcwse5QV2lUk/tI=
+golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE=
+golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 h1:Hir2P/De0WpUhtrKGGjvSb2YxUgyZ7EFOSLIcSSpiwE=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
+golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -1072,11 +1592,9 @@ golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
@@ -1085,22 +1603,33 @@ golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304024140-c4206d458c3f/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200721032237-77f530d86f9a/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201228162255-34cd474b9958 h1:8dEW6cGnUF2NIvtKDx8YsMBEw5pUrMEVUrU0jiPgmu8=
-golang.org/x/tools v0.0.0-20201228162255-34cd474b9958/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1108,10 +1637,13 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1N
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
-gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU=
+gonum.org/v1/gonum v0.8.2 h1:CCXrcPKiGGotvnN6jfUsKk4rRqm7q09/YbKb5xCEvtM=
+gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
+gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc=
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
+google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
@@ -1130,8 +1662,17 @@ google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
-google.golang.org/api v0.36.0 h1:l2Nfbl2GPXdWorv+dT2XfinX2jOOw4zv1VhLstx+6rE=
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
+google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
+google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
+google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
+google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
+google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
+google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
+google.golang.org/api v0.56.0 h1:08F9XVYTLOGeSQb3xI9C0gXMuQanhdGed0cWFhDozbI=
+google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1141,13 +1682,14 @@ google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
-google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
@@ -1155,8 +1697,8 @@ google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvx
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
@@ -1175,9 +1717,32 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e h1:wYR00/Ht+i/79g/gzhdehBgLIJCklKoc8Q/NebdzzpY=
+google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
+google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
+google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
+google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83 h1:3V2dxSZpz4zozWWUq36vUxXEKnSYitEH2LdsAx+RUmg=
+google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
@@ -1187,6 +1752,7 @@ google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij
google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
@@ -1197,8 +1763,19 @@ google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
-google.golang.org/grpc v1.33.2 h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
+google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q=
+google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1208,25 +1785,35 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
-google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/fsnotify/fsnotify.v1 v1.4.7 h1:XNNYLJHt73EyYiCZi6+xjupS9CpvmiDgjPTAjrBlQbo=
gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE=
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
+gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
+gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
+gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
+gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
@@ -1242,8 +1829,9 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
@@ -1257,26 +1845,54 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-k8s.io/api v0.20.1 h1:ud1c3W3YNzGd6ABJlbFfKXBKXO+1KdGfcgGGNgFR03E=
k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
-k8s.io/apimachinery v0.20.1 h1:LAhz8pKbgR8tUwn7boK+b2HZdt7MiTu2mkYtFMUjTRQ=
+k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
+k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
+k8s.io/api v0.22.1 h1:ISu3tD/jRhYfSW8jI/Q1e+lRxkR7w9UwQEZ7FgslrwY=
+k8s.io/api v0.22.1/go.mod h1:bh13rkTp3F1XEaLGykbyRD2QaTTzPm0e/BMd8ptFONY=
k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
-k8s.io/client-go v0.20.1 h1:Qquik0xNFbK9aUG92pxHYsyfea5/RPO9o9bSywNor+M=
+k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
+k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
+k8s.io/apimachinery v0.22.1 h1:DTARnyzmdHMz7bFWFDDm22AM4pLWTQECMpRTFu2d2OM=
+k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
+k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
+k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
+k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
+k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
+k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
+k8s.io/client-go v0.22.1 h1:jW0ZSHi8wW260FvcXHkIa0NLxFBQszTlhiAVsU5mopw=
+k8s.io/client-go v0.22.1/go.mod h1:BquC5A4UOo4qVDUtoc04/+Nxp1MeHcVc1HJm1KmG8kk=
+k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
+k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
+k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
+k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
+k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
+k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
+k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
-k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ=
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd h1:sOHNzJIkytDF6qadMNKhhDRpc6ODik8lVC6nOur7B2c=
+k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
+k8s.io/klog/v2 v2.10.0 h1:R2HDMDJsHVTHA2n4RjwbeYXdOcBymXdX/JRb1v0VGhE=
+k8s.io/klog/v2 v2.10.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
-k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw=
+k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e h1:KLHHjkdQFomZy8+06csTWZ0m1343QqxZhR2LJ1OxCYM=
+k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
+k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9 h1:imL9YgXQ9p7xmPzHFm/vVd/cF78jad+n4wK1ABwYtMM=
+k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno=
+sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
diff --git a/pkg/noop/README.md b/pkg/noop/README.md
deleted file mode 100644
index e546039b9..000000000
--- a/pkg/noop/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-#noop
-
-A collection of no-op implementations of various interfaces
diff --git a/pkg/noop/noop.go b/pkg/noop/noop.go
deleted file mode 100644
index 13d0594b7..000000000
--- a/pkg/noop/noop.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package noop
-
-import (
- "context"
- "fmt"
-
- "github.com/prometheus/prometheus/pkg/labels"
- "github.com/prometheus/prometheus/storage"
-)
-
-type noopStorage struct{}
-
-// NewNoopStorage returns a new noop storage
-func NewNoopStorage() storage.Storage {
- return &noopStorage{}
-}
-
-// Querier returns a new Querier on the storage.
-func (n *noopStorage) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
- return storage.NoopQuerier(), nil
-}
-
-// StartTime returns the oldest timestamp stored in the storage.
-func (n *noopStorage) StartTime() (int64, error) {
- return 0, nil
-}
-
-// Appender returns a new appender against the storage.
-func (n *noopStorage) Appender(_ context.Context) storage.Appender {
- return NewNoopAppender()
-}
-
-// Close closes the storage and all its underlying resources.
-func (n *noopStorage) Close() error {
- return nil
-}
-
-func (n *noopStorage) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) {
- return nil, fmt.Errorf("promxy is not a ChunkQuerier")
-}
-
-type noopAppender struct{}
-
-// NewNoopAppender returns a new noop storage Appender
-func NewNoopAppender() storage.Appender {
- return &noopAppender{}
-}
-
-func (a *noopAppender) Add(l labels.Labels, t int64, v float64) (uint64, error) {
- return 0, nil
-}
-
-func (a *noopAppender) AddFast(ref uint64, t int64, v float64) error {
- return nil
-}
-
-// Commit submits the collected samples and purges the batch.
-func (a *noopAppender) Commit() error { return nil }
-func (a *noopAppender) Rollback() error { return nil }
diff --git a/pkg/promclient/api.go b/pkg/promclient/api.go
index f8a735a52..8e88f4c52 100644
--- a/pkg/promclient/api.go
+++ b/pkg/promclient/api.go
@@ -27,13 +27,13 @@ type PromAPIV1 struct {
}
// LabelNames returns all the unique label names present in the block in sorted order.
-func (p *PromAPIV1) LabelNames(ctx context.Context) ([]string, v1.Warnings, error) {
- return p.API.LabelNames(ctx, minTime, maxTime)
+func (p *PromAPIV1) LabelNames(ctx context.Context, matchers []string) ([]string, v1.Warnings, error) {
+ return p.API.LabelNames(ctx, matchers, minTime, maxTime)
}
// LabelValues performs a query for the values of the given label.
-func (p *PromAPIV1) LabelValues(ctx context.Context, label string) (model.LabelValues, v1.Warnings, error) {
- return p.API.LabelValues(ctx, label, minTime, maxTime)
+func (p *PromAPIV1) LabelValues(ctx context.Context, label string, matchers []string) (model.LabelValues, v1.Warnings, error) {
+ return p.API.LabelValues(ctx, label, matchers, minTime, maxTime)
}
// GetValue loads the raw data for a given set of matchers in the time range
diff --git a/pkg/promclient/debug.go b/pkg/promclient/debug.go
index 60d9be177..413980c7f 100644
--- a/pkg/promclient/debug.go
+++ b/pkg/promclient/debug.go
@@ -17,14 +17,14 @@ type DebugAPI struct {
}
// LabelNames returns all the unique label names present in the block in sorted order.
-func (d *DebugAPI) LabelNames(ctx context.Context) ([]string, v1.Warnings, error) {
+func (d *DebugAPI) LabelNames(ctx context.Context, matchers []string) ([]string, v1.Warnings, error) {
fields := logrus.Fields{
"api": "LabelNames",
}
logrus.WithFields(fields).Debug(d.PrefixMessage)
s := time.Now()
- v, w, err := d.A.LabelNames(ctx)
+ v, w, err := d.A.LabelNames(ctx, matchers)
fields["took"] = time.Since(s)
if logrus.GetLevel() > logrus.DebugLevel {
@@ -40,7 +40,7 @@ func (d *DebugAPI) LabelNames(ctx context.Context) ([]string, v1.Warnings, error
}
// LabelValues performs a query for the values of the given label.
-func (d *DebugAPI) LabelValues(ctx context.Context, label string) (model.LabelValues, v1.Warnings, error) {
+func (d *DebugAPI) LabelValues(ctx context.Context, label string, matchers []string) (model.LabelValues, v1.Warnings, error) {
fields := logrus.Fields{
"api": "LabelValues",
"label": label,
@@ -48,7 +48,7 @@ func (d *DebugAPI) LabelValues(ctx context.Context, label string) (model.LabelVa
logrus.WithFields(fields).Debug(d.PrefixMessage)
s := time.Now()
- v, w, err := d.A.LabelValues(ctx, label)
+ v, w, err := d.A.LabelValues(ctx, label, matchers)
fields["took"] = time.Since(s)
if logrus.GetLevel() > logrus.DebugLevel {
diff --git a/pkg/promclient/ignore_error.go b/pkg/promclient/ignore_error.go
index 79631448f..6028d299d 100644
--- a/pkg/promclient/ignore_error.go
+++ b/pkg/promclient/ignore_error.go
@@ -17,14 +17,14 @@ type IgnoreErrorAPI struct {
}
// LabelNames returns all the unique label names present in the block in sorted order.
-func (n *IgnoreErrorAPI) LabelNames(ctx context.Context) ([]string, v1.Warnings, error) {
- v, w, _ := n.A.LabelNames(ctx)
+func (n *IgnoreErrorAPI) LabelNames(ctx context.Context, matchers []string) ([]string, v1.Warnings, error) {
+ v, w, _ := n.A.LabelNames(ctx, matchers)
return v, w, nil
}
// LabelValues performs a query for the values of the given label.
-func (n *IgnoreErrorAPI) LabelValues(ctx context.Context, label string) (model.LabelValues, v1.Warnings, error) {
- v, w, _ := n.A.LabelValues(ctx, label)
+func (n *IgnoreErrorAPI) LabelValues(ctx context.Context, label string, matchers []string) (model.LabelValues, v1.Warnings, error) {
+ v, w, _ := n.A.LabelValues(ctx, label, matchers)
return v, w, nil
}
diff --git a/pkg/promclient/interface.go b/pkg/promclient/interface.go
index a42b871c6..ba7b73c79 100644
--- a/pkg/promclient/interface.go
+++ b/pkg/promclient/interface.go
@@ -12,9 +12,9 @@ import (
// API Subset of the interface defined in the prometheus client
type API interface {
// LabelNames returns all the unique label names present in the block in sorted order.
- LabelNames(ctx context.Context) ([]string, v1.Warnings, error)
+ LabelNames(ctx context.Context, matchers []string) ([]string, v1.Warnings, error)
// LabelValues performs a query for the values of the given label.
- LabelValues(ctx context.Context, label string) (model.LabelValues, v1.Warnings, error)
+ LabelValues(ctx context.Context, label string, matchers []string) (model.LabelValues, v1.Warnings, error)
// Query performs a query for the given time.
Query(ctx context.Context, query string, ts time.Time) (model.Value, v1.Warnings, error)
// QueryRange performs a query for the given range.
diff --git a/pkg/promclient/label.go b/pkg/promclient/label.go
index 33848b166..199dc4f36 100644
--- a/pkg/promclient/label.go
+++ b/pkg/promclient/label.go
@@ -58,8 +58,8 @@ func (c *AddLabelClient) Key() model.LabelSet {
}
// LabelNames returns all the unique label names present in the block in sorted order.
-func (c *AddLabelClient) LabelNames(ctx context.Context) ([]string, v1.Warnings, error) {
- l, w, err := c.API.LabelNames(ctx)
+func (c *AddLabelClient) LabelNames(ctx context.Context, matchers []string) ([]string, v1.Warnings, error) {
+ l, w, err := c.API.LabelNames(ctx, matchers)
if err != nil {
return nil, nil, err
}
@@ -80,8 +80,8 @@ func (c *AddLabelClient) LabelNames(ctx context.Context) ([]string, v1.Warnings,
}
// LabelValues performs a query for the values of the given label.
-func (c *AddLabelClient) LabelValues(ctx context.Context, label string) (model.LabelValues, v1.Warnings, error) {
- val, w, err := c.API.LabelValues(ctx, label)
+func (c *AddLabelClient) LabelValues(ctx context.Context, label string, matchers []string) (model.LabelValues, v1.Warnings, error) {
+ val, w, err := c.API.LabelValues(ctx, label, matchers)
if err != nil {
return nil, w, err
}
diff --git a/pkg/promclient/multi_api.go b/pkg/promclient/multi_api.go
index 4e78d3250..d62aa0f79 100644
--- a/pkg/promclient/multi_api.go
+++ b/pkg/promclient/multi_api.go
@@ -107,7 +107,7 @@ func (m *MultiAPI) recordMetric(i int, api, status string, took float64) {
}
// LabelValues performs a query for the values of the given label.
-func (m *MultiAPI) LabelValues(ctx context.Context, label string) (model.LabelValues, v1.Warnings, error) {
+func (m *MultiAPI) LabelValues(ctx context.Context, label string, matchers []string) (model.LabelValues, v1.Warnings, error) {
childContext, childContextCancel := context.WithCancel(ctx)
defer childContextCancel()
@@ -126,7 +126,7 @@ func (m *MultiAPI) LabelValues(ctx context.Context, label string) (model.LabelVa
outstandingRequests[m.apiFingerprints[i]]++
go func(i int, retChan chan chanResult, api API, label string) {
start := time.Now()
- result, w, err := api.LabelValues(childContext, label)
+ result, w, err := api.LabelValues(childContext, label, matchers)
took := time.Since(start)
if err != nil {
m.recordMetric(i, "label_values", "error", took.Seconds())
@@ -185,7 +185,7 @@ func (m *MultiAPI) LabelValues(ctx context.Context, label string) (model.LabelVa
}
// LabelNames returns all the unique label names present in the block in sorted order.
-func (m *MultiAPI) LabelNames(ctx context.Context) ([]string, v1.Warnings, error) {
+func (m *MultiAPI) LabelNames(ctx context.Context, matchers []string) ([]string, v1.Warnings, error) {
childContext, childContextCancel := context.WithCancel(ctx)
defer childContextCancel()
@@ -204,7 +204,7 @@ func (m *MultiAPI) LabelNames(ctx context.Context) ([]string, v1.Warnings, error
outstandingRequests[m.apiFingerprints[i]]++
go func(i int, retChan chan chanResult, api API) {
start := time.Now()
- result, w, err := api.LabelNames(childContext)
+ result, w, err := api.LabelNames(childContext, matchers)
took := time.Since(start)
if err != nil {
m.recordMetric(i, "label_names", "error", took.Seconds())
diff --git a/pkg/promclient/multi_api_test.go b/pkg/promclient/multi_api_test.go
index 0bb4936d7..cc03c0c60 100644
--- a/pkg/promclient/multi_api_test.go
+++ b/pkg/promclient/multi_api_test.go
@@ -22,12 +22,12 @@ type stubAPI struct {
}
// LabelNames returns all the unique label names present in the block in sorted order.
-func (s *stubAPI) LabelNames(ctx context.Context) ([]string, v1.Warnings, error) {
+func (s *stubAPI) LabelNames(ctx context.Context, matchers []string) ([]string, v1.Warnings, error) {
return s.labelNames(), nil, nil
}
// LabelValues performs a query for the values of the given label.
-func (s *stubAPI) LabelValues(ctx context.Context, label string) (model.LabelValues, v1.Warnings, error) {
+func (s *stubAPI) LabelValues(ctx context.Context, label string, matchers []string) (model.LabelValues, v1.Warnings, error) {
return s.labelValues(), nil, nil
}
@@ -63,19 +63,19 @@ func (s *errorAPI) Key() model.LabelSet {
return nil
}
-func (s *errorAPI) LabelNames(ctx context.Context) ([]string, v1.Warnings, error) {
+func (s *errorAPI) LabelNames(ctx context.Context, matchers []string) ([]string, v1.Warnings, error) {
if s.err != nil {
return nil, nil, s.err
}
- return s.LabelNames(ctx)
+ return s.LabelNames(ctx, matchers)
}
// LabelValues performs a query for the values of the given label.
-func (s *errorAPI) LabelValues(ctx context.Context, label string) (model.LabelValues, v1.Warnings, error) {
+func (s *errorAPI) LabelValues(ctx context.Context, label string, matchers []string) (model.LabelValues, v1.Warnings, error) {
if s.err != nil {
return nil, nil, s.err
}
- return s.LabelValues(ctx, label)
+ return s.LabelValues(ctx, label, matchers)
}
// Query performs a query for the given time.
@@ -350,7 +350,7 @@ func TestMultiAPIMerging(t *testing.T) {
for i, test := range tests {
t.Run(strconv.Itoa(i), func(t *testing.T) {
t.Run("LabelNames", func(t *testing.T) {
- v, _, err := test.a.LabelNames(context.TODO())
+ v, _, err := test.a.LabelNames(context.TODO(), nil)
if err != nil != test.err {
if test.err {
t.Fatalf("missing expected err")
@@ -376,7 +376,7 @@ func TestMultiAPIMerging(t *testing.T) {
})
t.Run("LabelValues", func(t *testing.T) {
- v, _, err := test.a.LabelValues(context.TODO(), "a")
+ v, _, err := test.a.LabelValues(context.TODO(), "a", nil)
if err != nil != test.err {
if test.err {
t.Fatalf("missing expected err")
diff --git a/pkg/promclient/recover.go b/pkg/promclient/recover.go
index 8d707fb2d..597ba5ce6 100644
--- a/pkg/promclient/recover.go
+++ b/pkg/promclient/recover.go
@@ -14,23 +14,23 @@ import (
type recoverAPI struct{ A API }
// LabelNames returns all the unique label names present in the block in sorted order.
-func (api *recoverAPI) LabelNames(ctx context.Context) (v []string, w v1.Warnings, err error) {
+func (api *recoverAPI) LabelNames(ctx context.Context, matchers []string) (v []string, w v1.Warnings, err error) {
defer func() {
if r := recover(); r != nil {
err = r.(error)
}
}()
- return api.A.LabelNames(ctx)
+ return api.A.LabelNames(ctx, matchers)
}
// LabelValues performs a query for the values of the given label.
-func (api *recoverAPI) LabelValues(ctx context.Context, label string) (v model.LabelValues, w v1.Warnings, err error) {
+func (api *recoverAPI) LabelValues(ctx context.Context, label string, matchers []string) (v model.LabelValues, w v1.Warnings, err error) {
defer func() {
if r := recover(); r != nil {
err = r.(error)
}
}()
- return api.A.LabelValues(ctx, label)
+ return api.A.LabelValues(ctx, label, matchers)
}
// Query performs a query for the given time.
diff --git a/pkg/proxyquerier/querier.go b/pkg/proxyquerier/querier.go
index 124cc7241..f65deb7e8 100644
--- a/pkg/proxyquerier/querier.go
+++ b/pkg/proxyquerier/querier.go
@@ -86,16 +86,26 @@ func (h *ProxyQuerier) Select(_ bool, hints *storage.SelectHints, matchers ...*l
}
// LabelValues returns all potential values for a label name.
-func (h *ProxyQuerier) LabelValues(name string) ([]string, storage.Warnings, error) {
+func (h *ProxyQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) {
start := time.Now()
defer func() {
logrus.WithFields(logrus.Fields{
- "name": name,
- "took": time.Since(start),
+ "name": name,
+ "matchers": matchers,
+ "took": time.Since(start),
}).Debug("LabelValues")
}()
- result, w, err := h.Client.LabelValues(h.Ctx, name)
+ var matchersStrings []string
+ if len(matchers) > 0 {
+ s, err := promhttputil.MatcherToString(matchers)
+ if err != nil {
+ return nil, nil, err
+ }
+ matchersStrings = []string{s}
+ }
+
+ result, w, err := h.Client.LabelValues(h.Ctx, name, matchersStrings)
warnings := promhttputil.WarningsConvert(w)
if err != nil {
return nil, warnings, errors.Cause(err)
@@ -110,7 +120,7 @@ func (h *ProxyQuerier) LabelValues(name string) ([]string, storage.Warnings, err
}
// LabelNames returns all the unique label names present in the block in sorted order.
-func (h *ProxyQuerier) LabelNames() ([]string, storage.Warnings, error) {
+func (h *ProxyQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) {
start := time.Now()
defer func() {
logrus.WithFields(logrus.Fields{
@@ -118,7 +128,16 @@ func (h *ProxyQuerier) LabelNames() ([]string, storage.Warnings, error) {
}).Debug("LabelNames")
}()
- v, w, err := h.Client.LabelNames(h.Ctx)
+ var matchersStrings []string
+ if len(matchers) > 0 {
+ s, err := promhttputil.MatcherToString(matchers)
+ if err != nil {
+ return nil, nil, err
+ }
+ matchersStrings = []string{s}
+ }
+
+ v, w, err := h.Client.LabelNames(h.Ctx, matchersStrings)
return v, promhttputil.WarningsConvert(w), err
}
diff --git a/pkg/proxystorage/appender_stub.go b/pkg/proxystorage/appender_stub.go
index 66fd88fab..e9de16207 100644
--- a/pkg/proxystorage/appender_stub.go
+++ b/pkg/proxystorage/appender_stub.go
@@ -1,9 +1,11 @@
package proxystorage
import (
+ "fmt"
"sync"
"time"
+ "github.com/prometheus/prometheus/pkg/exemplar"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/sirupsen/logrus"
)
@@ -15,7 +17,7 @@ type appenderStub struct{}
var appenderLock = sync.Mutex{}
var appenderWarningTime time.Time
-func (a *appenderStub) Add(l labels.Labels, t int64, v float64) (uint64, error) {
+func (a *appenderStub) Append(ref uint64, l labels.Labels, t int64, v float64) (uint64, error) {
appenderLock.Lock()
now := time.Now()
if now.Sub(appenderWarningTime) > time.Minute {
@@ -27,9 +29,8 @@ func (a *appenderStub) Add(l labels.Labels, t int64, v float64) (uint64, error)
return 0, nil
}
-func (a *appenderStub) AddFast(ref uint64, t int64, v float64) error {
- _, err := a.Add(nil, t, v)
- return err
+func (a *appenderStub) AppendExemplar(ref uint64, l labels.Labels, e exemplar.Exemplar) (uint64, error) {
+ return 0, fmt.Errorf("not Implemented")
}
// Commit submits the collected samples and purges the batch.
diff --git a/pkg/proxystorage/proxy.go b/pkg/proxystorage/proxy.go
index e53c5e688..94cbec4ea 100644
--- a/pkg/proxystorage/proxy.go
+++ b/pkg/proxystorage/proxy.go
@@ -182,6 +182,10 @@ func (p *ProxyStorage) ChunkQuerier(ctx context.Context, mint, maxt int64) (stor
return nil, errors.New("not implemented")
}
+func (p *ProxyStorage) WALReplayStatus() (tsdb.WALReplayStatus, error) {
+ return tsdb.WALReplayStatus{}, errors.New("not implemented")
+}
+
// Implement web.LocalStorage
func (p *ProxyStorage) CleanTombstones() (err error) { return nil }
func (p *ProxyStorage) Delete(mint, maxt int64, ms ...*labels.Matcher) error { return nil }
@@ -409,7 +413,7 @@ func (p *ProxyStorage) NodeReplacer(ctx context.Context, s *parser.EvalStmt, nod
series[i] = &proxyquerier.Series{iterator}
}
- ret := &parser.VectorSelector{Offset: offset}
+ ret := &parser.VectorSelector{OriginalOffset: offset, Offset: offset}
ret.UnexpandedSeriesSet = proxyquerier.NewSeriesSet(series, promhttputil.WarningsConvert(warnings), err)
// Replace with sum(count_values()) BY (label)
@@ -444,7 +448,7 @@ func (p *ProxyStorage) NodeReplacer(ctx context.Context, s *parser.EvalStmt, nod
series[i] = &proxyquerier.Series{iterator}
}
- ret := &parser.VectorSelector{Offset: offset}
+ ret := &parser.VectorSelector{OriginalOffset: offset, Offset: offset}
ret.UnexpandedSeriesSet = proxyquerier.NewSeriesSet(series, promhttputil.WarningsConvert(warnings), err)
n.Expr = ret
@@ -480,7 +484,7 @@ func (p *ProxyStorage) NodeReplacer(ctx context.Context, s *parser.EvalStmt, nod
series[i] = &proxyquerier.Series{iterator}
}
- ret := &parser.VectorSelector{Offset: offset}
+ ret := &parser.VectorSelector{OriginalOffset: offset, Offset: offset}
ret.UnexpandedSeriesSet = proxyquerier.NewSeriesSet(series, promhttputil.WarningsConvert(warnings), err)
// the "scalar()" function is a bit tricky. It can return a scalar or a vector.
@@ -495,6 +499,9 @@ func (p *ProxyStorage) NodeReplacer(ctx context.Context, s *parser.EvalStmt, nod
// If we are simply fetching a Vector then we can fetch the data using the same step that
// the query came in as (reducing the amount of data we need to fetch)
case *parser.VectorSelector:
+ if n.Timestamp != nil {
+ return nil, nil
+ }
// If the vector selector already has the data we can skip
if n.UnexpandedSeriesSet != nil {
return nil, nil
diff --git a/pkg/proxystorage/util.go b/pkg/proxystorage/util.go
index 801e1a317..530e968c5 100644
--- a/pkg/proxystorage/util.go
+++ b/pkg/proxystorage/util.go
@@ -54,6 +54,15 @@ func (o *OffsetFinder) Visit(node parser.Node, _ []parser.Node) (parser.Visitor,
o.l.Lock()
defer o.l.Unlock()
switch n := node.(type) {
+ case *parser.SubqueryExpr:
+ if !o.Found {
+ o.Offset = n.Offset
+ o.Found = true
+ } else {
+ if n.Offset != o.Offset {
+ o.Error = fmt.Errorf("mismatched offsets %v %v", n.Offset, o.Offset)
+ }
+ }
case *parser.VectorSelector:
if !o.Found {
o.Offset = n.Offset
diff --git a/pkg/remote/client.go b/pkg/remote/client.go
index 2880bf8bd..2679f42b6 100644
--- a/pkg/remote/client.go
+++ b/pkg/remote/client.go
@@ -53,7 +53,7 @@ type ClientConfig struct {
// NewClient creates a new Client.
func NewClient(index int, conf *ClientConfig) (*Client, error) {
- httpClient, err := config_util.NewClientFromConfig(conf.HTTPClientConfig, "remote_storage", false, false)
+ httpClient, err := config_util.NewClientFromConfig(conf.HTTPClientConfig, "remote_storage")
if err != nil {
return nil, err
}
diff --git a/pkg/remote/read.go b/pkg/remote/read.go
index 7f137196d..3cac9c866 100644
--- a/pkg/remote/read.go
+++ b/pkg/remote/read.go
@@ -15,6 +15,7 @@ package remote
import (
"context"
+ "fmt"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
@@ -79,15 +80,15 @@ func (q *querier) Select(sortSeries bool, p *storage.SelectHints, matchers ...*l
}
// LabelValues implements storage.Querier and is a noop.
-func (q *querier) LabelValues(name string) ([]string, storage.Warnings, error) {
+func (q *querier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) {
// TODO implement?
- return nil, nil, nil
+ return nil, nil, fmt.Errorf("not implemented")
}
// LabelNames implements storage.Querier and is a noop.
-func (q *querier) LabelNames() ([]string, storage.Warnings, error) {
+func (q *querier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) {
// TODO implement?
- return nil, nil, nil
+ return nil, nil, fmt.Errorf("not implemented")
}
// Close implements storage.Querier and is a noop.
diff --git a/pkg/remote/write.go b/pkg/remote/write.go
index 7aa4eebf8..52ded066c 100644
--- a/pkg/remote/write.go
+++ b/pkg/remote/write.go
@@ -14,7 +14,10 @@
package remote
import (
+ "fmt"
+
"github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/pkg/exemplar"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/storage"
)
@@ -25,7 +28,7 @@ func (s *Storage) Appender() (storage.Appender, error) {
}
// Add implements storage.Appender.
-func (s *Storage) Add(l labels.Labels, t int64, v float64) (uint64, error) {
+func (s *Storage) Append(ref uint64, l labels.Labels, t int64, v float64) (uint64, error) {
s.mtx.RLock()
defer s.mtx.RUnlock()
for _, q := range s.queues {
@@ -40,9 +43,8 @@ func (s *Storage) Add(l labels.Labels, t int64, v float64) (uint64, error) {
return 0, nil
}
-// AddFast implements storage.Appender.
-func (s *Storage) AddFast(_ uint64, t int64, v float64) error {
- return storage.ErrNotFound
+func (s *Storage) AppendExemplar(ref uint64, l labels.Labels, e exemplar.Exemplar) (uint64, error) {
+ return 0, fmt.Errorf("not supported")
}
// Commit implements storage.Appender.
diff --git a/pkg/server/api.go b/pkg/server/api.go
index 997223c85..aceb1c69c 100644
--- a/pkg/server/api.go
+++ b/pkg/server/api.go
@@ -8,7 +8,6 @@ import (
"time"
"github.com/julienschmidt/httprouter"
- "github.com/prometheus/common/log"
"github.com/prometheus/exporter-toolkit/web"
"github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
@@ -57,7 +56,7 @@ func createAndStartHTTP(srv *http.Server) (*http.Server, error) {
if err == http.ErrServerClosed {
return
}
- log.Errorf("Error listening: %v", err)
+ logrus.Errorf("Error listening: %v", err)
}
}()
return srv, nil
@@ -77,7 +76,7 @@ func createAndStartHTTPS(srv *http.Server, tlsConfigFile string) (*http.Server,
if err == http.ErrServerClosed {
return
}
- log.Errorf("Error listening: %v", err)
+ logrus.Errorf("Error listening: %v", err)
}
}()
return srv, nil
diff --git a/pkg/server/api_test.go b/pkg/server/api_test.go
index 935887dcc..a8ccbceea 100644
--- a/pkg/server/api_test.go
+++ b/pkg/server/api_test.go
@@ -18,7 +18,7 @@ import (
func TestUnauthenticatedServerFunctions(t *testing.T) {
freePort, err := getFreePort()
if err != nil {
- t.Errorf("could not get a free port to run test: %s", err.Error())
+ t.Fatalf("could not get a free port to run test: %s", err.Error())
}
bindAddr := fmt.Sprintf("localhost:%d", freePort)
router := httprouter.New()
@@ -26,29 +26,30 @@ func TestUnauthenticatedServerFunctions(t *testing.T) {
server, err := CreateAndStart(bindAddr, "text", time.Second*5, nil, router, "")
if err != nil {
- t.Errorf("an error occured during creation of server: %s", err.Error())
+ t.Fatalf("an error occurred during creation of server: %s", err.Error())
}
client := &http.Client{
Transport: &http.Transport{},
}
+ time.Sleep(time.Millisecond * 5)
resp, err := client.Get(fmt.Sprintf("http://%s/metrics", bindAddr))
if err != nil {
- t.Errorf("could not make request to metrics endpoint: %s", err.Error())
+ t.Fatalf("could not make request to metrics endpoint: %s", err.Error())
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
- t.Errorf("could not read response body: %s", err.Error())
+ t.Fatalf("could not read response body: %s", err.Error())
}
if resp.StatusCode != http.StatusOK {
- t.Errorf("an unexpected error occurred: unauthenticed client was unable to make a request to the unauthenticated server. Response body: %s", body)
+ t.Fatalf("an unexpected error occurred: unauthenticated client was unable to make a request to the unauthenticated server. Response body: %s", body)
}
if !strings.Contains(string(body), "go_goroutines") {
- t.Errorf("could not find metric name 'go_goroutines' in response")
+ t.Fatalf("could not find metric name 'go_goroutines' in response")
}
server.Close()
}
@@ -56,7 +57,7 @@ func TestUnauthenticatedServerFunctions(t *testing.T) {
func TestAuthenticatedServerDoesNotStartupWithInvalidConfig(t *testing.T) {
freePort, err := getFreePort()
if err != nil {
- t.Errorf("could not get a free port to run test: %s", err.Error())
+ t.Fatalf("could not get a free port to run test: %s", err.Error())
}
bindAddr := fmt.Sprintf("localhost:%d", freePort)
router := httprouter.New()
@@ -64,7 +65,7 @@ func TestAuthenticatedServerDoesNotStartupWithInvalidConfig(t *testing.T) {
server, err := CreateAndStart(bindAddr, "text", time.Second*5, nil, router, "testdata/invalid-tls-server-config.yml")
if err == nil {
- t.Errorf("server validated an invalid tlsConfig")
+ t.Fatalf("server validated an invalid tlsConfig")
}
if server != nil {
@@ -75,7 +76,7 @@ func TestAuthenticatedServerDoesNotStartupWithInvalidConfig(t *testing.T) {
func TestMutualTLSClientCannotConnectToAuthenticatedServerWithoutCerts(t *testing.T) {
freePort, err := getFreePort()
if err != nil {
- t.Errorf("could not get a free port to run test: %s", err.Error())
+ t.Fatalf("could not get a free port to run test: %s", err.Error())
}
bindAddr := fmt.Sprintf("localhost:%d", freePort)
router := httprouter.New()
@@ -83,7 +84,7 @@ func TestMutualTLSClientCannotConnectToAuthenticatedServerWithoutCerts(t *testin
server, err := CreateAndStart(bindAddr, "text", time.Second*5, nil, router, "testdata/tls-server-config.yml")
if err != nil {
- t.Errorf("an error occured during creation of server: %s", err.Error())
+ t.Fatalf("an error occurred during creation of server: %s", err.Error())
}
client := &http.Client{
@@ -96,7 +97,7 @@ func TestMutualTLSClientCannotConnectToAuthenticatedServerWithoutCerts(t *testin
_, err = client.Get(fmt.Sprintf("https://%s/metrics", bindAddr))
if err == nil {
- t.Errorf("was able to make a request to metrics endpoint when it should no have: %s", err.Error())
+ t.Fatalf("was able to make a request to metrics endpoint when it should not have")
}
server.Close()
@@ -105,7 +106,7 @@ func TestMutualTLSClientCannotConnectToAuthenticatedServerWithoutCerts(t *testin
func TestMutualTLSClientCanConnectToAuthenticatedServerWithCerts(t *testing.T) {
freePort, err := getFreePort()
if err != nil {
- t.Errorf("could not get a free port to run test: %s", err.Error())
+ t.Fatalf("could not get a free port to run test: %s", err.Error())
}
bindAddr := fmt.Sprintf("localhost:%d", freePort)
router := httprouter.New()
@@ -113,27 +114,27 @@ func TestMutualTLSClientCanConnectToAuthenticatedServerWithCerts(t *testing.T) {
server, err := CreateAndStart(bindAddr, "text", time.Second*5, nil, router, "testdata/tls-server-config.yml")
if err != nil {
- t.Errorf("an error occured during creation of server: %s", err.Error())
+ t.Fatalf("an error occurred during creation of server: %s", err.Error())
}
client := setupAuthenticatedClient(t)
resp, err := client.Get(fmt.Sprintf("https://%s/metrics", bindAddr))
if err != nil {
- t.Errorf("could not make request to metrics endpoint: %s", err.Error())
+ t.Fatalf("could not make request to metrics endpoint: %s", err.Error())
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
- t.Errorf("could not read response body: %s", err.Error())
+ t.Fatalf("could not read response body: %s", err.Error())
}
if resp.StatusCode != http.StatusOK {
- t.Errorf("authenticated client was unable to make a request to the authenticated server. Response body: %s", body)
+ t.Fatalf("authenticated client was unable to make a request to the authenticated server. Response body: %s", body)
}
if !strings.Contains(string(body), "go_goroutines") {
- t.Errorf("could not find metric name 'go_goroutines' in response")
+ t.Fatalf("could not find metric name 'go_goroutines' in response")
}
server.Close()
}
@@ -155,14 +156,14 @@ func getFreePort() (int, error) {
func setupAuthenticatedClient(t *testing.T) *http.Client {
caCert, err := ioutil.ReadFile("testdata/test-ca.crt")
if err != nil {
- t.Errorf("could not read ca certificate: %s", err)
+ t.Fatalf("could not read ca certificate: %s", err)
}
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert)
clientCert, err := tls.LoadX509KeyPair("testdata/client.crt", "testdata/client.key")
if err != nil {
- t.Errorf("could not create keypair from file: %s", err)
+ t.Fatalf("could not create keypair from file: %s", err)
}
return &http.Client{
diff --git a/pkg/servergroup/servergroup.go b/pkg/servergroup/servergroup.go
index f169b8de6..3e6c85a64 100644
--- a/pkg/servergroup/servergroup.go
+++ b/pkg/servergroup/servergroup.go
@@ -268,9 +268,9 @@ func (s *ServerGroup) ApplyConfig(cfg *Config) error {
// If a bearer token is provided, create a round tripper that will set the
// Authorization header correctly on each request.
if len(cfg.HTTPConfig.HTTPConfig.BearerToken) > 0 {
- rt = config_util.NewBearerAuthRoundTripper(cfg.HTTPConfig.HTTPConfig.BearerToken, rt)
+ rt = config_util.NewAuthorizationCredentialsRoundTripper("Bearer", cfg.HTTPConfig.HTTPConfig.BearerToken, rt)
} else if len(cfg.HTTPConfig.HTTPConfig.BearerTokenFile) > 0 {
- rt = config_util.NewBearerAuthFileRoundTripper(cfg.HTTPConfig.HTTPConfig.BearerTokenFile, rt)
+ rt = config_util.NewAuthorizationCredentialsFileRoundTripper("Bearer", cfg.HTTPConfig.HTTPConfig.BearerTokenFile, rt)
}
if cfg.HTTPConfig.HTTPConfig.BasicAuth != nil {
@@ -310,13 +310,13 @@ func (s *ServerGroup) QueryRange(ctx context.Context, query string, r v1.Range)
}
// LabelValues performs a query for the values of the given label.
-func (s *ServerGroup) LabelValues(ctx context.Context, label string) (model.LabelValues, v1.Warnings, error) {
- return s.State().apiClient.LabelValues(ctx, label)
+func (s *ServerGroup) LabelValues(ctx context.Context, label string, matchers []string) (model.LabelValues, v1.Warnings, error) {
+ return s.State().apiClient.LabelValues(ctx, label, matchers)
}
// LabelNames returns all the unique label names present in the block in sorted order.
-func (s *ServerGroup) LabelNames(ctx context.Context) ([]string, v1.Warnings, error) {
- return s.State().apiClient.LabelNames(ctx)
+func (s *ServerGroup) LabelNames(ctx context.Context, matchers []string) ([]string, v1.Warnings, error) {
+ return s.State().apiClient.LabelNames(ctx, matchers)
}
// Series finds series by label matchers.
diff --git a/test/promql_test.go b/test/promql_test.go
index 5717f43aa..8b7f9fad2 100644
--- a/test/promql_test.go
+++ b/test/promql_test.go
@@ -65,17 +65,11 @@ promxy:
- localhost:8083
labels:
az: a
- http_client:
- tls_config:
- insecure_skip_verify: true
- static_configs:
- targets:
- localhost:8085
labels:
az: b
- http_client:
- tls_config:
- insecure_skip_verify: true
`
const rawDoublePSConfigRR = `
@@ -87,18 +81,12 @@ promxy:
labels:
az: a
remote_read: true
- http_client:
- tls_config:
- insecure_skip_verify: true
- static_configs:
- targets:
- localhost:8085
labels:
az: b
remote_read: true
- http_client:
- tls_config:
- insecure_skip_verify: true
`
func getProxyStorage(cfg string) *proxystorage.ProxyStorage {
@@ -129,10 +117,14 @@ func startAPIForTest(s storage.Storage, listen string) (*http.Server, chan struc
api := v1.NewAPI(
promql.NewEngine(promql.EngineOpts{
- Timeout: 10 * time.Minute,
- MaxSamples: 50000000,
+ Timeout: 10 * time.Minute,
+ MaxSamples: 50000000,
+ NoStepSubqueryIntervalFn: func(int64) int64 { return (1 * time.Minute).Milliseconds() },
+ EnableAtModifier: true,
}),
s.(storage.SampleAndChunkQueryable),
+ nil, //appendable
+ nil, // exemplarQueryable
nil, //factoryTr
nil, //factoryAr
cfgFunc,
@@ -144,7 +136,7 @@ func startAPIForTest(s storage.Storage, listen string) (*http.Server, chan struc
}, // global URL options
readyFunc, // ready
nil, // local storage
- "", //tsdb dir
+ "", // tsdb dir
false, // enable admin API
nil, // logger
nil, // FactoryRr
@@ -153,8 +145,9 @@ func startAPIForTest(s storage.Storage, listen string) (*http.Server, chan struc
1048576, // RemoteReadBytesInFrame
nil, // CORSOrigin
nil, // runtimeInfo
- nil, // versionInfo
+ nil, // buildInfo
nil, // gatherer
+ nil, // registerer
)
apiRouter := route.New()
@@ -167,7 +160,7 @@ func startAPIForTest(s storage.Storage, listen string) (*http.Server, chan struc
go func() {
defer close(stopChan)
close(startChan)
- if err := srv.ListenAndServe(); err != nil {
+ if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
fmt.Println("Error listening to", listen, err)
}
}()
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/LICENSE b/vendor/github.com/Azure/azure-sdk-for-go/LICENSE
deleted file mode 100644
index 047555ec7..000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright 2020 Microsoft Corporation
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/LICENSE.txt
new file mode 100644
index 000000000..05b0ebf5b
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/LICENSE.txt
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) Microsoft Corporation.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/NOTICE b/vendor/github.com/Azure/azure-sdk-for-go/NOTICE
deleted file mode 100644
index 2d1d72608..000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/NOTICE
+++ /dev/null
@@ -1,5 +0,0 @@
-Microsoft Azure-SDK-for-Go
-Copyright 2014-2017 Microsoft
-
-This product includes software developed at
-the Microsoft Corporation (https://www.microsoft.com).
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/NOTICE.txt b/vendor/github.com/Azure/azure-sdk-for-go/NOTICE.txt
new file mode 100644
index 000000000..a338672ec
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/NOTICE.txt
@@ -0,0 +1,29 @@
+NOTICES AND INFORMATION
+Do Not Translate or Localize
+
+This software incorporates material from third parties. Microsoft makes certain
+open source code available at https://3rdpartysource.microsoft.com, or you may
+send a check or money order for US $5.00, including the product name, the open
+source component name, and version number, to:
+
+Source Code Compliance Team
+Microsoft Corporation
+One Microsoft Way
+Redmond, WA 98052
+USA
+
+Notwithstanding any other terms, you may reverse engineer this software to the
+extent required to debug changes to any libraries licensed under the GNU Lesser
+General Public License.
+
+------------------------------------------------------------------------------
+
+Azure SDK for Go uses third-party libraries or other resources that may be
+distributed under licenses different than the Azure SDK for Go software.
+
+In the event that we accidentally failed to list a required notice, please
+bring it to our attention. Post an issue or email us:
+
+ azgosdkhelp@microsoft.com
+
+The attached notices are provided for information only.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/CHANGELOG.md
index d590c4927..4a3834a4c 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/CHANGELOG.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/CHANGELOG.md
@@ -1,25 +1,44 @@
-Generated from https://github.com/Azure/azure-rest-api-specs/tree/3c764635e7d442b3e74caf593029fcd440b3ef82
+# Change History
-Code generator @microsoft.azure/autorest.go@~2.1.161
+## Additive Changes
-## Breaking Changes
+### New Funcs
-- Function `NewContainerServiceListResultPage` parameter(s) have been changed from `(func(context.Context, ContainerServiceListResult) (ContainerServiceListResult, error))` to `(ContainerServiceListResult, func(context.Context, ContainerServiceListResult) (ContainerServiceListResult, error))`
-- Function `NewVirtualMachineScaleSetExtensionListResultPage` parameter(s) have been changed from `(func(context.Context, VirtualMachineScaleSetExtensionListResult) (VirtualMachineScaleSetExtensionListResult, error))` to `(VirtualMachineScaleSetExtensionListResult, func(context.Context, VirtualMachineScaleSetExtensionListResult) (VirtualMachineScaleSetExtensionListResult, error))`
-- Function `NewGalleryImageVersionListPage` parameter(s) have been changed from `(func(context.Context, GalleryImageVersionList) (GalleryImageVersionList, error))` to `(GalleryImageVersionList, func(context.Context, GalleryImageVersionList) (GalleryImageVersionList, error))`
-- Function `NewVirtualMachineScaleSetListResultPage` parameter(s) have been changed from `(func(context.Context, VirtualMachineScaleSetListResult) (VirtualMachineScaleSetListResult, error))` to `(VirtualMachineScaleSetListResult, func(context.Context, VirtualMachineScaleSetListResult) (VirtualMachineScaleSetListResult, error))`
-- Function `NewProximityPlacementGroupListResultPage` parameter(s) have been changed from `(func(context.Context, ProximityPlacementGroupListResult) (ProximityPlacementGroupListResult, error))` to `(ProximityPlacementGroupListResult, func(context.Context, ProximityPlacementGroupListResult) (ProximityPlacementGroupListResult, error))`
-- Function `NewResourceSkusResultPage` parameter(s) have been changed from `(func(context.Context, ResourceSkusResult) (ResourceSkusResult, error))` to `(ResourceSkusResult, func(context.Context, ResourceSkusResult) (ResourceSkusResult, error))`
-- Function `NewVirtualMachineScaleSetListOSUpgradeHistoryPage` parameter(s) have been changed from `(func(context.Context, VirtualMachineScaleSetListOSUpgradeHistory) (VirtualMachineScaleSetListOSUpgradeHistory, error))` to `(VirtualMachineScaleSetListOSUpgradeHistory, func(context.Context, VirtualMachineScaleSetListOSUpgradeHistory) (VirtualMachineScaleSetListOSUpgradeHistory, error))`
-- Function `NewVirtualMachineScaleSetListWithLinkResultPage` parameter(s) have been changed from `(func(context.Context, VirtualMachineScaleSetListWithLinkResult) (VirtualMachineScaleSetListWithLinkResult, error))` to `(VirtualMachineScaleSetListWithLinkResult, func(context.Context, VirtualMachineScaleSetListWithLinkResult) (VirtualMachineScaleSetListWithLinkResult, error))`
-- Function `NewGalleryImageListPage` parameter(s) have been changed from `(func(context.Context, GalleryImageList) (GalleryImageList, error))` to `(GalleryImageList, func(context.Context, GalleryImageList) (GalleryImageList, error))`
-- Function `NewVirtualMachineListResultPage` parameter(s) have been changed from `(func(context.Context, VirtualMachineListResult) (VirtualMachineListResult, error))` to `(VirtualMachineListResult, func(context.Context, VirtualMachineListResult) (VirtualMachineListResult, error))`
-- Function `NewVirtualMachineScaleSetListSkusResultPage` parameter(s) have been changed from `(func(context.Context, VirtualMachineScaleSetListSkusResult) (VirtualMachineScaleSetListSkusResult, error))` to `(VirtualMachineScaleSetListSkusResult, func(context.Context, VirtualMachineScaleSetListSkusResult) (VirtualMachineScaleSetListSkusResult, error))`
-- Function `NewDiskListPage` parameter(s) have been changed from `(func(context.Context, DiskList) (DiskList, error))` to `(DiskList, func(context.Context, DiskList) (DiskList, error))`
-- Function `NewVirtualMachineScaleSetVMListResultPage` parameter(s) have been changed from `(func(context.Context, VirtualMachineScaleSetVMListResult) (VirtualMachineScaleSetVMListResult, error))` to `(VirtualMachineScaleSetVMListResult, func(context.Context, VirtualMachineScaleSetVMListResult) (VirtualMachineScaleSetVMListResult, error))`
-- Function `NewImageListResultPage` parameter(s) have been changed from `(func(context.Context, ImageListResult) (ImageListResult, error))` to `(ImageListResult, func(context.Context, ImageListResult) (ImageListResult, error))`
-- Function `NewRunCommandListResultPage` parameter(s) have been changed from `(func(context.Context, RunCommandListResult) (RunCommandListResult, error))` to `(RunCommandListResult, func(context.Context, RunCommandListResult) (RunCommandListResult, error))`
-- Function `NewGalleryListPage` parameter(s) have been changed from `(func(context.Context, GalleryList) (GalleryList, error))` to `(GalleryList, func(context.Context, GalleryList) (GalleryList, error))`
-- Function `NewListUsagesResultPage` parameter(s) have been changed from `(func(context.Context, ListUsagesResult) (ListUsagesResult, error))` to `(ListUsagesResult, func(context.Context, ListUsagesResult) (ListUsagesResult, error))`
-- Function `NewSnapshotListPage` parameter(s) have been changed from `(func(context.Context, SnapshotList) (SnapshotList, error))` to `(SnapshotList, func(context.Context, SnapshotList) (SnapshotList, error))`
-- Function `NewAvailabilitySetListResultPage` parameter(s) have been changed from `(func(context.Context, AvailabilitySetListResult) (AvailabilitySetListResult, error))` to `(AvailabilitySetListResult, func(context.Context, AvailabilitySetListResult) (AvailabilitySetListResult, error))`
+1. AccessURI.MarshalJSON() ([]byte, error)
+1. BootDiagnosticsInstanceView.MarshalJSON() ([]byte, error)
+1. DataDiskImage.MarshalJSON() ([]byte, error)
+1. GalleryDataDiskImage.MarshalJSON() ([]byte, error)
+1. GalleryDiskImage.MarshalJSON() ([]byte, error)
+1. GalleryIdentifier.MarshalJSON() ([]byte, error)
+1. GalleryImageVersionStorageProfile.MarshalJSON() ([]byte, error)
+1. GalleryOSDiskImage.MarshalJSON() ([]byte, error)
+1. LogAnalyticsOperationResult.MarshalJSON() ([]byte, error)
+1. LogAnalyticsOutput.MarshalJSON() ([]byte, error)
+1. OperationListResult.MarshalJSON() ([]byte, error)
+1. OperationValueDisplay.MarshalJSON() ([]byte, error)
+1. RecoveryWalkResponse.MarshalJSON() ([]byte, error)
+1. RegionalReplicationStatus.MarshalJSON() ([]byte, error)
+1. ReplicationStatus.MarshalJSON() ([]byte, error)
+1. ResourceSku.MarshalJSON() ([]byte, error)
+1. ResourceSkuCapabilities.MarshalJSON() ([]byte, error)
+1. ResourceSkuCapacity.MarshalJSON() ([]byte, error)
+1. ResourceSkuCosts.MarshalJSON() ([]byte, error)
+1. ResourceSkuLocationInfo.MarshalJSON() ([]byte, error)
+1. ResourceSkuRestrictionInfo.MarshalJSON() ([]byte, error)
+1. ResourceSkuRestrictions.MarshalJSON() ([]byte, error)
+1. RollbackStatusInfo.MarshalJSON() ([]byte, error)
+1. RollingUpgradeProgressInfo.MarshalJSON() ([]byte, error)
+1. RollingUpgradeRunningStatus.MarshalJSON() ([]byte, error)
+1. RollingUpgradeStatusInfoProperties.MarshalJSON() ([]byte, error)
+1. SubResourceReadOnly.MarshalJSON() ([]byte, error)
+1. UpgradeOperationHistoricalStatusInfo.MarshalJSON() ([]byte, error)
+1. UpgradeOperationHistoricalStatusInfoProperties.MarshalJSON() ([]byte, error)
+1. UpgradeOperationHistoryStatus.MarshalJSON() ([]byte, error)
+1. VirtualMachineHealthStatus.MarshalJSON() ([]byte, error)
+1. VirtualMachineIdentityUserAssignedIdentitiesValue.MarshalJSON() ([]byte, error)
+1. VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue.MarshalJSON() ([]byte, error)
+1. VirtualMachineScaleSetInstanceViewStatusesSummary.MarshalJSON() ([]byte, error)
+1. VirtualMachineScaleSetSku.MarshalJSON() ([]byte, error)
+1. VirtualMachineScaleSetSkuCapacity.MarshalJSON() ([]byte, error)
+1. VirtualMachineScaleSetVMExtensionsSummary.MarshalJSON() ([]byte, error)
+1. VirtualMachineStatusCodeCount.MarshalJSON() ([]byte, error)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/_meta.json
new file mode 100644
index 000000000..916acf762
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/_meta.json
@@ -0,0 +1,11 @@
+{
+ "commit": "3c764635e7d442b3e74caf593029fcd440b3ef82",
+ "readme": "/_/azure-rest-api-specs/specification/compute/resource-manager/readme.md",
+ "tag": "package-2018-10-01",
+ "use": "@microsoft.azure/autorest.go@2.1.183",
+ "repository_url": "https://github.com/Azure/azure-rest-api-specs.git",
+ "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.183 --tag=package-2018-10-01 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/compute/resource-manager/readme.md",
+ "additional_properties": {
+ "additional_options": "--go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION"
+ }
+}
\ No newline at end of file
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/availabilitysets.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/availabilitysets.go
index 49bbbf4fe..8c5fed693 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/availabilitysets.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/availabilitysets.go
@@ -1,18 +1,7 @@
package compute
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -307,6 +296,7 @@ func (client AvailabilitySetsClient) List(ctx context.Context, resourceGroupName
}
if result.aslr.hasNextLink() && result.aslr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -367,7 +357,6 @@ func (client AvailabilitySetsClient) listNextResults(ctx context.Context, lastRe
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -500,6 +489,7 @@ func (client AvailabilitySetsClient) ListBySubscription(ctx context.Context, exp
}
if result.aslr.hasNextLink() && result.aslr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -562,7 +552,6 @@ func (client AvailabilitySetsClient) listBySubscriptionNextResults(ctx context.C
result, err = client.ListBySubscriptionResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "listBySubscriptionNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/client.go
index d9d043019..1812f27fe 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/client.go
@@ -3,19 +3,8 @@
// Compute Client
package compute
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/containerservices.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/containerservices.go
index f369af493..d27f363da 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/containerservices.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/containerservices.go
@@ -1,18 +1,7 @@
package compute
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -54,8 +43,8 @@ func (client ContainerServicesClient) CreateOrUpdate(ctx context.Context, resour
ctx = tracing.StartSpan(ctx, fqdn+"/ContainerServicesClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -99,7 +88,7 @@ func (client ContainerServicesClient) CreateOrUpdate(ctx context.Context, resour
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -137,7 +126,10 @@ func (client ContainerServicesClient) CreateOrUpdateSender(req *http.Request) (f
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -165,8 +157,8 @@ func (client ContainerServicesClient) Delete(ctx context.Context, resourceGroupN
ctx = tracing.StartSpan(ctx, fqdn+"/ContainerServicesClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -179,7 +171,7 @@ func (client ContainerServicesClient) Delete(ctx context.Context, resourceGroupN
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "Delete", nil, "Failure sending request")
return
}
@@ -215,7 +207,10 @@ func (client ContainerServicesClient) DeleteSender(req *http.Request) (future Co
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -342,6 +337,7 @@ func (client ContainerServicesClient) List(ctx context.Context) (result Containe
}
if result.cslr.hasNextLink() && result.cslr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -401,7 +397,6 @@ func (client ContainerServicesClient) listNextResults(ctx context.Context, lastR
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -459,6 +454,7 @@ func (client ContainerServicesClient) ListByResourceGroup(ctx context.Context, r
}
if result.cslr.hasNextLink() && result.cslr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -519,7 +515,6 @@ func (client ContainerServicesClient) listByResourceGroupNextResults(ctx context
result, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/disks.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/disks.go
index d8bbe279b..0b69e93db 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/disks.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/disks.go
@@ -1,18 +1,7 @@
package compute
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -54,8 +43,8 @@ func (client DisksClient) CreateOrUpdate(ctx context.Context, resourceGroupName
ctx = tracing.StartSpan(ctx, fqdn+"/DisksClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -89,7 +78,7 @@ func (client DisksClient) CreateOrUpdate(ctx context.Context, resourceGroupName
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DisksClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.DisksClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -128,7 +117,10 @@ func (client DisksClient) CreateOrUpdateSender(req *http.Request) (future DisksC
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -155,8 +147,8 @@ func (client DisksClient) Delete(ctx context.Context, resourceGroupName string,
ctx = tracing.StartSpan(ctx, fqdn+"/DisksClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -169,7 +161,7 @@ func (client DisksClient) Delete(ctx context.Context, resourceGroupName string,
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DisksClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.DisksClient", "Delete", nil, "Failure sending request")
return
}
@@ -205,7 +197,10 @@ func (client DisksClient) DeleteSender(req *http.Request) (future DisksDeleteFut
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -310,8 +305,8 @@ func (client DisksClient) GrantAccess(ctx context.Context, resourceGroupName str
ctx = tracing.StartSpan(ctx, fqdn+"/DisksClient.GrantAccess")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -330,7 +325,7 @@ func (client DisksClient) GrantAccess(ctx context.Context, resourceGroupName str
result, err = client.GrantAccessSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DisksClient", "GrantAccess", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.DisksClient", "GrantAccess", nil, "Failure sending request")
return
}
@@ -368,7 +363,10 @@ func (client DisksClient) GrantAccessSender(req *http.Request) (future DisksGran
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -417,6 +415,7 @@ func (client DisksClient) List(ctx context.Context) (result DiskListPage, err er
}
if result.dl.hasNextLink() && result.dl.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -476,7 +475,6 @@ func (client DisksClient) listNextResults(ctx context.Context, lastResults DiskL
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.DisksClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -532,6 +530,7 @@ func (client DisksClient) ListByResourceGroup(ctx context.Context, resourceGroup
}
if result.dl.hasNextLink() && result.dl.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -592,7 +591,6 @@ func (client DisksClient) listByResourceGroupNextResults(ctx context.Context, la
result, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.DisksClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -624,8 +622,8 @@ func (client DisksClient) RevokeAccess(ctx context.Context, resourceGroupName st
ctx = tracing.StartSpan(ctx, fqdn+"/DisksClient.RevokeAccess")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -638,7 +636,7 @@ func (client DisksClient) RevokeAccess(ctx context.Context, resourceGroupName st
result, err = client.RevokeAccessSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DisksClient", "RevokeAccess", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.DisksClient", "RevokeAccess", nil, "Failure sending request")
return
}
@@ -674,7 +672,10 @@ func (client DisksClient) RevokeAccessSender(req *http.Request) (future DisksRev
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -701,8 +702,8 @@ func (client DisksClient) Update(ctx context.Context, resourceGroupName string,
ctx = tracing.StartSpan(ctx, fqdn+"/DisksClient.Update")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -715,7 +716,7 @@ func (client DisksClient) Update(ctx context.Context, resourceGroupName string,
result, err = client.UpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DisksClient", "Update", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.DisksClient", "Update", nil, "Failure sending request")
return
}
@@ -753,7 +754,10 @@ func (client DisksClient) UpdateSender(req *http.Request) (future DisksUpdateFut
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/enums.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/enums.go
index 93b13fac8..4ff08cdd5 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/enums.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/enums.go
@@ -1,18 +1,7 @@
package compute
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/galleries.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/galleries.go
index 5e505fe67..7c738a3c4 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/galleries.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/galleries.go
@@ -1,18 +1,7 @@
package compute
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -52,8 +41,8 @@ func (client GalleriesClient) CreateOrUpdate(ctx context.Context, resourceGroupN
ctx = tracing.StartSpan(ctx, fqdn+"/GalleriesClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -66,7 +55,7 @@ func (client GalleriesClient) CreateOrUpdate(ctx context.Context, resourceGroupN
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -104,7 +93,10 @@ func (client GalleriesClient) CreateOrUpdateSender(req *http.Request) (future Ga
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -129,8 +121,8 @@ func (client GalleriesClient) Delete(ctx context.Context, resourceGroupName stri
ctx = tracing.StartSpan(ctx, fqdn+"/GalleriesClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -143,7 +135,7 @@ func (client GalleriesClient) Delete(ctx context.Context, resourceGroupName stri
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "Delete", nil, "Failure sending request")
return
}
@@ -179,7 +171,10 @@ func (client GalleriesClient) DeleteSender(req *http.Request) (future GalleriesD
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -303,6 +298,7 @@ func (client GalleriesClient) List(ctx context.Context) (result GalleryListPage,
}
if result.gl.hasNextLink() && result.gl.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -362,7 +358,6 @@ func (client GalleriesClient) listNextResults(ctx context.Context, lastResults G
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -418,6 +413,7 @@ func (client GalleriesClient) ListByResourceGroup(ctx context.Context, resourceG
}
if result.gl.hasNextLink() && result.gl.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -478,7 +474,6 @@ func (client GalleriesClient) listByResourceGroupNextResults(ctx context.Context
result, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/galleryimages.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/galleryimages.go
index 86f92a8c9..953838211 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/galleryimages.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/galleryimages.go
@@ -1,18 +1,7 @@
package compute
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -55,8 +44,8 @@ func (client GalleryImagesClient) CreateOrUpdate(ctx context.Context, resourceGr
ctx = tracing.StartSpan(ctx, fqdn+"/GalleryImagesClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -81,7 +70,7 @@ func (client GalleryImagesClient) CreateOrUpdate(ctx context.Context, resourceGr
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -120,7 +109,10 @@ func (client GalleryImagesClient) CreateOrUpdateSender(req *http.Request) (futur
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -146,8 +138,8 @@ func (client GalleryImagesClient) Delete(ctx context.Context, resourceGroupName
ctx = tracing.StartSpan(ctx, fqdn+"/GalleryImagesClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -160,7 +152,7 @@ func (client GalleryImagesClient) Delete(ctx context.Context, resourceGroupName
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "Delete", nil, "Failure sending request")
return
}
@@ -197,7 +189,10 @@ func (client GalleryImagesClient) DeleteSender(req *http.Request) (future Galler
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -326,6 +321,7 @@ func (client GalleryImagesClient) ListByGallery(ctx context.Context, resourceGro
}
if result.gil.hasNextLink() && result.gil.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -387,7 +383,6 @@ func (client GalleryImagesClient) listByGalleryNextResults(ctx context.Context,
result, err = client.ListByGalleryResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "listByGalleryNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/galleryimageversions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/galleryimageversions.go
index 716896ac2..5f327efa5 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/galleryimageversions.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/galleryimageversions.go
@@ -1,18 +1,7 @@
package compute
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -57,8 +46,8 @@ func (client GalleryImageVersionsClient) CreateOrUpdate(ctx context.Context, res
ctx = tracing.StartSpan(ctx, fqdn+"/GalleryImageVersionsClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -78,7 +67,7 @@ func (client GalleryImageVersionsClient) CreateOrUpdate(ctx context.Context, res
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -118,7 +107,10 @@ func (client GalleryImageVersionsClient) CreateOrUpdateSender(req *http.Request)
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -145,8 +137,8 @@ func (client GalleryImageVersionsClient) Delete(ctx context.Context, resourceGro
ctx = tracing.StartSpan(ctx, fqdn+"/GalleryImageVersionsClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -159,7 +151,7 @@ func (client GalleryImageVersionsClient) Delete(ctx context.Context, resourceGro
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "Delete", nil, "Failure sending request")
return
}
@@ -197,7 +189,10 @@ func (client GalleryImageVersionsClient) DeleteSender(req *http.Request) (future
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -334,6 +329,7 @@ func (client GalleryImageVersionsClient) ListByGalleryImage(ctx context.Context,
}
if result.givl.hasNextLink() && result.givl.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -396,7 +392,6 @@ func (client GalleryImageVersionsClient) listByGalleryImageNextResults(ctx conte
result, err = client.ListByGalleryImageResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "listByGalleryImageNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/images.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/images.go
index d6161d84e..dd8cdf42d 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/images.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/images.go
@@ -1,18 +1,7 @@
package compute
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -51,8 +40,8 @@ func (client ImagesClient) CreateOrUpdate(ctx context.Context, resourceGroupName
ctx = tracing.StartSpan(ctx, fqdn+"/ImagesClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -65,7 +54,7 @@ func (client ImagesClient) CreateOrUpdate(ctx context.Context, resourceGroupName
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ImagesClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.ImagesClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -103,7 +92,10 @@ func (client ImagesClient) CreateOrUpdateSender(req *http.Request) (future Image
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -128,8 +120,8 @@ func (client ImagesClient) Delete(ctx context.Context, resourceGroupName string,
ctx = tracing.StartSpan(ctx, fqdn+"/ImagesClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -142,7 +134,7 @@ func (client ImagesClient) Delete(ctx context.Context, resourceGroupName string,
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ImagesClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.ImagesClient", "Delete", nil, "Failure sending request")
return
}
@@ -178,7 +170,10 @@ func (client ImagesClient) DeleteSender(req *http.Request) (future ImagesDeleteF
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -307,6 +302,7 @@ func (client ImagesClient) List(ctx context.Context) (result ImageListResultPage
}
if result.ilr.hasNextLink() && result.ilr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -366,7 +362,6 @@ func (client ImagesClient) listNextResults(ctx context.Context, lastResults Imag
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.ImagesClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -422,6 +417,7 @@ func (client ImagesClient) ListByResourceGroup(ctx context.Context, resourceGrou
}
if result.ilr.hasNextLink() && result.ilr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -482,7 +478,6 @@ func (client ImagesClient) listByResourceGroupNextResults(ctx context.Context, l
result, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.ImagesClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -513,8 +508,8 @@ func (client ImagesClient) Update(ctx context.Context, resourceGroupName string,
ctx = tracing.StartSpan(ctx, fqdn+"/ImagesClient.Update")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -527,7 +522,7 @@ func (client ImagesClient) Update(ctx context.Context, resourceGroupName string,
result, err = client.UpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ImagesClient", "Update", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.ImagesClient", "Update", nil, "Failure sending request")
return
}
@@ -565,7 +560,10 @@ func (client ImagesClient) UpdateSender(req *http.Request) (future ImagesUpdateF
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/loganalytics.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/loganalytics.go
index 126ee7a3b..79c4adecc 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/loganalytics.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/loganalytics.go
@@ -1,18 +1,7 @@
package compute
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -52,8 +41,8 @@ func (client LogAnalyticsClient) ExportRequestRateByInterval(ctx context.Context
ctx = tracing.StartSpan(ctx, fqdn+"/LogAnalyticsClient.ExportRequestRateByInterval")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -72,7 +61,7 @@ func (client LogAnalyticsClient) ExportRequestRateByInterval(ctx context.Context
result, err = client.ExportRequestRateByIntervalSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.LogAnalyticsClient", "ExportRequestRateByInterval", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.LogAnalyticsClient", "ExportRequestRateByInterval", nil, "Failure sending request")
return
}
@@ -109,7 +98,10 @@ func (client LogAnalyticsClient) ExportRequestRateByIntervalSender(req *http.Req
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -135,8 +127,8 @@ func (client LogAnalyticsClient) ExportThrottledRequests(ctx context.Context, pa
ctx = tracing.StartSpan(ctx, fqdn+"/LogAnalyticsClient.ExportThrottledRequests")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -155,7 +147,7 @@ func (client LogAnalyticsClient) ExportThrottledRequests(ctx context.Context, pa
result, err = client.ExportThrottledRequestsSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.LogAnalyticsClient", "ExportThrottledRequests", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.LogAnalyticsClient", "ExportThrottledRequests", nil, "Failure sending request")
return
}
@@ -192,7 +184,10 @@ func (client LogAnalyticsClient) ExportThrottledRequestsSender(req *http.Request
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/models.go
index 18a26f6ba..9aed45caa 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/models.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/models.go
@@ -1,18 +1,7 @@
package compute
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -38,6 +27,12 @@ type AccessURI struct {
AccessSAS *string `json:"accessSAS,omitempty"`
}
+// MarshalJSON is the custom marshaler for AccessURI.
+func (au AccessURI) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// AdditionalCapabilities enables or disables a capability on the virtual machine or virtual machine scale
// set.
type AdditionalCapabilities struct {
@@ -513,6 +508,12 @@ type BootDiagnosticsInstanceView struct {
Status *InstanceViewStatus `json:"status,omitempty"`
}
+// MarshalJSON is the custom marshaler for BootDiagnosticsInstanceView.
+func (bdiv BootDiagnosticsInstanceView) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// CloudError an error response from the Gallery service.
type CloudError struct {
Error *APIError `json:"error,omitempty"`
@@ -912,12 +913,25 @@ func (csp ContainerServiceProperties) MarshalJSON() ([]byte, error) {
// ContainerServicesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type ContainerServicesCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ContainerServicesClient) (ContainerService, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ContainerServicesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ContainerServicesCreateOrUpdateFuture) Result(client ContainerServicesClient) (cs ContainerService, err error) {
+// result is the default implementation for ContainerServicesCreateOrUpdateFuture.Result.
+func (future *ContainerServicesCreateOrUpdateFuture) result(client ContainerServicesClient) (cs ContainerService, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -925,6 +939,7 @@ func (future *ContainerServicesCreateOrUpdateFuture) Result(client ContainerServ
return
}
if !done {
+ cs.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.ContainerServicesCreateOrUpdateFuture")
return
}
@@ -941,12 +956,25 @@ func (future *ContainerServicesCreateOrUpdateFuture) Result(client ContainerServ
// ContainerServicesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type ContainerServicesDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ContainerServicesClient) (autorest.Response, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ContainerServicesDeleteFuture) Result(client ContainerServicesClient) (ar autorest.Response, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ContainerServicesDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for ContainerServicesDeleteFuture.Result.
+func (future *ContainerServicesDeleteFuture) result(client ContainerServicesClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -954,6 +982,7 @@ func (future *ContainerServicesDeleteFuture) Result(client ContainerServicesClie
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.ContainerServicesDeleteFuture")
return
}
@@ -1049,6 +1078,12 @@ type DataDiskImage struct {
Lun *int32 `json:"lun,omitempty"`
}
+// MarshalJSON is the custom marshaler for DataDiskImage.
+func (ddi DataDiskImage) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// DiagnosticsProfile specifies the boot diagnostic settings state.
Minimum api-version:
// 2015-06-15.
type DiagnosticsProfile struct {
@@ -1433,12 +1468,25 @@ func (dp DiskProperties) MarshalJSON() ([]byte, error) {
// DisksCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type DisksCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(DisksClient) (Disk, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *DisksCreateOrUpdateFuture) Result(client DisksClient) (d Disk, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *DisksCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for DisksCreateOrUpdateFuture.Result.
+func (future *DisksCreateOrUpdateFuture) result(client DisksClient) (d Disk, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -1446,6 +1494,7 @@ func (future *DisksCreateOrUpdateFuture) Result(client DisksClient) (d Disk, err
return
}
if !done {
+ d.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.DisksCreateOrUpdateFuture")
return
}
@@ -1461,12 +1510,25 @@ func (future *DisksCreateOrUpdateFuture) Result(client DisksClient) (d Disk, err
// DisksDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation.
type DisksDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(DisksClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *DisksDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *DisksDeleteFuture) Result(client DisksClient) (ar autorest.Response, err error) {
+// result is the default implementation for DisksDeleteFuture.Result.
+func (future *DisksDeleteFuture) result(client DisksClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -1474,6 +1536,7 @@ func (future *DisksDeleteFuture) Result(client DisksClient) (ar autorest.Respons
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.DisksDeleteFuture")
return
}
@@ -1484,12 +1547,25 @@ func (future *DisksDeleteFuture) Result(client DisksClient) (ar autorest.Respons
// DisksGrantAccessFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type DisksGrantAccessFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(DisksClient) (AccessURI, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *DisksGrantAccessFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *DisksGrantAccessFuture) Result(client DisksClient) (au AccessURI, err error) {
+// result is the default implementation for DisksGrantAccessFuture.Result.
+func (future *DisksGrantAccessFuture) result(client DisksClient) (au AccessURI, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -1497,6 +1573,7 @@ func (future *DisksGrantAccessFuture) Result(client DisksClient) (au AccessURI,
return
}
if !done {
+ au.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.DisksGrantAccessFuture")
return
}
@@ -1530,12 +1607,25 @@ func (ds DiskSku) MarshalJSON() ([]byte, error) {
// DisksRevokeAccessFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type DisksRevokeAccessFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(DisksClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *DisksRevokeAccessFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *DisksRevokeAccessFuture) Result(client DisksClient) (ar autorest.Response, err error) {
+// result is the default implementation for DisksRevokeAccessFuture.Result.
+func (future *DisksRevokeAccessFuture) result(client DisksClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -1543,6 +1633,7 @@ func (future *DisksRevokeAccessFuture) Result(client DisksClient) (ar autorest.R
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.DisksRevokeAccessFuture")
return
}
@@ -1552,12 +1643,25 @@ func (future *DisksRevokeAccessFuture) Result(client DisksClient) (ar autorest.R
// DisksUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation.
type DisksUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(DisksClient) (Disk, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *DisksUpdateFuture) Result(client DisksClient) (d Disk, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *DisksUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for DisksUpdateFuture.Result.
+func (future *DisksUpdateFuture) result(client DisksClient) (d Disk, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -1565,6 +1669,7 @@ func (future *DisksUpdateFuture) Result(client DisksClient) (d Disk, err error)
return
}
if !done {
+ d.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.DisksUpdateFuture")
return
}
@@ -1670,12 +1775,25 @@ type EncryptionSettings struct {
// GalleriesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type GalleriesCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(GalleriesClient) (Gallery, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for GalleriesCreateOrUpdateFuture.
+func (future *GalleriesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *GalleriesCreateOrUpdateFuture) Result(client GalleriesClient) (g Gallery, err error) {
+// result is the default implementation for GalleriesCreateOrUpdateFuture.Result.
+func (future *GalleriesCreateOrUpdateFuture) result(client GalleriesClient) (g Gallery, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -1683,6 +1801,7 @@ func (future *GalleriesCreateOrUpdateFuture) Result(client GalleriesClient) (g G
return
}
if !done {
+ g.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.GalleriesCreateOrUpdateFuture")
return
}
@@ -1699,12 +1818,25 @@ func (future *GalleriesCreateOrUpdateFuture) Result(client GalleriesClient) (g G
// GalleriesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type GalleriesDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(GalleriesClient) (autorest.Response, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *GalleriesDeleteFuture) Result(client GalleriesClient) (ar autorest.Response, err error) {
+// UnmarshalJSON is the custom unmarshaller for GalleriesDeleteFuture.
+func (future *GalleriesDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for GalleriesDeleteFuture.Result.
+func (future *GalleriesDeleteFuture) result(client GalleriesClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -1712,6 +1844,7 @@ func (future *GalleriesDeleteFuture) Result(client GalleriesClient) (ar autorest
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.GalleriesDeleteFuture")
return
}
@@ -1841,6 +1974,12 @@ type GalleryDataDiskImage struct {
HostCaching HostCaching `json:"hostCaching,omitempty"`
}
+// MarshalJSON is the custom marshaler for GalleryDataDiskImage.
+func (gddi GalleryDataDiskImage) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// GalleryDiskImage this is the disk image base class.
type GalleryDiskImage struct {
// SizeInGB - READ-ONLY; This property indicates the size of the VHD to be created.
@@ -1849,12 +1988,24 @@ type GalleryDiskImage struct {
HostCaching HostCaching `json:"hostCaching,omitempty"`
}
+// MarshalJSON is the custom marshaler for GalleryDiskImage.
+func (gdi GalleryDiskImage) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// GalleryIdentifier describes the gallery unique name.
type GalleryIdentifier struct {
// UniqueName - READ-ONLY; The unique name of the Shared Image Gallery. This name is generated automatically by Azure.
UniqueName *string `json:"uniqueName,omitempty"`
}
+// MarshalJSON is the custom marshaler for GalleryIdentifier.
+func (gi GalleryIdentifier) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// GalleryImage specifies information about the gallery Image Definition that you want to create or update.
type GalleryImage struct {
autorest.Response `json:"-"`
@@ -2190,12 +2341,25 @@ func (gip GalleryImageProperties) MarshalJSON() ([]byte, error) {
// GalleryImagesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type GalleryImagesCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(GalleryImagesClient) (GalleryImage, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for GalleryImagesCreateOrUpdateFuture.
+func (future *GalleryImagesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *GalleryImagesCreateOrUpdateFuture) Result(client GalleryImagesClient) (gi GalleryImage, err error) {
+// result is the default implementation for GalleryImagesCreateOrUpdateFuture.Result.
+func (future *GalleryImagesCreateOrUpdateFuture) result(client GalleryImagesClient) (gi GalleryImage, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -2203,6 +2367,7 @@ func (future *GalleryImagesCreateOrUpdateFuture) Result(client GalleryImagesClie
return
}
if !done {
+ gi.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.GalleryImagesCreateOrUpdateFuture")
return
}
@@ -2219,12 +2384,25 @@ func (future *GalleryImagesCreateOrUpdateFuture) Result(client GalleryImagesClie
// GalleryImagesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type GalleryImagesDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(GalleryImagesClient) (autorest.Response, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *GalleryImagesDeleteFuture) Result(client GalleryImagesClient) (ar autorest.Response, err error) {
+// UnmarshalJSON is the custom unmarshaller for GalleryImagesDeleteFuture.
+func (future *GalleryImagesDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for GalleryImagesDeleteFuture.Result.
+func (future *GalleryImagesDeleteFuture) result(client GalleryImagesClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -2232,6 +2410,7 @@ func (future *GalleryImagesDeleteFuture) Result(client GalleryImagesClient) (ar
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.GalleryImagesDeleteFuture")
return
}
@@ -2558,12 +2737,25 @@ func (givpp GalleryImageVersionPublishingProfile) MarshalJSON() ([]byte, error)
// GalleryImageVersionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type GalleryImageVersionsCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(GalleryImageVersionsClient) (GalleryImageVersion, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *GalleryImageVersionsCreateOrUpdateFuture) Result(client GalleryImageVersionsClient) (giv GalleryImageVersion, err error) {
+// UnmarshalJSON is the custom unmarshaller for GalleryImageVersionsCreateOrUpdateFuture.
+func (future *GalleryImageVersionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for GalleryImageVersionsCreateOrUpdateFuture.Result.
+func (future *GalleryImageVersionsCreateOrUpdateFuture) result(client GalleryImageVersionsClient) (giv GalleryImageVersion, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -2571,6 +2763,7 @@ func (future *GalleryImageVersionsCreateOrUpdateFuture) Result(client GalleryIma
return
}
if !done {
+ giv.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.GalleryImageVersionsCreateOrUpdateFuture")
return
}
@@ -2587,12 +2780,25 @@ func (future *GalleryImageVersionsCreateOrUpdateFuture) Result(client GalleryIma
// GalleryImageVersionsDeleteFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type GalleryImageVersionsDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(GalleryImageVersionsClient) (autorest.Response, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *GalleryImageVersionsDeleteFuture) Result(client GalleryImageVersionsClient) (ar autorest.Response, err error) {
+// UnmarshalJSON is the custom unmarshaller for GalleryImageVersionsDeleteFuture.
+func (future *GalleryImageVersionsDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for GalleryImageVersionsDeleteFuture.Result.
+func (future *GalleryImageVersionsDeleteFuture) result(client GalleryImageVersionsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -2600,6 +2806,7 @@ func (future *GalleryImageVersionsDeleteFuture) Result(client GalleryImageVersio
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.GalleryImageVersionsDeleteFuture")
return
}
@@ -2615,6 +2822,12 @@ type GalleryImageVersionStorageProfile struct {
DataDiskImages *[]GalleryDataDiskImage `json:"dataDiskImages,omitempty"`
}
+// MarshalJSON is the custom marshaler for GalleryImageVersionStorageProfile.
+func (givsp GalleryImageVersionStorageProfile) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// GalleryList the List Galleries operation response.
type GalleryList struct {
autorest.Response `json:"-"`
@@ -2782,6 +2995,12 @@ type GalleryOSDiskImage struct {
HostCaching HostCaching `json:"hostCaching,omitempty"`
}
+// MarshalJSON is the custom marshaler for GalleryOSDiskImage.
+func (godi GalleryOSDiskImage) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// GalleryProperties describes the properties of a Shared Image Gallery.
type GalleryProperties struct {
// Description - The description of this Shared Image Gallery resource. This property is updatable.
@@ -3177,12 +3396,25 @@ type ImageReference struct {
// ImagesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type ImagesCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ImagesClient) (Image, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for ImagesCreateOrUpdateFuture.
+func (future *ImagesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ImagesCreateOrUpdateFuture) Result(client ImagesClient) (i Image, err error) {
+// result is the default implementation for ImagesCreateOrUpdateFuture.Result.
+func (future *ImagesCreateOrUpdateFuture) result(client ImagesClient) (i Image, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -3190,6 +3422,7 @@ func (future *ImagesCreateOrUpdateFuture) Result(client ImagesClient) (i Image,
return
}
if !done {
+ i.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.ImagesCreateOrUpdateFuture")
return
}
@@ -3205,12 +3438,25 @@ func (future *ImagesCreateOrUpdateFuture) Result(client ImagesClient) (i Image,
// ImagesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation.
type ImagesDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ImagesClient) (autorest.Response, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ImagesDeleteFuture) Result(client ImagesClient) (ar autorest.Response, err error) {
+// UnmarshalJSON is the custom unmarshaller for ImagesDeleteFuture.
+func (future *ImagesDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for ImagesDeleteFuture.Result.
+func (future *ImagesDeleteFuture) result(client ImagesClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -3218,6 +3464,7 @@ func (future *ImagesDeleteFuture) Result(client ImagesClient) (ar autorest.Respo
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.ImagesDeleteFuture")
return
}
@@ -3237,12 +3484,25 @@ type ImageStorageProfile struct {
// ImagesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation.
type ImagesUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ImagesClient) (Image, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for ImagesUpdateFuture.
+func (future *ImagesUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ImagesUpdateFuture) Result(client ImagesClient) (i Image, err error) {
+// result is the default implementation for ImagesUpdateFuture.Result.
+func (future *ImagesUpdateFuture) result(client ImagesClient) (i Image, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -3250,6 +3510,7 @@ func (future *ImagesUpdateFuture) Result(client ImagesClient) (i Image, err erro
return
}
if !done {
+ i.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.ImagesUpdateFuture")
return
}
@@ -3558,12 +3819,25 @@ type ListVirtualMachineImageResource struct {
// LogAnalyticsExportRequestRateByIntervalFuture an abstraction for monitoring and retrieving the results
// of a long-running operation.
type LogAnalyticsExportRequestRateByIntervalFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(LogAnalyticsClient) (LogAnalyticsOperationResult, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *LogAnalyticsExportRequestRateByIntervalFuture) Result(client LogAnalyticsClient) (laor LogAnalyticsOperationResult, err error) {
+// UnmarshalJSON is the custom unmarshaller for LogAnalyticsExportRequestRateByIntervalFuture.
+func (future *LogAnalyticsExportRequestRateByIntervalFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for LogAnalyticsExportRequestRateByIntervalFuture.Result.
+func (future *LogAnalyticsExportRequestRateByIntervalFuture) result(client LogAnalyticsClient) (laor LogAnalyticsOperationResult, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -3571,6 +3845,7 @@ func (future *LogAnalyticsExportRequestRateByIntervalFuture) Result(client LogAn
return
}
if !done {
+ laor.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.LogAnalyticsExportRequestRateByIntervalFuture")
return
}
@@ -3587,12 +3862,25 @@ func (future *LogAnalyticsExportRequestRateByIntervalFuture) Result(client LogAn
// LogAnalyticsExportThrottledRequestsFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type LogAnalyticsExportThrottledRequestsFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(LogAnalyticsClient) (LogAnalyticsOperationResult, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for LogAnalyticsExportThrottledRequestsFuture.
+func (future *LogAnalyticsExportThrottledRequestsFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *LogAnalyticsExportThrottledRequestsFuture) Result(client LogAnalyticsClient) (laor LogAnalyticsOperationResult, err error) {
+// result is the default implementation for LogAnalyticsExportThrottledRequestsFuture.Result.
+func (future *LogAnalyticsExportThrottledRequestsFuture) result(client LogAnalyticsClient) (laor LogAnalyticsOperationResult, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -3600,6 +3888,7 @@ func (future *LogAnalyticsExportThrottledRequestsFuture) Result(client LogAnalyt
return
}
if !done {
+ laor.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.LogAnalyticsExportThrottledRequestsFuture")
return
}
@@ -3636,12 +3925,24 @@ type LogAnalyticsOperationResult struct {
Properties *LogAnalyticsOutput `json:"properties,omitempty"`
}
+// MarshalJSON is the custom marshaler for LogAnalyticsOperationResult.
+func (laor LogAnalyticsOperationResult) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// LogAnalyticsOutput logAnalytics output properties
type LogAnalyticsOutput struct {
// Output - READ-ONLY; Output file Uri path to blob container.
Output *string `json:"output,omitempty"`
}
+// MarshalJSON is the custom marshaler for LogAnalyticsOutput.
+func (lao LogAnalyticsOutput) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// MaintenanceRedeployStatus maintenance Operation Status.
type MaintenanceRedeployStatus struct {
// IsCustomerInitiatedMaintenanceAllowed - True, if customer is allowed to perform Maintenance.
@@ -3745,6 +4046,12 @@ type OperationListResult struct {
Value *[]OperationValue `json:"value,omitempty"`
}
+// MarshalJSON is the custom marshaler for OperationListResult.
+func (olr OperationListResult) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// OperationValue describes the properties of a Compute Operation value.
type OperationValue struct {
// Origin - READ-ONLY; The origin of the compute operation.
@@ -3817,6 +4124,12 @@ type OperationValueDisplay struct {
Provider *string `json:"provider,omitempty"`
}
+// MarshalJSON is the custom marshaler for OperationValueDisplay.
+func (ovd OperationValueDisplay) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// OSDisk specifies information about the operating system disk used by the virtual machine. For
// more information about disks, see [About disks and VHDs for Azure virtual
// machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
@@ -4210,6 +4523,12 @@ type RecoveryWalkResponse struct {
NextPlatformUpdateDomain *int32 `json:"nextPlatformUpdateDomain,omitempty"`
}
+// MarshalJSON is the custom marshaler for RecoveryWalkResponse.
+func (rwr RecoveryWalkResponse) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// RegionalReplicationStatus this is the regional replication status.
type RegionalReplicationStatus struct {
// Region - READ-ONLY; The region to which the gallery Image Version is being replicated to.
@@ -4222,6 +4541,12 @@ type RegionalReplicationStatus struct {
Progress *int32 `json:"progress,omitempty"`
}
+// MarshalJSON is the custom marshaler for RegionalReplicationStatus.
+func (rrs RegionalReplicationStatus) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// ReplicationStatus this is the replication status of the gallery Image Version.
type ReplicationStatus struct {
// AggregatedState - READ-ONLY; This is the aggregated replication status based on all the regional replication status flags. Possible values include: 'Unknown', 'InProgress', 'Completed', 'Failed'
@@ -4230,6 +4555,12 @@ type ReplicationStatus struct {
Summary *[]RegionalReplicationStatus `json:"summary,omitempty"`
}
+// MarshalJSON is the custom marshaler for ReplicationStatus.
+func (rs ReplicationStatus) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// RequestRateByIntervalInput api request input for LogAnalytics getRequestRateByInterval Api.
type RequestRateByIntervalInput struct {
// IntervalLength - Interval value in minutes used to create LogAnalytics call rate logs. Possible values include: 'ThreeMins', 'FiveMins', 'ThirtyMins', 'SixtyMins'
@@ -4312,6 +4643,12 @@ type ResourceSku struct {
Restrictions *[]ResourceSkuRestrictions `json:"restrictions,omitempty"`
}
+// MarshalJSON is the custom marshaler for ResourceSku.
+func (rs ResourceSku) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// ResourceSkuCapabilities describes The SKU capabilities object.
type ResourceSkuCapabilities struct {
// Name - READ-ONLY; An invariant to describe the feature.
@@ -4320,6 +4657,12 @@ type ResourceSkuCapabilities struct {
Value *string `json:"value,omitempty"`
}
+// MarshalJSON is the custom marshaler for ResourceSkuCapabilities.
+func (rsc ResourceSkuCapabilities) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// ResourceSkuCapacity describes scaling information of a SKU.
type ResourceSkuCapacity struct {
// Minimum - READ-ONLY; The minimum capacity.
@@ -4332,6 +4675,12 @@ type ResourceSkuCapacity struct {
ScaleType ResourceSkuCapacityScaleType `json:"scaleType,omitempty"`
}
+// MarshalJSON is the custom marshaler for ResourceSkuCapacity.
+func (rsc ResourceSkuCapacity) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// ResourceSkuCosts describes metadata for retrieving price info.
type ResourceSkuCosts struct {
// MeterID - READ-ONLY; Used for querying price from commerce.
@@ -4342,6 +4691,12 @@ type ResourceSkuCosts struct {
ExtendedUnit *string `json:"extendedUnit,omitempty"`
}
+// MarshalJSON is the custom marshaler for ResourceSkuCosts.
+func (rsc ResourceSkuCosts) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// ResourceSkuLocationInfo ...
type ResourceSkuLocationInfo struct {
// Location - READ-ONLY; Location of the SKU
@@ -4350,6 +4705,12 @@ type ResourceSkuLocationInfo struct {
Zones *[]string `json:"zones,omitempty"`
}
+// MarshalJSON is the custom marshaler for ResourceSkuLocationInfo.
+func (rsli ResourceSkuLocationInfo) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// ResourceSkuRestrictionInfo ...
type ResourceSkuRestrictionInfo struct {
// Locations - READ-ONLY; Locations where the SKU is restricted
@@ -4358,6 +4719,12 @@ type ResourceSkuRestrictionInfo struct {
Zones *[]string `json:"zones,omitempty"`
}
+// MarshalJSON is the custom marshaler for ResourceSkuRestrictionInfo.
+func (rsri ResourceSkuRestrictionInfo) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// ResourceSkuRestrictions describes scaling information of a SKU.
type ResourceSkuRestrictions struct {
// Type - READ-ONLY; The type of restrictions. Possible values include: 'Location', 'Zone'
@@ -4370,6 +4737,12 @@ type ResourceSkuRestrictions struct {
ReasonCode ResourceSkuRestrictionsReasonCode `json:"reasonCode,omitempty"`
}
+// MarshalJSON is the custom marshaler for ResourceSkuRestrictions.
+func (rsr ResourceSkuRestrictions) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// ResourceSkusResult the List Resource Skus operation response.
type ResourceSkusResult struct {
autorest.Response `json:"-"`
@@ -4539,6 +4912,12 @@ type RollbackStatusInfo struct {
RollbackError *APIError `json:"rollbackError,omitempty"`
}
+// MarshalJSON is the custom marshaler for RollbackStatusInfo.
+func (rsi RollbackStatusInfo) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// RollingUpgradePolicy the configuration parameters used while performing a rolling upgrade.
type RollingUpgradePolicy struct {
// MaxBatchInstancePercent - The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one batch. As this is a maximum, unhealthy instances in previous or future batches can cause the percentage of instances in a batch to decrease to ensure higher reliability. The default value for this parameter is 20%.
@@ -4564,6 +4943,12 @@ type RollingUpgradeProgressInfo struct {
PendingInstanceCount *int32 `json:"pendingInstanceCount,omitempty"`
}
+// MarshalJSON is the custom marshaler for RollingUpgradeProgressInfo.
+func (rupi RollingUpgradeProgressInfo) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// RollingUpgradeRunningStatus information about the current running state of the overall upgrade.
type RollingUpgradeRunningStatus struct {
// Code - READ-ONLY; Code indicating the current status of the upgrade. Possible values include: 'RollingUpgradeStatusCodeRollingForward', 'RollingUpgradeStatusCodeCancelled', 'RollingUpgradeStatusCodeCompleted', 'RollingUpgradeStatusCodeFaulted'
@@ -4576,6 +4961,12 @@ type RollingUpgradeRunningStatus struct {
LastActionTime *date.Time `json:"lastActionTime,omitempty"`
}
+// MarshalJSON is the custom marshaler for RollingUpgradeRunningStatus.
+func (rurs RollingUpgradeRunningStatus) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// RollingUpgradeStatusInfo the status of the latest virtual machine scale set rolling upgrade.
type RollingUpgradeStatusInfo struct {
autorest.Response `json:"-"`
@@ -4688,6 +5079,12 @@ type RollingUpgradeStatusInfoProperties struct {
Error *APIError `json:"error,omitempty"`
}
+// MarshalJSON is the custom marshaler for RollingUpgradeStatusInfoProperties.
+func (rusip RollingUpgradeStatusInfoProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// RunCommandDocument describes the properties of a Run Command.
type RunCommandDocument struct {
autorest.Response `json:"-"`
@@ -5249,12 +5646,25 @@ func (sp SnapshotProperties) MarshalJSON() ([]byte, error) {
// SnapshotsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type SnapshotsCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(SnapshotsClient) (Snapshot, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for SnapshotsCreateOrUpdateFuture.
+func (future *SnapshotsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *SnapshotsCreateOrUpdateFuture) Result(client SnapshotsClient) (s Snapshot, err error) {
+// result is the default implementation for SnapshotsCreateOrUpdateFuture.Result.
+func (future *SnapshotsCreateOrUpdateFuture) result(client SnapshotsClient) (s Snapshot, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -5262,6 +5672,7 @@ func (future *SnapshotsCreateOrUpdateFuture) Result(client SnapshotsClient) (s S
return
}
if !done {
+ s.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.SnapshotsCreateOrUpdateFuture")
return
}
@@ -5278,12 +5689,25 @@ func (future *SnapshotsCreateOrUpdateFuture) Result(client SnapshotsClient) (s S
// SnapshotsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type SnapshotsDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(SnapshotsClient) (autorest.Response, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *SnapshotsDeleteFuture) Result(client SnapshotsClient) (ar autorest.Response, err error) {
+// UnmarshalJSON is the custom unmarshaller for SnapshotsDeleteFuture.
+func (future *SnapshotsDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for SnapshotsDeleteFuture.Result.
+func (future *SnapshotsDeleteFuture) result(client SnapshotsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -5291,6 +5715,7 @@ func (future *SnapshotsDeleteFuture) Result(client SnapshotsClient) (ar autorest
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.SnapshotsDeleteFuture")
return
}
@@ -5301,12 +5726,25 @@ func (future *SnapshotsDeleteFuture) Result(client SnapshotsClient) (ar autorest
// SnapshotsGrantAccessFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type SnapshotsGrantAccessFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(SnapshotsClient) (AccessURI, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *SnapshotsGrantAccessFuture) Result(client SnapshotsClient) (au AccessURI, err error) {
+// UnmarshalJSON is the custom unmarshaller for SnapshotsGrantAccessFuture.
+func (future *SnapshotsGrantAccessFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for SnapshotsGrantAccessFuture.Result.
+func (future *SnapshotsGrantAccessFuture) result(client SnapshotsClient) (au AccessURI, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -5314,6 +5752,7 @@ func (future *SnapshotsGrantAccessFuture) Result(client SnapshotsClient) (au Acc
return
}
if !done {
+ au.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.SnapshotsGrantAccessFuture")
return
}
@@ -5347,12 +5786,25 @@ func (ss SnapshotSku) MarshalJSON() ([]byte, error) {
// SnapshotsRevokeAccessFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type SnapshotsRevokeAccessFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(SnapshotsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for SnapshotsRevokeAccessFuture.
+func (future *SnapshotsRevokeAccessFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *SnapshotsRevokeAccessFuture) Result(client SnapshotsClient) (ar autorest.Response, err error) {
+// result is the default implementation for SnapshotsRevokeAccessFuture.Result.
+func (future *SnapshotsRevokeAccessFuture) result(client SnapshotsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -5360,6 +5812,7 @@ func (future *SnapshotsRevokeAccessFuture) Result(client SnapshotsClient) (ar au
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.SnapshotsRevokeAccessFuture")
return
}
@@ -5370,12 +5823,25 @@ func (future *SnapshotsRevokeAccessFuture) Result(client SnapshotsClient) (ar au
// SnapshotsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type SnapshotsUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(SnapshotsClient) (Snapshot, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *SnapshotsUpdateFuture) Result(client SnapshotsClient) (s Snapshot, err error) {
+// UnmarshalJSON is the custom unmarshaller for SnapshotsUpdateFuture.
+func (future *SnapshotsUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for SnapshotsUpdateFuture.Result.
+func (future *SnapshotsUpdateFuture) result(client SnapshotsClient) (s Snapshot, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -5383,6 +5849,7 @@ func (future *SnapshotsUpdateFuture) Result(client SnapshotsClient) (s Snapshot,
return
}
if !done {
+ s.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.SnapshotsUpdateFuture")
return
}
@@ -5515,6 +5982,12 @@ type SubResourceReadOnly struct {
ID *string `json:"id,omitempty"`
}
+// MarshalJSON is the custom marshaler for SubResourceReadOnly.
+func (srro SubResourceReadOnly) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// TargetRegion describes the target region information.
type TargetRegion struct {
// Name - The name of the region.
@@ -5564,6 +6037,12 @@ type UpgradeOperationHistoricalStatusInfo struct {
Location *string `json:"location,omitempty"`
}
+// MarshalJSON is the custom marshaler for UpgradeOperationHistoricalStatusInfo.
+func (uohsi UpgradeOperationHistoricalStatusInfo) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// UpgradeOperationHistoricalStatusInfoProperties describes each OS upgrade on the Virtual Machine Scale
// Set.
type UpgradeOperationHistoricalStatusInfoProperties struct {
@@ -5581,6 +6060,12 @@ type UpgradeOperationHistoricalStatusInfoProperties struct {
RollbackInfo *RollbackStatusInfo `json:"rollbackInfo,omitempty"`
}
+// MarshalJSON is the custom marshaler for UpgradeOperationHistoricalStatusInfoProperties.
+func (uohsip UpgradeOperationHistoricalStatusInfoProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// UpgradeOperationHistoryStatus information about the current running state of the overall upgrade.
type UpgradeOperationHistoryStatus struct {
// Code - READ-ONLY; Code indicating the current status of the upgrade. Possible values include: 'UpgradeStateRollingForward', 'UpgradeStateCancelled', 'UpgradeStateCompleted', 'UpgradeStateFaulted'
@@ -5591,6 +6076,12 @@ type UpgradeOperationHistoryStatus struct {
EndTime *date.Time `json:"endTime,omitempty"`
}
+// MarshalJSON is the custom marshaler for UpgradeOperationHistoryStatus.
+func (uohs UpgradeOperationHistoryStatus) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// UpgradePolicy describes an upgrade policy - automatic, manual, or rolling.
type UpgradePolicy struct {
// Mode - Specifies the mode of an upgrade to virtual machines in the scale set.
Possible values are:
**Manual** - You control the application of updates to virtual machines in the scale set. You do this by using the manualUpgrade action.
**Automatic** - All virtual machines in the scale set are automatically updated at the same time. Possible values include: 'Automatic', 'Manual', 'Rolling'
@@ -6134,12 +6625,25 @@ func (vmep VirtualMachineExtensionProperties) MarshalJSON() ([]byte, error) {
// VirtualMachineExtensionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of
// a long-running operation.
type VirtualMachineExtensionsCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachineExtensionsClient) (VirtualMachineExtension, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachineExtensionsCreateOrUpdateFuture) Result(client VirtualMachineExtensionsClient) (vme VirtualMachineExtension, err error) {
+// UnmarshalJSON is the custom unmarshaller for VirtualMachineExtensionsCreateOrUpdateFuture.
+func (future *VirtualMachineExtensionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualMachineExtensionsCreateOrUpdateFuture.Result.
+func (future *VirtualMachineExtensionsCreateOrUpdateFuture) result(client VirtualMachineExtensionsClient) (vme VirtualMachineExtension, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -6147,6 +6651,7 @@ func (future *VirtualMachineExtensionsCreateOrUpdateFuture) Result(client Virtua
return
}
if !done {
+ vme.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineExtensionsCreateOrUpdateFuture")
return
}
@@ -6163,12 +6668,25 @@ func (future *VirtualMachineExtensionsCreateOrUpdateFuture) Result(client Virtua
// VirtualMachineExtensionsDeleteFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualMachineExtensionsDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachineExtensionsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for VirtualMachineExtensionsDeleteFuture.
+func (future *VirtualMachineExtensionsDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachineExtensionsDeleteFuture) Result(client VirtualMachineExtensionsClient) (ar autorest.Response, err error) {
+// result is the default implementation for VirtualMachineExtensionsDeleteFuture.Result.
+func (future *VirtualMachineExtensionsDeleteFuture) result(client VirtualMachineExtensionsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -6176,6 +6694,7 @@ func (future *VirtualMachineExtensionsDeleteFuture) Result(client VirtualMachine
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineExtensionsDeleteFuture")
return
}
@@ -6193,12 +6712,25 @@ type VirtualMachineExtensionsListResult struct {
// VirtualMachineExtensionsUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualMachineExtensionsUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachineExtensionsClient) (VirtualMachineExtension, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachineExtensionsUpdateFuture) Result(client VirtualMachineExtensionsClient) (vme VirtualMachineExtension, err error) {
+// UnmarshalJSON is the custom unmarshaller for VirtualMachineExtensionsUpdateFuture.
+func (future *VirtualMachineExtensionsUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualMachineExtensionsUpdateFuture.Result.
+func (future *VirtualMachineExtensionsUpdateFuture) result(client VirtualMachineExtensionsClient) (vme VirtualMachineExtension, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -6206,6 +6738,7 @@ func (future *VirtualMachineExtensionsUpdateFuture) Result(client VirtualMachine
return
}
if !done {
+ vme.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineExtensionsUpdateFuture")
return
}
@@ -6295,6 +6828,12 @@ type VirtualMachineHealthStatus struct {
Status *InstanceViewStatus `json:"status,omitempty"`
}
+// MarshalJSON is the custom marshaler for VirtualMachineHealthStatus.
+func (vmhs VirtualMachineHealthStatus) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// VirtualMachineIdentity identity for the virtual machine.
type VirtualMachineIdentity struct {
// PrincipalID - READ-ONLY; The principal id of virtual machine identity. This property will only be provided for a system assigned identity.
@@ -6327,6 +6866,12 @@ type VirtualMachineIdentityUserAssignedIdentitiesValue struct {
ClientID *string `json:"clientId,omitempty"`
}
+// MarshalJSON is the custom marshaler for VirtualMachineIdentityUserAssignedIdentitiesValue.
+func (vmiAiv VirtualMachineIdentityUserAssignedIdentitiesValue) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// VirtualMachineImage describes a Virtual Machine Image.
type VirtualMachineImage struct {
autorest.Response `json:"-"`
@@ -7176,12 +7721,25 @@ func (vmssep VirtualMachineScaleSetExtensionProperties) MarshalJSON() ([]byte, e
// VirtualMachineScaleSetExtensionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the
// results of a long-running operation.
type VirtualMachineScaleSetExtensionsCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachineScaleSetExtensionsClient) (VirtualMachineScaleSetExtension, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachineScaleSetExtensionsCreateOrUpdateFuture) Result(client VirtualMachineScaleSetExtensionsClient) (vmsse VirtualMachineScaleSetExtension, err error) {
+// UnmarshalJSON is the custom unmarshaller for VirtualMachineScaleSetExtensionsCreateOrUpdateFuture.
+func (future *VirtualMachineScaleSetExtensionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualMachineScaleSetExtensionsCreateOrUpdateFuture.Result.
+func (future *VirtualMachineScaleSetExtensionsCreateOrUpdateFuture) result(client VirtualMachineScaleSetExtensionsClient) (vmsse VirtualMachineScaleSetExtension, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -7189,6 +7747,7 @@ func (future *VirtualMachineScaleSetExtensionsCreateOrUpdateFuture) Result(clien
return
}
if !done {
+ vmsse.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetExtensionsCreateOrUpdateFuture")
return
}
@@ -7205,12 +7764,25 @@ func (future *VirtualMachineScaleSetExtensionsCreateOrUpdateFuture) Result(clien
// VirtualMachineScaleSetExtensionsDeleteFuture an abstraction for monitoring and retrieving the results of
// a long-running operation.
type VirtualMachineScaleSetExtensionsDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachineScaleSetExtensionsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for VirtualMachineScaleSetExtensionsDeleteFuture.
+func (future *VirtualMachineScaleSetExtensionsDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachineScaleSetExtensionsDeleteFuture) Result(client VirtualMachineScaleSetExtensionsClient) (ar autorest.Response, err error) {
+// result is the default implementation for VirtualMachineScaleSetExtensionsDeleteFuture.Result.
+func (future *VirtualMachineScaleSetExtensionsDeleteFuture) result(client VirtualMachineScaleSetExtensionsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -7218,6 +7790,7 @@ func (future *VirtualMachineScaleSetExtensionsDeleteFuture) Result(client Virtua
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetExtensionsDeleteFuture")
return
}
@@ -7257,6 +7830,12 @@ type VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue struct {
ClientID *string `json:"clientId,omitempty"`
}
+// MarshalJSON is the custom marshaler for VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue.
+func (vmssiAiv VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// VirtualMachineScaleSetInstanceView the instance view of a virtual machine scale set.
type VirtualMachineScaleSetInstanceView struct {
autorest.Response `json:"-"`
@@ -7284,6 +7863,12 @@ type VirtualMachineScaleSetInstanceViewStatusesSummary struct {
StatusesSummary *[]VirtualMachineStatusCodeCount `json:"statusesSummary,omitempty"`
}
+// MarshalJSON is the custom marshaler for VirtualMachineScaleSetInstanceViewStatusesSummary.
+func (vmssivss VirtualMachineScaleSetInstanceViewStatusesSummary) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// VirtualMachineScaleSetIPConfiguration describes a virtual machine scale set network profile's IP
// configuration.
type VirtualMachineScaleSetIPConfiguration struct {
@@ -8312,12 +8897,25 @@ type VirtualMachineScaleSetReimageParameters struct {
// VirtualMachineScaleSetRollingUpgradesCancelFuture an abstraction for monitoring and retrieving the
// results of a long-running operation.
type VirtualMachineScaleSetRollingUpgradesCancelFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachineScaleSetRollingUpgradesClient) (autorest.Response, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachineScaleSetRollingUpgradesCancelFuture) Result(client VirtualMachineScaleSetRollingUpgradesClient) (ar autorest.Response, err error) {
+// UnmarshalJSON is the custom unmarshaller for VirtualMachineScaleSetRollingUpgradesCancelFuture.
+func (future *VirtualMachineScaleSetRollingUpgradesCancelFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualMachineScaleSetRollingUpgradesCancelFuture.Result.
+func (future *VirtualMachineScaleSetRollingUpgradesCancelFuture) result(client VirtualMachineScaleSetRollingUpgradesClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -8325,6 +8923,7 @@ func (future *VirtualMachineScaleSetRollingUpgradesCancelFuture) Result(client V
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetRollingUpgradesCancelFuture")
return
}
@@ -8335,12 +8934,25 @@ func (future *VirtualMachineScaleSetRollingUpgradesCancelFuture) Result(client V
// VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture an abstraction for monitoring and
// retrieving the results of a long-running operation.
type VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachineScaleSetRollingUpgradesClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture.
+func (future *VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture) Result(client VirtualMachineScaleSetRollingUpgradesClient) (ar autorest.Response, err error) {
+// result is the default implementation for VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture.Result.
+func (future *VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture) result(client VirtualMachineScaleSetRollingUpgradesClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -8348,6 +8960,7 @@ func (future *VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture)
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture")
return
}
@@ -8358,12 +8971,25 @@ func (future *VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture)
// VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture an abstraction for monitoring and retrieving
// the results of a long-running operation.
type VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachineScaleSetRollingUpgradesClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture.
+func (future *VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture) Result(client VirtualMachineScaleSetRollingUpgradesClient) (ar autorest.Response, err error) {
+// result is the default implementation for VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture.Result.
+func (future *VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture) result(client VirtualMachineScaleSetRollingUpgradesClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -8371,6 +8997,7 @@ func (future *VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture) Result(
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture")
return
}
@@ -8381,12 +9008,25 @@ func (future *VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture) Result(
// VirtualMachineScaleSetsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of
// a long-running operation.
type VirtualMachineScaleSetsCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachineScaleSetsClient) (VirtualMachineScaleSet, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachineScaleSetsCreateOrUpdateFuture) Result(client VirtualMachineScaleSetsClient) (vmss VirtualMachineScaleSet, err error) {
+// UnmarshalJSON is the custom unmarshaller for VirtualMachineScaleSetsCreateOrUpdateFuture.
+func (future *VirtualMachineScaleSetsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualMachineScaleSetsCreateOrUpdateFuture.Result.
+func (future *VirtualMachineScaleSetsCreateOrUpdateFuture) result(client VirtualMachineScaleSetsClient) (vmss VirtualMachineScaleSet, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -8394,6 +9034,7 @@ func (future *VirtualMachineScaleSetsCreateOrUpdateFuture) Result(client Virtual
return
}
if !done {
+ vmss.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsCreateOrUpdateFuture")
return
}
@@ -8410,12 +9051,25 @@ func (future *VirtualMachineScaleSetsCreateOrUpdateFuture) Result(client Virtual
// VirtualMachineScaleSetsDeallocateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualMachineScaleSetsDeallocateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachineScaleSetsClient) (autorest.Response, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachineScaleSetsDeallocateFuture) Result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) {
+// UnmarshalJSON is the custom unmarshaller for VirtualMachineScaleSetsDeallocateFuture.
+func (future *VirtualMachineScaleSetsDeallocateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualMachineScaleSetsDeallocateFuture.Result.
+func (future *VirtualMachineScaleSetsDeallocateFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -8423,6 +9077,7 @@ func (future *VirtualMachineScaleSetsDeallocateFuture) Result(client VirtualMach
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsDeallocateFuture")
return
}
@@ -8433,12 +9088,25 @@ func (future *VirtualMachineScaleSetsDeallocateFuture) Result(client VirtualMach
// VirtualMachineScaleSetsDeleteFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualMachineScaleSetsDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachineScaleSetsClient) (autorest.Response, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachineScaleSetsDeleteFuture) Result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) {
+// UnmarshalJSON is the custom unmarshaller for VirtualMachineScaleSetsDeleteFuture.
+func (future *VirtualMachineScaleSetsDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualMachineScaleSetsDeleteFuture.Result.
+func (future *VirtualMachineScaleSetsDeleteFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -8446,6 +9114,7 @@ func (future *VirtualMachineScaleSetsDeleteFuture) Result(client VirtualMachineS
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsDeleteFuture")
return
}
@@ -8456,12 +9125,25 @@ func (future *VirtualMachineScaleSetsDeleteFuture) Result(client VirtualMachineS
// VirtualMachineScaleSetsDeleteInstancesFuture an abstraction for monitoring and retrieving the results of
// a long-running operation.
type VirtualMachineScaleSetsDeleteInstancesFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachineScaleSetsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for VirtualMachineScaleSetsDeleteInstancesFuture.
+func (future *VirtualMachineScaleSetsDeleteInstancesFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachineScaleSetsDeleteInstancesFuture) Result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) {
+// result is the default implementation for VirtualMachineScaleSetsDeleteInstancesFuture.Result.
+func (future *VirtualMachineScaleSetsDeleteInstancesFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -8469,6 +9151,7 @@ func (future *VirtualMachineScaleSetsDeleteInstancesFuture) Result(client Virtua
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsDeleteInstancesFuture")
return
}
@@ -8486,6 +9169,12 @@ type VirtualMachineScaleSetSku struct {
Capacity *VirtualMachineScaleSetSkuCapacity `json:"capacity,omitempty"`
}
+// MarshalJSON is the custom marshaler for VirtualMachineScaleSetSku.
+func (vmsss VirtualMachineScaleSetSku) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// VirtualMachineScaleSetSkuCapacity describes scaling information of a sku.
type VirtualMachineScaleSetSkuCapacity struct {
// Minimum - READ-ONLY; The minimum capacity.
@@ -8498,15 +9187,34 @@ type VirtualMachineScaleSetSkuCapacity struct {
ScaleType VirtualMachineScaleSetSkuScaleType `json:"scaleType,omitempty"`
}
+// MarshalJSON is the custom marshaler for VirtualMachineScaleSetSkuCapacity.
+func (vmsssc VirtualMachineScaleSetSkuCapacity) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// VirtualMachineScaleSetsPerformMaintenanceFuture an abstraction for monitoring and retrieving the results
// of a long-running operation.
type VirtualMachineScaleSetsPerformMaintenanceFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachineScaleSetsClient) (autorest.Response, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachineScaleSetsPerformMaintenanceFuture) Result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualMachineScaleSetsPerformMaintenanceFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualMachineScaleSetsPerformMaintenanceFuture.Result.
+func (future *VirtualMachineScaleSetsPerformMaintenanceFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -8514,6 +9222,7 @@ func (future *VirtualMachineScaleSetsPerformMaintenanceFuture) Result(client Vir
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsPerformMaintenanceFuture")
return
}
@@ -8524,12 +9233,25 @@ func (future *VirtualMachineScaleSetsPerformMaintenanceFuture) Result(client Vir
// VirtualMachineScaleSetsPowerOffFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualMachineScaleSetsPowerOffFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachineScaleSetsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualMachineScaleSetsPowerOffFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachineScaleSetsPowerOffFuture) Result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) {
+// result is the default implementation for VirtualMachineScaleSetsPowerOffFuture.Result.
+func (future *VirtualMachineScaleSetsPowerOffFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -8537,6 +9259,7 @@ func (future *VirtualMachineScaleSetsPowerOffFuture) Result(client VirtualMachin
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsPowerOffFuture")
return
}
@@ -8547,12 +9270,25 @@ func (future *VirtualMachineScaleSetsPowerOffFuture) Result(client VirtualMachin
// VirtualMachineScaleSetsRedeployFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualMachineScaleSetsRedeployFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachineScaleSetsClient) (autorest.Response, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachineScaleSetsRedeployFuture) Result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualMachineScaleSetsRedeployFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualMachineScaleSetsRedeployFuture.Result.
+func (future *VirtualMachineScaleSetsRedeployFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -8560,6 +9296,7 @@ func (future *VirtualMachineScaleSetsRedeployFuture) Result(client VirtualMachin
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsRedeployFuture")
return
}
@@ -8570,12 +9307,25 @@ func (future *VirtualMachineScaleSetsRedeployFuture) Result(client VirtualMachin
// VirtualMachineScaleSetsReimageAllFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualMachineScaleSetsReimageAllFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachineScaleSetsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualMachineScaleSetsReimageAllFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachineScaleSetsReimageAllFuture) Result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) {
+// result is the default implementation for VirtualMachineScaleSetsReimageAllFuture.Result.
+func (future *VirtualMachineScaleSetsReimageAllFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -8583,6 +9333,7 @@ func (future *VirtualMachineScaleSetsReimageAllFuture) Result(client VirtualMach
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsReimageAllFuture")
return
}
@@ -8593,12 +9344,25 @@ func (future *VirtualMachineScaleSetsReimageAllFuture) Result(client VirtualMach
// VirtualMachineScaleSetsReimageFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualMachineScaleSetsReimageFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachineScaleSetsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualMachineScaleSetsReimageFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachineScaleSetsReimageFuture) Result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) {
+// result is the default implementation for VirtualMachineScaleSetsReimageFuture.Result.
+func (future *VirtualMachineScaleSetsReimageFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -8606,6 +9370,7 @@ func (future *VirtualMachineScaleSetsReimageFuture) Result(client VirtualMachine
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsReimageFuture")
return
}
@@ -8616,12 +9381,25 @@ func (future *VirtualMachineScaleSetsReimageFuture) Result(client VirtualMachine
// VirtualMachineScaleSetsRestartFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualMachineScaleSetsRestartFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachineScaleSetsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualMachineScaleSetsRestartFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachineScaleSetsRestartFuture) Result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) {
+// result is the default implementation for VirtualMachineScaleSetsRestartFuture.Result.
+func (future *VirtualMachineScaleSetsRestartFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -8629,6 +9407,7 @@ func (future *VirtualMachineScaleSetsRestartFuture) Result(client VirtualMachine
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsRestartFuture")
return
}
@@ -8639,12 +9418,25 @@ func (future *VirtualMachineScaleSetsRestartFuture) Result(client VirtualMachine
// VirtualMachineScaleSetsStartFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualMachineScaleSetsStartFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachineScaleSetsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualMachineScaleSetsStartFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachineScaleSetsStartFuture) Result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) {
+// result is the default implementation for VirtualMachineScaleSetsStartFuture.Result.
+func (future *VirtualMachineScaleSetsStartFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -8652,6 +9444,7 @@ func (future *VirtualMachineScaleSetsStartFuture) Result(client VirtualMachineSc
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsStartFuture")
return
}
@@ -8672,12 +9465,25 @@ type VirtualMachineScaleSetStorageProfile struct {
// VirtualMachineScaleSetsUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualMachineScaleSetsUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachineScaleSetsClient) (VirtualMachineScaleSet, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachineScaleSetsUpdateFuture) Result(client VirtualMachineScaleSetsClient) (vmss VirtualMachineScaleSet, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualMachineScaleSetsUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualMachineScaleSetsUpdateFuture.Result.
+func (future *VirtualMachineScaleSetsUpdateFuture) result(client VirtualMachineScaleSetsClient) (vmss VirtualMachineScaleSet, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -8685,6 +9491,7 @@ func (future *VirtualMachineScaleSetsUpdateFuture) Result(client VirtualMachineS
return
}
if !done {
+ vmss.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsUpdateFuture")
return
}
@@ -8701,12 +9508,25 @@ func (future *VirtualMachineScaleSetsUpdateFuture) Result(client VirtualMachineS
// VirtualMachineScaleSetsUpdateInstancesFuture an abstraction for monitoring and retrieving the results of
// a long-running operation.
type VirtualMachineScaleSetsUpdateInstancesFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachineScaleSetsClient) (autorest.Response, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachineScaleSetsUpdateInstancesFuture) Result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualMachineScaleSetsUpdateInstancesFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualMachineScaleSetsUpdateInstancesFuture.Result.
+func (future *VirtualMachineScaleSetsUpdateInstancesFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -8714,6 +9534,7 @@ func (future *VirtualMachineScaleSetsUpdateInstancesFuture) Result(client Virtua
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsUpdateInstancesFuture")
return
}
@@ -9297,6 +10118,12 @@ type VirtualMachineScaleSetVMExtensionsSummary struct {
StatusesSummary *[]VirtualMachineStatusCodeCount `json:"statusesSummary,omitempty"`
}
+// MarshalJSON is the custom marshaler for VirtualMachineScaleSetVMExtensionsSummary.
+func (vmssves VirtualMachineScaleSetVMExtensionsSummary) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// VirtualMachineScaleSetVMInstanceIDs specifies a list of virtual machine instance IDs from the VM scale
// set.
type VirtualMachineScaleSetVMInstanceIDs struct {
@@ -9624,12 +10451,25 @@ type VirtualMachineScaleSetVMReimageParameters struct {
// VirtualMachineScaleSetVMsDeallocateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualMachineScaleSetVMsDeallocateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachineScaleSetVMsDeallocateFuture) Result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualMachineScaleSetVMsDeallocateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualMachineScaleSetVMsDeallocateFuture.Result.
+func (future *VirtualMachineScaleSetVMsDeallocateFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -9637,6 +10477,7 @@ func (future *VirtualMachineScaleSetVMsDeallocateFuture) Result(client VirtualMa
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsDeallocateFuture")
return
}
@@ -9647,12 +10488,25 @@ func (future *VirtualMachineScaleSetVMsDeallocateFuture) Result(client VirtualMa
// VirtualMachineScaleSetVMsDeleteFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualMachineScaleSetVMsDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualMachineScaleSetVMsDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachineScaleSetVMsDeleteFuture) Result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) {
+// result is the default implementation for VirtualMachineScaleSetVMsDeleteFuture.Result.
+func (future *VirtualMachineScaleSetVMsDeleteFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -9660,6 +10514,7 @@ func (future *VirtualMachineScaleSetVMsDeleteFuture) Result(client VirtualMachin
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsDeleteFuture")
return
}
@@ -9670,12 +10525,25 @@ func (future *VirtualMachineScaleSetVMsDeleteFuture) Result(client VirtualMachin
// VirtualMachineScaleSetVMsPerformMaintenanceFuture an abstraction for monitoring and retrieving the
// results of a long-running operation.
type VirtualMachineScaleSetVMsPerformMaintenanceFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualMachineScaleSetVMsPerformMaintenanceFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachineScaleSetVMsPerformMaintenanceFuture) Result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) {
+// result is the default implementation for VirtualMachineScaleSetVMsPerformMaintenanceFuture.Result.
+func (future *VirtualMachineScaleSetVMsPerformMaintenanceFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -9683,6 +10551,7 @@ func (future *VirtualMachineScaleSetVMsPerformMaintenanceFuture) Result(client V
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsPerformMaintenanceFuture")
return
}
@@ -9693,12 +10562,25 @@ func (future *VirtualMachineScaleSetVMsPerformMaintenanceFuture) Result(client V
// VirtualMachineScaleSetVMsPowerOffFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualMachineScaleSetVMsPowerOffFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachineScaleSetVMsPowerOffFuture) Result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualMachineScaleSetVMsPowerOffFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualMachineScaleSetVMsPowerOffFuture.Result.
+func (future *VirtualMachineScaleSetVMsPowerOffFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -9706,6 +10588,7 @@ func (future *VirtualMachineScaleSetVMsPowerOffFuture) Result(client VirtualMach
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsPowerOffFuture")
return
}
@@ -9716,12 +10599,25 @@ func (future *VirtualMachineScaleSetVMsPowerOffFuture) Result(client VirtualMach
// VirtualMachineScaleSetVMsRedeployFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualMachineScaleSetVMsRedeployFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualMachineScaleSetVMsRedeployFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachineScaleSetVMsRedeployFuture) Result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) {
+// result is the default implementation for VirtualMachineScaleSetVMsRedeployFuture.Result.
+func (future *VirtualMachineScaleSetVMsRedeployFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -9729,6 +10625,7 @@ func (future *VirtualMachineScaleSetVMsRedeployFuture) Result(client VirtualMach
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsRedeployFuture")
return
}
@@ -9739,12 +10636,25 @@ func (future *VirtualMachineScaleSetVMsRedeployFuture) Result(client VirtualMach
// VirtualMachineScaleSetVMsReimageAllFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualMachineScaleSetVMsReimageAllFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachineScaleSetVMsReimageAllFuture) Result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualMachineScaleSetVMsReimageAllFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualMachineScaleSetVMsReimageAllFuture.Result.
+func (future *VirtualMachineScaleSetVMsReimageAllFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -9752,6 +10662,7 @@ func (future *VirtualMachineScaleSetVMsReimageAllFuture) Result(client VirtualMa
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsReimageAllFuture")
return
}
@@ -9762,12 +10673,25 @@ func (future *VirtualMachineScaleSetVMsReimageAllFuture) Result(client VirtualMa
// VirtualMachineScaleSetVMsReimageFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualMachineScaleSetVMsReimageFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualMachineScaleSetVMsReimageFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachineScaleSetVMsReimageFuture) Result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) {
+// result is the default implementation for VirtualMachineScaleSetVMsReimageFuture.Result.
+func (future *VirtualMachineScaleSetVMsReimageFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -9775,6 +10699,7 @@ func (future *VirtualMachineScaleSetVMsReimageFuture) Result(client VirtualMachi
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsReimageFuture")
return
}
@@ -9785,12 +10710,25 @@ func (future *VirtualMachineScaleSetVMsReimageFuture) Result(client VirtualMachi
// VirtualMachineScaleSetVMsRestartFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualMachineScaleSetVMsRestartFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualMachineScaleSetVMsRestartFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachineScaleSetVMsRestartFuture) Result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) {
+// result is the default implementation for VirtualMachineScaleSetVMsRestartFuture.Result.
+func (future *VirtualMachineScaleSetVMsRestartFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -9798,6 +10736,7 @@ func (future *VirtualMachineScaleSetVMsRestartFuture) Result(client VirtualMachi
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsRestartFuture")
return
}
@@ -9808,12 +10747,25 @@ func (future *VirtualMachineScaleSetVMsRestartFuture) Result(client VirtualMachi
// VirtualMachineScaleSetVMsRunCommandFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualMachineScaleSetVMsRunCommandFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachineScaleSetVMsClient) (RunCommandResult, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachineScaleSetVMsRunCommandFuture) Result(client VirtualMachineScaleSetVMsClient) (rcr RunCommandResult, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualMachineScaleSetVMsRunCommandFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualMachineScaleSetVMsRunCommandFuture.Result.
+func (future *VirtualMachineScaleSetVMsRunCommandFuture) result(client VirtualMachineScaleSetVMsClient) (rcr RunCommandResult, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -9821,6 +10773,7 @@ func (future *VirtualMachineScaleSetVMsRunCommandFuture) Result(client VirtualMa
return
}
if !done {
+ rcr.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsRunCommandFuture")
return
}
@@ -9837,12 +10790,25 @@ func (future *VirtualMachineScaleSetVMsRunCommandFuture) Result(client VirtualMa
// VirtualMachineScaleSetVMsStartFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualMachineScaleSetVMsStartFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachineScaleSetVMsStartFuture) Result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualMachineScaleSetVMsStartFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualMachineScaleSetVMsStartFuture.Result.
+func (future *VirtualMachineScaleSetVMsStartFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -9850,6 +10816,7 @@ func (future *VirtualMachineScaleSetVMsStartFuture) Result(client VirtualMachine
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsStartFuture")
return
}
@@ -9860,12 +10827,25 @@ func (future *VirtualMachineScaleSetVMsStartFuture) Result(client VirtualMachine
// VirtualMachineScaleSetVMsUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualMachineScaleSetVMsUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachineScaleSetVMsClient) (VirtualMachineScaleSetVM, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualMachineScaleSetVMsUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachineScaleSetVMsUpdateFuture) Result(client VirtualMachineScaleSetVMsClient) (vmssv VirtualMachineScaleSetVM, err error) {
+// result is the default implementation for VirtualMachineScaleSetVMsUpdateFuture.Result.
+func (future *VirtualMachineScaleSetVMsUpdateFuture) result(client VirtualMachineScaleSetVMsClient) (vmssv VirtualMachineScaleSetVM, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -9873,6 +10853,7 @@ func (future *VirtualMachineScaleSetVMsUpdateFuture) Result(client VirtualMachin
return
}
if !done {
+ vmssv.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsUpdateFuture")
return
}
@@ -9889,12 +10870,25 @@ func (future *VirtualMachineScaleSetVMsUpdateFuture) Result(client VirtualMachin
// VirtualMachinesCaptureFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type VirtualMachinesCaptureFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachinesClient) (VirtualMachineCaptureResult, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachinesCaptureFuture) Result(client VirtualMachinesClient) (vmcr VirtualMachineCaptureResult, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualMachinesCaptureFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualMachinesCaptureFuture.Result.
+func (future *VirtualMachinesCaptureFuture) result(client VirtualMachinesClient) (vmcr VirtualMachineCaptureResult, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -9902,6 +10896,7 @@ func (future *VirtualMachinesCaptureFuture) Result(client VirtualMachinesClient)
return
}
if !done {
+ vmcr.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesCaptureFuture")
return
}
@@ -9918,12 +10913,25 @@ func (future *VirtualMachinesCaptureFuture) Result(client VirtualMachinesClient)
// VirtualMachinesConvertToManagedDisksFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualMachinesConvertToManagedDisksFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachinesClient) (autorest.Response, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachinesConvertToManagedDisksFuture) Result(client VirtualMachinesClient) (ar autorest.Response, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualMachinesConvertToManagedDisksFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualMachinesConvertToManagedDisksFuture.Result.
+func (future *VirtualMachinesConvertToManagedDisksFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -9931,6 +10939,7 @@ func (future *VirtualMachinesConvertToManagedDisksFuture) Result(client VirtualM
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesConvertToManagedDisksFuture")
return
}
@@ -9941,12 +10950,25 @@ func (future *VirtualMachinesConvertToManagedDisksFuture) Result(client VirtualM
// VirtualMachinesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualMachinesCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachinesClient) (VirtualMachine, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualMachinesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachinesCreateOrUpdateFuture) Result(client VirtualMachinesClient) (VM VirtualMachine, err error) {
+// result is the default implementation for VirtualMachinesCreateOrUpdateFuture.Result.
+func (future *VirtualMachinesCreateOrUpdateFuture) result(client VirtualMachinesClient) (VM VirtualMachine, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -9954,6 +10976,7 @@ func (future *VirtualMachinesCreateOrUpdateFuture) Result(client VirtualMachines
return
}
if !done {
+ VM.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesCreateOrUpdateFuture")
return
}
@@ -9970,12 +10993,25 @@ func (future *VirtualMachinesCreateOrUpdateFuture) Result(client VirtualMachines
// VirtualMachinesDeallocateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualMachinesDeallocateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachinesClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualMachinesDeallocateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachinesDeallocateFuture) Result(client VirtualMachinesClient) (ar autorest.Response, err error) {
+// result is the default implementation for VirtualMachinesDeallocateFuture.Result.
+func (future *VirtualMachinesDeallocateFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -9983,6 +11019,7 @@ func (future *VirtualMachinesDeallocateFuture) Result(client VirtualMachinesClie
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesDeallocateFuture")
return
}
@@ -9993,12 +11030,25 @@ func (future *VirtualMachinesDeallocateFuture) Result(client VirtualMachinesClie
// VirtualMachinesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type VirtualMachinesDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachinesClient) (autorest.Response, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachinesDeleteFuture) Result(client VirtualMachinesClient) (ar autorest.Response, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualMachinesDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualMachinesDeleteFuture.Result.
+func (future *VirtualMachinesDeleteFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -10006,6 +11056,7 @@ func (future *VirtualMachinesDeleteFuture) Result(client VirtualMachinesClient)
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesDeleteFuture")
return
}
@@ -10039,12 +11090,25 @@ type VirtualMachineSizeListResult struct {
// VirtualMachinesPerformMaintenanceFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualMachinesPerformMaintenanceFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachinesClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualMachinesPerformMaintenanceFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachinesPerformMaintenanceFuture) Result(client VirtualMachinesClient) (ar autorest.Response, err error) {
+// result is the default implementation for VirtualMachinesPerformMaintenanceFuture.Result.
+func (future *VirtualMachinesPerformMaintenanceFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -10052,6 +11116,7 @@ func (future *VirtualMachinesPerformMaintenanceFuture) Result(client VirtualMach
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesPerformMaintenanceFuture")
return
}
@@ -10062,12 +11127,25 @@ func (future *VirtualMachinesPerformMaintenanceFuture) Result(client VirtualMach
// VirtualMachinesPowerOffFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type VirtualMachinesPowerOffFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachinesClient) (autorest.Response, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachinesPowerOffFuture) Result(client VirtualMachinesClient) (ar autorest.Response, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualMachinesPowerOffFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualMachinesPowerOffFuture.Result.
+func (future *VirtualMachinesPowerOffFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -10075,6 +11153,7 @@ func (future *VirtualMachinesPowerOffFuture) Result(client VirtualMachinesClient
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesPowerOffFuture")
return
}
@@ -10085,12 +11164,25 @@ func (future *VirtualMachinesPowerOffFuture) Result(client VirtualMachinesClient
// VirtualMachinesRedeployFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type VirtualMachinesRedeployFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachinesClient) (autorest.Response, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachinesRedeployFuture) Result(client VirtualMachinesClient) (ar autorest.Response, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualMachinesRedeployFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualMachinesRedeployFuture.Result.
+func (future *VirtualMachinesRedeployFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -10098,6 +11190,7 @@ func (future *VirtualMachinesRedeployFuture) Result(client VirtualMachinesClient
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesRedeployFuture")
return
}
@@ -10108,12 +11201,25 @@ func (future *VirtualMachinesRedeployFuture) Result(client VirtualMachinesClient
// VirtualMachinesReimageFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type VirtualMachinesReimageFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachinesClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualMachinesReimageFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachinesReimageFuture) Result(client VirtualMachinesClient) (ar autorest.Response, err error) {
+// result is the default implementation for VirtualMachinesReimageFuture.Result.
+func (future *VirtualMachinesReimageFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -10121,6 +11227,7 @@ func (future *VirtualMachinesReimageFuture) Result(client VirtualMachinesClient)
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesReimageFuture")
return
}
@@ -10131,12 +11238,25 @@ func (future *VirtualMachinesReimageFuture) Result(client VirtualMachinesClient)
// VirtualMachinesRestartFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type VirtualMachinesRestartFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachinesClient) (autorest.Response, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachinesRestartFuture) Result(client VirtualMachinesClient) (ar autorest.Response, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualMachinesRestartFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualMachinesRestartFuture.Result.
+func (future *VirtualMachinesRestartFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -10144,6 +11264,7 @@ func (future *VirtualMachinesRestartFuture) Result(client VirtualMachinesClient)
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesRestartFuture")
return
}
@@ -10154,12 +11275,25 @@ func (future *VirtualMachinesRestartFuture) Result(client VirtualMachinesClient)
// VirtualMachinesRunCommandFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualMachinesRunCommandFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachinesClient) (RunCommandResult, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualMachinesRunCommandFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachinesRunCommandFuture) Result(client VirtualMachinesClient) (rcr RunCommandResult, err error) {
+// result is the default implementation for VirtualMachinesRunCommandFuture.Result.
+func (future *VirtualMachinesRunCommandFuture) result(client VirtualMachinesClient) (rcr RunCommandResult, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -10167,6 +11301,7 @@ func (future *VirtualMachinesRunCommandFuture) Result(client VirtualMachinesClie
return
}
if !done {
+ rcr.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesRunCommandFuture")
return
}
@@ -10183,12 +11318,25 @@ func (future *VirtualMachinesRunCommandFuture) Result(client VirtualMachinesClie
// VirtualMachinesStartFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type VirtualMachinesStartFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachinesClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualMachinesStartFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachinesStartFuture) Result(client VirtualMachinesClient) (ar autorest.Response, err error) {
+// result is the default implementation for VirtualMachinesStartFuture.Result.
+func (future *VirtualMachinesStartFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -10196,6 +11344,7 @@ func (future *VirtualMachinesStartFuture) Result(client VirtualMachinesClient) (
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesStartFuture")
return
}
@@ -10212,15 +11361,34 @@ type VirtualMachineStatusCodeCount struct {
Count *int32 `json:"count,omitempty"`
}
+// MarshalJSON is the custom marshaler for VirtualMachineStatusCodeCount.
+func (vmscc VirtualMachineStatusCodeCount) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// VirtualMachinesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type VirtualMachinesUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualMachinesClient) (VirtualMachine, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualMachinesUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualMachinesUpdateFuture) Result(client VirtualMachinesClient) (VM VirtualMachine, err error) {
+// result is the default implementation for VirtualMachinesUpdateFuture.Result.
+func (future *VirtualMachinesUpdateFuture) result(client VirtualMachinesClient) (VM VirtualMachine, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -10228,6 +11396,7 @@ func (future *VirtualMachinesUpdateFuture) Result(client VirtualMachinesClient)
return
}
if !done {
+ VM.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesUpdateFuture")
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/operations.go
index ecd3c13f4..72f4ddf52 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/operations.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/operations.go
@@ -1,18 +1,7 @@
package compute
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/proximityplacementgroups.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/proximityplacementgroups.go
index 8cf0a675e..2a495e27e 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/proximityplacementgroups.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/proximityplacementgroups.go
@@ -1,18 +1,7 @@
package compute
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -307,6 +296,7 @@ func (client ProximityPlacementGroupsClient) ListByResourceGroup(ctx context.Con
}
if result.ppglr.hasNextLink() && result.ppglr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -367,7 +357,6 @@ func (client ProximityPlacementGroupsClient) listByResourceGroupNextResults(ctx
result, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.ProximityPlacementGroupsClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -421,6 +410,7 @@ func (client ProximityPlacementGroupsClient) ListBySubscription(ctx context.Cont
}
if result.ppglr.hasNextLink() && result.ppglr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -480,7 +470,6 @@ func (client ProximityPlacementGroupsClient) listBySubscriptionNextResults(ctx c
result, err = client.ListBySubscriptionResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.ProximityPlacementGroupsClient", "listBySubscriptionNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/resourceskus.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/resourceskus.go
index 5194809c5..f42e100fc 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/resourceskus.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/resourceskus.go
@@ -1,18 +1,7 @@
package compute
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -74,6 +63,7 @@ func (client ResourceSkusClient) List(ctx context.Context) (result ResourceSkusR
}
if result.rsr.hasNextLink() && result.rsr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -133,7 +123,6 @@ func (client ResourceSkusClient) listNextResults(ctx context.Context, lastResult
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/snapshots.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/snapshots.go
index 5a71addb7..6ebed2573 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/snapshots.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/snapshots.go
@@ -1,18 +1,7 @@
package compute
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -53,8 +42,8 @@ func (client SnapshotsClient) CreateOrUpdate(ctx context.Context, resourceGroupN
ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -88,7 +77,7 @@ func (client SnapshotsClient) CreateOrUpdate(ctx context.Context, resourceGroupN
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -127,7 +116,10 @@ func (client SnapshotsClient) CreateOrUpdateSender(req *http.Request) (future Sn
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -153,8 +145,8 @@ func (client SnapshotsClient) Delete(ctx context.Context, resourceGroupName stri
ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -167,7 +159,7 @@ func (client SnapshotsClient) Delete(ctx context.Context, resourceGroupName stri
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Delete", nil, "Failure sending request")
return
}
@@ -203,7 +195,10 @@ func (client SnapshotsClient) DeleteSender(req *http.Request) (future SnapshotsD
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -306,8 +301,8 @@ func (client SnapshotsClient) GrantAccess(ctx context.Context, resourceGroupName
ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.GrantAccess")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -326,7 +321,7 @@ func (client SnapshotsClient) GrantAccess(ctx context.Context, resourceGroupName
result, err = client.GrantAccessSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "GrantAccess", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "GrantAccess", nil, "Failure sending request")
return
}
@@ -364,7 +359,10 @@ func (client SnapshotsClient) GrantAccessSender(req *http.Request) (future Snaps
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -413,6 +411,7 @@ func (client SnapshotsClient) List(ctx context.Context) (result SnapshotListPage
}
if result.sl.hasNextLink() && result.sl.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -472,7 +471,6 @@ func (client SnapshotsClient) listNextResults(ctx context.Context, lastResults S
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -528,6 +526,7 @@ func (client SnapshotsClient) ListByResourceGroup(ctx context.Context, resourceG
}
if result.sl.hasNextLink() && result.sl.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -588,7 +587,6 @@ func (client SnapshotsClient) listByResourceGroupNextResults(ctx context.Context
result, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -619,8 +617,8 @@ func (client SnapshotsClient) RevokeAccess(ctx context.Context, resourceGroupNam
ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.RevokeAccess")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -633,7 +631,7 @@ func (client SnapshotsClient) RevokeAccess(ctx context.Context, resourceGroupNam
result, err = client.RevokeAccessSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "RevokeAccess", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "RevokeAccess", nil, "Failure sending request")
return
}
@@ -669,7 +667,10 @@ func (client SnapshotsClient) RevokeAccessSender(req *http.Request) (future Snap
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -695,8 +696,8 @@ func (client SnapshotsClient) Update(ctx context.Context, resourceGroupName stri
ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.Update")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -709,7 +710,7 @@ func (client SnapshotsClient) Update(ctx context.Context, resourceGroupName stri
result, err = client.UpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Update", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Update", nil, "Failure sending request")
return
}
@@ -747,7 +748,10 @@ func (client SnapshotsClient) UpdateSender(req *http.Request) (future SnapshotsU
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/usage.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/usage.go
index 33ac90ac1..5da784bce 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/usage.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/usage.go
@@ -1,18 +1,7 @@
package compute
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -84,6 +73,7 @@ func (client UsageClient) List(ctx context.Context, location string) (result Lis
}
if result.lur.hasNextLink() && result.lur.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -144,7 +134,6 @@ func (client UsageClient) listNextResults(ctx context.Context, lastResults ListU
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.UsageClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/version.go
index 8fa5eccbc..986a65475 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/version.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/version.go
@@ -2,19 +2,8 @@ package compute
import "github.com/Azure/azure-sdk-for-go/version"
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineextensionimages.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineextensionimages.go
index f0f0d0776..d389b76c3 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineextensionimages.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineextensionimages.go
@@ -1,18 +1,7 @@
package compute
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineextensions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineextensions.go
index ec9497cb8..42b9f0ac1 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineextensions.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineextensions.go
@@ -1,18 +1,7 @@
package compute
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -53,8 +42,8 @@ func (client VirtualMachineExtensionsClient) CreateOrUpdate(ctx context.Context,
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineExtensionsClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -67,7 +56,7 @@ func (client VirtualMachineExtensionsClient) CreateOrUpdate(ctx context.Context,
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -106,7 +95,10 @@ func (client VirtualMachineExtensionsClient) CreateOrUpdateSender(req *http.Requ
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -132,8 +124,8 @@ func (client VirtualMachineExtensionsClient) Delete(ctx context.Context, resourc
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineExtensionsClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -146,7 +138,7 @@ func (client VirtualMachineExtensionsClient) Delete(ctx context.Context, resourc
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Delete", nil, "Failure sending request")
return
}
@@ -183,7 +175,10 @@ func (client VirtualMachineExtensionsClient) DeleteSender(req *http.Request) (fu
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -371,8 +366,8 @@ func (client VirtualMachineExtensionsClient) Update(ctx context.Context, resourc
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineExtensionsClient.Update")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -385,7 +380,7 @@ func (client VirtualMachineExtensionsClient) Update(ctx context.Context, resourc
result, err = client.UpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Update", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Update", nil, "Failure sending request")
return
}
@@ -424,7 +419,10 @@ func (client VirtualMachineExtensionsClient) UpdateSender(req *http.Request) (fu
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineimages.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineimages.go
index 6e027eb36..59450d69d 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineimages.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineimages.go
@@ -1,18 +1,7 @@
package compute
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineruncommands.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineruncommands.go
index e7a36595b..0eb5fa414 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineruncommands.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineruncommands.go
@@ -1,18 +1,7 @@
package compute
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -166,6 +155,7 @@ func (client VirtualMachineRunCommandsClient) List(ctx context.Context, location
}
if result.rclr.hasNextLink() && result.rclr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -226,7 +216,6 @@ func (client VirtualMachineRunCommandsClient) listNextResults(ctx context.Contex
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachines.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachines.go
index 906f573bd..d22c47019 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachines.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachines.go
@@ -1,18 +1,7 @@
package compute
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -53,8 +42,8 @@ func (client VirtualMachinesClient) Capture(ctx context.Context, resourceGroupNa
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.Capture")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -75,7 +64,7 @@ func (client VirtualMachinesClient) Capture(ctx context.Context, resourceGroupNa
result, err = client.CaptureSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Capture", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Capture", nil, "Failure sending request")
return
}
@@ -113,7 +102,10 @@ func (client VirtualMachinesClient) CaptureSender(req *http.Request) (future Vir
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -139,8 +131,8 @@ func (client VirtualMachinesClient) ConvertToManagedDisks(ctx context.Context, r
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.ConvertToManagedDisks")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -153,7 +145,7 @@ func (client VirtualMachinesClient) ConvertToManagedDisks(ctx context.Context, r
result, err = client.ConvertToManagedDisksSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ConvertToManagedDisks", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ConvertToManagedDisks", nil, "Failure sending request")
return
}
@@ -189,7 +181,10 @@ func (client VirtualMachinesClient) ConvertToManagedDisksSender(req *http.Reques
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -214,8 +209,8 @@ func (client VirtualMachinesClient) CreateOrUpdate(ctx context.Context, resource
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -249,7 +244,7 @@ func (client VirtualMachinesClient) CreateOrUpdate(ctx context.Context, resource
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -288,7 +283,10 @@ func (client VirtualMachinesClient) CreateOrUpdateSender(req *http.Request) (fut
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -314,8 +312,8 @@ func (client VirtualMachinesClient) Deallocate(ctx context.Context, resourceGrou
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.Deallocate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -328,7 +326,7 @@ func (client VirtualMachinesClient) Deallocate(ctx context.Context, resourceGrou
result, err = client.DeallocateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Deallocate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Deallocate", nil, "Failure sending request")
return
}
@@ -364,7 +362,10 @@ func (client VirtualMachinesClient) DeallocateSender(req *http.Request) (future
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -388,8 +389,8 @@ func (client VirtualMachinesClient) Delete(ctx context.Context, resourceGroupNam
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -402,7 +403,7 @@ func (client VirtualMachinesClient) Delete(ctx context.Context, resourceGroupNam
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Delete", nil, "Failure sending request")
return
}
@@ -438,7 +439,10 @@ func (client VirtualMachinesClient) DeleteSender(req *http.Request) (future Virt
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -720,6 +724,7 @@ func (client VirtualMachinesClient) List(ctx context.Context, resourceGroupName
}
if result.vmlr.hasNextLink() && result.vmlr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -780,7 +785,6 @@ func (client VirtualMachinesClient) listNextResults(ctx context.Context, lastRes
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -835,6 +839,7 @@ func (client VirtualMachinesClient) ListAll(ctx context.Context) (result Virtual
}
if result.vmlr.hasNextLink() && result.vmlr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -894,7 +899,6 @@ func (client VirtualMachinesClient) listAllNextResults(ctx context.Context, last
result, err = client.ListAllResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "listAllNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -1032,6 +1036,7 @@ func (client VirtualMachinesClient) ListByLocation(ctx context.Context, location
}
if result.vmlr.hasNextLink() && result.vmlr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -1092,7 +1097,6 @@ func (client VirtualMachinesClient) listByLocationNextResults(ctx context.Contex
result, err = client.ListByLocationResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "listByLocationNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -1122,8 +1126,8 @@ func (client VirtualMachinesClient) PerformMaintenance(ctx context.Context, reso
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.PerformMaintenance")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -1136,7 +1140,7 @@ func (client VirtualMachinesClient) PerformMaintenance(ctx context.Context, reso
result, err = client.PerformMaintenanceSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "PerformMaintenance", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "PerformMaintenance", nil, "Failure sending request")
return
}
@@ -1172,7 +1176,10 @@ func (client VirtualMachinesClient) PerformMaintenanceSender(req *http.Request)
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -1197,8 +1204,8 @@ func (client VirtualMachinesClient) PowerOff(ctx context.Context, resourceGroupN
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.PowerOff")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -1211,7 +1218,7 @@ func (client VirtualMachinesClient) PowerOff(ctx context.Context, resourceGroupN
result, err = client.PowerOffSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "PowerOff", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "PowerOff", nil, "Failure sending request")
return
}
@@ -1247,7 +1254,10 @@ func (client VirtualMachinesClient) PowerOffSender(req *http.Request) (future Vi
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -1271,8 +1281,8 @@ func (client VirtualMachinesClient) Redeploy(ctx context.Context, resourceGroupN
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.Redeploy")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -1285,7 +1295,7 @@ func (client VirtualMachinesClient) Redeploy(ctx context.Context, resourceGroupN
result, err = client.RedeploySender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Redeploy", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Redeploy", nil, "Failure sending request")
return
}
@@ -1321,7 +1331,10 @@ func (client VirtualMachinesClient) RedeploySender(req *http.Request) (future Vi
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -1346,8 +1359,8 @@ func (client VirtualMachinesClient) Reimage(ctx context.Context, resourceGroupNa
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.Reimage")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -1360,7 +1373,7 @@ func (client VirtualMachinesClient) Reimage(ctx context.Context, resourceGroupNa
result, err = client.ReimageSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Reimage", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Reimage", nil, "Failure sending request")
return
}
@@ -1401,7 +1414,10 @@ func (client VirtualMachinesClient) ReimageSender(req *http.Request) (future Vir
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -1425,8 +1441,8 @@ func (client VirtualMachinesClient) Restart(ctx context.Context, resourceGroupNa
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.Restart")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -1439,7 +1455,7 @@ func (client VirtualMachinesClient) Restart(ctx context.Context, resourceGroupNa
result, err = client.RestartSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Restart", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Restart", nil, "Failure sending request")
return
}
@@ -1475,7 +1491,10 @@ func (client VirtualMachinesClient) RestartSender(req *http.Request) (future Vir
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -1500,8 +1519,8 @@ func (client VirtualMachinesClient) RunCommand(ctx context.Context, resourceGrou
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.RunCommand")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -1520,7 +1539,7 @@ func (client VirtualMachinesClient) RunCommand(ctx context.Context, resourceGrou
result, err = client.RunCommandSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "RunCommand", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "RunCommand", nil, "Failure sending request")
return
}
@@ -1558,7 +1577,10 @@ func (client VirtualMachinesClient) RunCommandSender(req *http.Request) (future
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -1583,8 +1605,8 @@ func (client VirtualMachinesClient) Start(ctx context.Context, resourceGroupName
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.Start")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -1597,7 +1619,7 @@ func (client VirtualMachinesClient) Start(ctx context.Context, resourceGroupName
result, err = client.StartSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Start", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Start", nil, "Failure sending request")
return
}
@@ -1633,7 +1655,10 @@ func (client VirtualMachinesClient) StartSender(req *http.Request) (future Virtu
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -1658,8 +1683,8 @@ func (client VirtualMachinesClient) Update(ctx context.Context, resourceGroupNam
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.Update")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -1672,7 +1697,7 @@ func (client VirtualMachinesClient) Update(ctx context.Context, resourceGroupNam
result, err = client.UpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Update", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Update", nil, "Failure sending request")
return
}
@@ -1710,7 +1735,10 @@ func (client VirtualMachinesClient) UpdateSender(req *http.Request) (future Virt
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetextensions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetextensions.go
index 8c70884d3..dd6037014 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetextensions.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetextensions.go
@@ -1,18 +1,7 @@
package compute
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -53,8 +42,8 @@ func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdate(ctx context.
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetExtensionsClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -67,7 +56,7 @@ func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdate(ctx context.
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -106,7 +95,10 @@ func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdateSender(req *h
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -132,8 +124,8 @@ func (client VirtualMachineScaleSetExtensionsClient) Delete(ctx context.Context,
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetExtensionsClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -146,7 +138,7 @@ func (client VirtualMachineScaleSetExtensionsClient) Delete(ctx context.Context,
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "Delete", nil, "Failure sending request")
return
}
@@ -183,7 +175,10 @@ func (client VirtualMachineScaleSetExtensionsClient) DeleteSender(req *http.Requ
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -316,6 +311,7 @@ func (client VirtualMachineScaleSetExtensionsClient) List(ctx context.Context, r
}
if result.vmsselr.hasNextLink() && result.vmsselr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -377,7 +373,6 @@ func (client VirtualMachineScaleSetExtensionsClient) listNextResults(ctx context
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetrollingupgrades.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetrollingupgrades.go
index 01de8c75c..003dfc3ee 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetrollingupgrades.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetrollingupgrades.go
@@ -1,18 +1,7 @@
package compute
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -52,8 +41,8 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) Cancel(ctx context.Con
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetRollingUpgradesClient.Cancel")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -66,7 +55,7 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) Cancel(ctx context.Con
result, err = client.CancelSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesClient", "Cancel", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesClient", "Cancel", nil, "Failure sending request")
return
}
@@ -102,7 +91,10 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) CancelSender(req *http
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -204,8 +196,8 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) StartExtensionUpgrade(
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetRollingUpgradesClient.StartExtensionUpgrade")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -218,7 +210,7 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) StartExtensionUpgrade(
result, err = client.StartExtensionUpgradeSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesClient", "StartExtensionUpgrade", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesClient", "StartExtensionUpgrade", nil, "Failure sending request")
return
}
@@ -254,7 +246,10 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) StartExtensionUpgradeS
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -279,8 +274,8 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) StartOSUpgrade(ctx con
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetRollingUpgradesClient.StartOSUpgrade")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -293,7 +288,7 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) StartOSUpgrade(ctx con
result, err = client.StartOSUpgradeSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesClient", "StartOSUpgrade", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesClient", "StartOSUpgrade", nil, "Failure sending request")
return
}
@@ -329,7 +324,10 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) StartOSUpgradeSender(r
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesets.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesets.go
index f7b5fd3bc..f7e9469d6 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesets.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesets.go
@@ -1,18 +1,7 @@
package compute
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -53,8 +42,8 @@ func (client VirtualMachineScaleSetsClient) CreateOrUpdate(ctx context.Context,
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -90,7 +79,7 @@ func (client VirtualMachineScaleSetsClient) CreateOrUpdate(ctx context.Context,
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -128,7 +117,10 @@ func (client VirtualMachineScaleSetsClient) CreateOrUpdateSender(req *http.Reque
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -155,8 +147,8 @@ func (client VirtualMachineScaleSetsClient) Deallocate(ctx context.Context, reso
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.Deallocate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -169,7 +161,7 @@ func (client VirtualMachineScaleSetsClient) Deallocate(ctx context.Context, reso
result, err = client.DeallocateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Deallocate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Deallocate", nil, "Failure sending request")
return
}
@@ -210,7 +202,10 @@ func (client VirtualMachineScaleSetsClient) DeallocateSender(req *http.Request)
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -234,8 +229,8 @@ func (client VirtualMachineScaleSetsClient) Delete(ctx context.Context, resource
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -248,7 +243,7 @@ func (client VirtualMachineScaleSetsClient) Delete(ctx context.Context, resource
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Delete", nil, "Failure sending request")
return
}
@@ -284,7 +279,10 @@ func (client VirtualMachineScaleSetsClient) DeleteSender(req *http.Request) (fut
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -309,8 +307,8 @@ func (client VirtualMachineScaleSetsClient) DeleteInstances(ctx context.Context,
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.DeleteInstances")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -329,7 +327,7 @@ func (client VirtualMachineScaleSetsClient) DeleteInstances(ctx context.Context,
result, err = client.DeleteInstancesSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "DeleteInstances", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "DeleteInstances", nil, "Failure sending request")
return
}
@@ -367,7 +365,10 @@ func (client VirtualMachineScaleSetsClient) DeleteInstancesSender(req *http.Requ
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -649,6 +650,7 @@ func (client VirtualMachineScaleSetsClient) GetOSUpgradeHistory(ctx context.Cont
}
if result.vmsslouh.hasNextLink() && result.vmsslouh.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -710,7 +712,6 @@ func (client VirtualMachineScaleSetsClient) getOSUpgradeHistoryNextResults(ctx c
result, err = client.GetOSUpgradeHistoryResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "getOSUpgradeHistoryNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -766,6 +767,7 @@ func (client VirtualMachineScaleSetsClient) List(ctx context.Context, resourceGr
}
if result.vmsslr.hasNextLink() && result.vmsslr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -826,7 +828,6 @@ func (client VirtualMachineScaleSetsClient) listNextResults(ctx context.Context,
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -882,6 +883,7 @@ func (client VirtualMachineScaleSetsClient) ListAll(ctx context.Context) (result
}
if result.vmsslwlr.hasNextLink() && result.vmsslwlr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -941,7 +943,6 @@ func (client VirtualMachineScaleSetsClient) listAllNextResults(ctx context.Conte
result, err = client.ListAllResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "listAllNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -999,6 +1000,7 @@ func (client VirtualMachineScaleSetsClient) ListSkus(ctx context.Context, resour
}
if result.vmsslsr.hasNextLink() && result.vmsslsr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -1060,7 +1062,6 @@ func (client VirtualMachineScaleSetsClient) listSkusNextResults(ctx context.Cont
result, err = client.ListSkusResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "listSkusNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -1093,8 +1094,8 @@ func (client VirtualMachineScaleSetsClient) PerformMaintenance(ctx context.Conte
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.PerformMaintenance")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -1107,7 +1108,7 @@ func (client VirtualMachineScaleSetsClient) PerformMaintenance(ctx context.Conte
result, err = client.PerformMaintenanceSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "PerformMaintenance", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "PerformMaintenance", nil, "Failure sending request")
return
}
@@ -1148,7 +1149,10 @@ func (client VirtualMachineScaleSetsClient) PerformMaintenanceSender(req *http.R
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -1174,8 +1178,8 @@ func (client VirtualMachineScaleSetsClient) PowerOff(ctx context.Context, resour
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.PowerOff")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -1188,7 +1192,7 @@ func (client VirtualMachineScaleSetsClient) PowerOff(ctx context.Context, resour
result, err = client.PowerOffSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "PowerOff", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "PowerOff", nil, "Failure sending request")
return
}
@@ -1229,7 +1233,10 @@ func (client VirtualMachineScaleSetsClient) PowerOffSender(req *http.Request) (f
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -1255,8 +1262,8 @@ func (client VirtualMachineScaleSetsClient) Redeploy(ctx context.Context, resour
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.Redeploy")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -1269,7 +1276,7 @@ func (client VirtualMachineScaleSetsClient) Redeploy(ctx context.Context, resour
result, err = client.RedeploySender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Redeploy", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Redeploy", nil, "Failure sending request")
return
}
@@ -1310,7 +1317,10 @@ func (client VirtualMachineScaleSetsClient) RedeploySender(req *http.Request) (f
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -1336,8 +1346,8 @@ func (client VirtualMachineScaleSetsClient) Reimage(ctx context.Context, resourc
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.Reimage")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -1350,7 +1360,7 @@ func (client VirtualMachineScaleSetsClient) Reimage(ctx context.Context, resourc
result, err = client.ReimageSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Reimage", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Reimage", nil, "Failure sending request")
return
}
@@ -1391,7 +1401,10 @@ func (client VirtualMachineScaleSetsClient) ReimageSender(req *http.Request) (fu
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -1417,8 +1430,8 @@ func (client VirtualMachineScaleSetsClient) ReimageAll(ctx context.Context, reso
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.ReimageAll")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -1431,7 +1444,7 @@ func (client VirtualMachineScaleSetsClient) ReimageAll(ctx context.Context, reso
result, err = client.ReimageAllSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ReimageAll", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ReimageAll", nil, "Failure sending request")
return
}
@@ -1472,7 +1485,10 @@ func (client VirtualMachineScaleSetsClient) ReimageAllSender(req *http.Request)
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -1497,8 +1513,8 @@ func (client VirtualMachineScaleSetsClient) Restart(ctx context.Context, resourc
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.Restart")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -1511,7 +1527,7 @@ func (client VirtualMachineScaleSetsClient) Restart(ctx context.Context, resourc
result, err = client.RestartSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Restart", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Restart", nil, "Failure sending request")
return
}
@@ -1552,7 +1568,10 @@ func (client VirtualMachineScaleSetsClient) RestartSender(req *http.Request) (fu
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -1577,8 +1596,8 @@ func (client VirtualMachineScaleSetsClient) Start(ctx context.Context, resourceG
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.Start")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -1591,7 +1610,7 @@ func (client VirtualMachineScaleSetsClient) Start(ctx context.Context, resourceG
result, err = client.StartSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Start", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Start", nil, "Failure sending request")
return
}
@@ -1632,7 +1651,10 @@ func (client VirtualMachineScaleSetsClient) StartSender(req *http.Request) (futu
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -1657,8 +1679,8 @@ func (client VirtualMachineScaleSetsClient) Update(ctx context.Context, resource
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.Update")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -1671,7 +1693,7 @@ func (client VirtualMachineScaleSetsClient) Update(ctx context.Context, resource
result, err = client.UpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Update", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Update", nil, "Failure sending request")
return
}
@@ -1709,7 +1731,10 @@ func (client VirtualMachineScaleSetsClient) UpdateSender(req *http.Request) (fut
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -1735,8 +1760,8 @@ func (client VirtualMachineScaleSetsClient) UpdateInstances(ctx context.Context,
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.UpdateInstances")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -1755,7 +1780,7 @@ func (client VirtualMachineScaleSetsClient) UpdateInstances(ctx context.Context,
result, err = client.UpdateInstancesSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "UpdateInstances", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "UpdateInstances", nil, "Failure sending request")
return
}
@@ -1793,7 +1818,10 @@ func (client VirtualMachineScaleSetsClient) UpdateInstancesSender(req *http.Requ
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetvms.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetvms.go
index 81c38c58b..fb399e30c 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetvms.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetvms.go
@@ -1,18 +1,7 @@
package compute
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -55,8 +44,8 @@ func (client VirtualMachineScaleSetVMsClient) Deallocate(ctx context.Context, re
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMsClient.Deallocate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -69,7 +58,7 @@ func (client VirtualMachineScaleSetVMsClient) Deallocate(ctx context.Context, re
result, err = client.DeallocateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Deallocate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Deallocate", nil, "Failure sending request")
return
}
@@ -106,7 +95,10 @@ func (client VirtualMachineScaleSetVMsClient) DeallocateSender(req *http.Request
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -131,8 +123,8 @@ func (client VirtualMachineScaleSetVMsClient) Delete(ctx context.Context, resour
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMsClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -145,7 +137,7 @@ func (client VirtualMachineScaleSetVMsClient) Delete(ctx context.Context, resour
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Delete", nil, "Failure sending request")
return
}
@@ -182,7 +174,10 @@ func (client VirtualMachineScaleSetVMsClient) DeleteSender(req *http.Request) (f
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -393,6 +388,7 @@ func (client VirtualMachineScaleSetVMsClient) List(ctx context.Context, resource
}
if result.vmssvlr.hasNextLink() && result.vmssvlr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -463,7 +459,6 @@ func (client VirtualMachineScaleSetVMsClient) listNextResults(ctx context.Contex
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -494,8 +489,8 @@ func (client VirtualMachineScaleSetVMsClient) PerformMaintenance(ctx context.Con
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMsClient.PerformMaintenance")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -508,7 +503,7 @@ func (client VirtualMachineScaleSetVMsClient) PerformMaintenance(ctx context.Con
result, err = client.PerformMaintenanceSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "PerformMaintenance", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "PerformMaintenance", nil, "Failure sending request")
return
}
@@ -545,7 +540,10 @@ func (client VirtualMachineScaleSetVMsClient) PerformMaintenanceSender(req *http
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -571,8 +569,8 @@ func (client VirtualMachineScaleSetVMsClient) PowerOff(ctx context.Context, reso
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMsClient.PowerOff")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -585,7 +583,7 @@ func (client VirtualMachineScaleSetVMsClient) PowerOff(ctx context.Context, reso
result, err = client.PowerOffSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "PowerOff", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "PowerOff", nil, "Failure sending request")
return
}
@@ -622,7 +620,10 @@ func (client VirtualMachineScaleSetVMsClient) PowerOffSender(req *http.Request)
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -648,8 +649,8 @@ func (client VirtualMachineScaleSetVMsClient) Redeploy(ctx context.Context, reso
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMsClient.Redeploy")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -662,7 +663,7 @@ func (client VirtualMachineScaleSetVMsClient) Redeploy(ctx context.Context, reso
result, err = client.RedeploySender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Redeploy", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Redeploy", nil, "Failure sending request")
return
}
@@ -699,7 +700,10 @@ func (client VirtualMachineScaleSetVMsClient) RedeploySender(req *http.Request)
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -725,8 +729,8 @@ func (client VirtualMachineScaleSetVMsClient) Reimage(ctx context.Context, resou
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMsClient.Reimage")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -739,7 +743,7 @@ func (client VirtualMachineScaleSetVMsClient) Reimage(ctx context.Context, resou
result, err = client.ReimageSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Reimage", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Reimage", nil, "Failure sending request")
return
}
@@ -781,7 +785,10 @@ func (client VirtualMachineScaleSetVMsClient) ReimageSender(req *http.Request) (
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -807,8 +814,8 @@ func (client VirtualMachineScaleSetVMsClient) ReimageAll(ctx context.Context, re
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMsClient.ReimageAll")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -821,7 +828,7 @@ func (client VirtualMachineScaleSetVMsClient) ReimageAll(ctx context.Context, re
result, err = client.ReimageAllSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "ReimageAll", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "ReimageAll", nil, "Failure sending request")
return
}
@@ -858,7 +865,10 @@ func (client VirtualMachineScaleSetVMsClient) ReimageAllSender(req *http.Request
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -883,8 +893,8 @@ func (client VirtualMachineScaleSetVMsClient) Restart(ctx context.Context, resou
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMsClient.Restart")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -897,7 +907,7 @@ func (client VirtualMachineScaleSetVMsClient) Restart(ctx context.Context, resou
result, err = client.RestartSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Restart", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Restart", nil, "Failure sending request")
return
}
@@ -934,7 +944,10 @@ func (client VirtualMachineScaleSetVMsClient) RestartSender(req *http.Request) (
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -960,8 +973,8 @@ func (client VirtualMachineScaleSetVMsClient) RunCommand(ctx context.Context, re
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMsClient.RunCommand")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -980,7 +993,7 @@ func (client VirtualMachineScaleSetVMsClient) RunCommand(ctx context.Context, re
result, err = client.RunCommandSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "RunCommand", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "RunCommand", nil, "Failure sending request")
return
}
@@ -1019,7 +1032,10 @@ func (client VirtualMachineScaleSetVMsClient) RunCommandSender(req *http.Request
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -1045,8 +1061,8 @@ func (client VirtualMachineScaleSetVMsClient) Start(ctx context.Context, resourc
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMsClient.Start")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -1059,7 +1075,7 @@ func (client VirtualMachineScaleSetVMsClient) Start(ctx context.Context, resourc
result, err = client.StartSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Start", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Start", nil, "Failure sending request")
return
}
@@ -1096,7 +1112,10 @@ func (client VirtualMachineScaleSetVMsClient) StartSender(req *http.Request) (fu
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -1122,8 +1141,8 @@ func (client VirtualMachineScaleSetVMsClient) Update(ctx context.Context, resour
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMsClient.Update")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -1157,7 +1176,7 @@ func (client VirtualMachineScaleSetVMsClient) Update(ctx context.Context, resour
result, err = client.UpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Update", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Update", nil, "Failure sending request")
return
}
@@ -1200,7 +1219,10 @@ func (client VirtualMachineScaleSetVMsClient) UpdateSender(req *http.Request) (f
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinesizes.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinesizes.go
index c06436e05..449ce499a 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinesizes.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinesizes.go
@@ -1,18 +1,7 @@
package compute
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/CHANGELOG.md
index 0c66d356b..2b9f1f7fa 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/CHANGELOG.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/CHANGELOG.md
@@ -1,67 +1,24 @@
-Generated from https://github.com/Azure/azure-rest-api-specs/tree/3c764635e7d442b3e74caf593029fcd440b3ef82
+# Change History
-Code generator @microsoft.azure/autorest.go@~2.1.161
+## Additive Changes
-## Breaking Changes
+### New Funcs
-- Function `NewAvailableDelegationsResultPage` parameter(s) have been changed from `(func(context.Context, AvailableDelegationsResult) (AvailableDelegationsResult, error))` to `(AvailableDelegationsResult, func(context.Context, AvailableDelegationsResult) (AvailableDelegationsResult, error))`
-- Function `NewExpressRoutePortsLocationListResultPage` parameter(s) have been changed from `(func(context.Context, ExpressRoutePortsLocationListResult) (ExpressRoutePortsLocationListResult, error))` to `(ExpressRoutePortsLocationListResult, func(context.Context, ExpressRoutePortsLocationListResult) (ExpressRoutePortsLocationListResult, error))`
-- Function `NewExpressRouteCircuitPeeringListResultPage` parameter(s) have been changed from `(func(context.Context, ExpressRouteCircuitPeeringListResult) (ExpressRouteCircuitPeeringListResult, error))` to `(ExpressRouteCircuitPeeringListResult, func(context.Context, ExpressRouteCircuitPeeringListResult) (ExpressRouteCircuitPeeringListResult, error))`
-- Function `NewLocalNetworkGatewayListResultPage` parameter(s) have been changed from `(func(context.Context, LocalNetworkGatewayListResult) (LocalNetworkGatewayListResult, error))` to `(LocalNetworkGatewayListResult, func(context.Context, LocalNetworkGatewayListResult) (LocalNetworkGatewayListResult, error))`
-- Function `NewSecurityGroupListResultPage` parameter(s) have been changed from `(func(context.Context, SecurityGroupListResult) (SecurityGroupListResult, error))` to `(SecurityGroupListResult, func(context.Context, SecurityGroupListResult) (SecurityGroupListResult, error))`
-- Function `NewDdosProtectionPlanListResultPage` parameter(s) have been changed from `(func(context.Context, DdosProtectionPlanListResult) (DdosProtectionPlanListResult, error))` to `(DdosProtectionPlanListResult, func(context.Context, DdosProtectionPlanListResult) (DdosProtectionPlanListResult, error))`
-- Function `NewSecurityRuleListResultPage` parameter(s) have been changed from `(func(context.Context, SecurityRuleListResult) (SecurityRuleListResult, error))` to `(SecurityRuleListResult, func(context.Context, SecurityRuleListResult) (SecurityRuleListResult, error))`
-- Function `NewInterfaceLoadBalancerListResultPage` parameter(s) have been changed from `(func(context.Context, InterfaceLoadBalancerListResult) (InterfaceLoadBalancerListResult, error))` to `(InterfaceLoadBalancerListResult, func(context.Context, InterfaceLoadBalancerListResult) (InterfaceLoadBalancerListResult, error))`
-- Function `NewExpressRouteCrossConnectionListResultPage` parameter(s) have been changed from `(func(context.Context, ExpressRouteCrossConnectionListResult) (ExpressRouteCrossConnectionListResult, error))` to `(ExpressRouteCrossConnectionListResult, func(context.Context, ExpressRouteCrossConnectionListResult) (ExpressRouteCrossConnectionListResult, error))`
-- Function `NewOperationListResultPage` parameter(s) have been changed from `(func(context.Context, OperationListResult) (OperationListResult, error))` to `(OperationListResult, func(context.Context, OperationListResult) (OperationListResult, error))`
-- Function `NewAuthorizationListResultPage` parameter(s) have been changed from `(func(context.Context, AuthorizationListResult) (AuthorizationListResult, error))` to `(AuthorizationListResult, func(context.Context, AuthorizationListResult) (AuthorizationListResult, error))`
-- Function `NewVirtualNetworkGatewayListConnectionsResultPage` parameter(s) have been changed from `(func(context.Context, VirtualNetworkGatewayListConnectionsResult) (VirtualNetworkGatewayListConnectionsResult, error))` to `(VirtualNetworkGatewayListConnectionsResult, func(context.Context, VirtualNetworkGatewayListConnectionsResult) (VirtualNetworkGatewayListConnectionsResult, error))`
-- Function `NewVirtualNetworkListResultPage` parameter(s) have been changed from `(func(context.Context, VirtualNetworkListResult) (VirtualNetworkListResult, error))` to `(VirtualNetworkListResult, func(context.Context, VirtualNetworkListResult) (VirtualNetworkListResult, error))`
-- Function `NewRouteFilterListResultPage` parameter(s) have been changed from `(func(context.Context, RouteFilterListResult) (RouteFilterListResult, error))` to `(RouteFilterListResult, func(context.Context, RouteFilterListResult) (RouteFilterListResult, error))`
-- Function `NewUsagesListResultPage` parameter(s) have been changed from `(func(context.Context, UsagesListResult) (UsagesListResult, error))` to `(UsagesListResult, func(context.Context, UsagesListResult) (UsagesListResult, error))`
-- Function `NewLoadBalancerLoadBalancingRuleListResultPage` parameter(s) have been changed from `(func(context.Context, LoadBalancerLoadBalancingRuleListResult) (LoadBalancerLoadBalancingRuleListResult, error))` to `(LoadBalancerLoadBalancingRuleListResult, func(context.Context, LoadBalancerLoadBalancingRuleListResult) (LoadBalancerLoadBalancingRuleListResult, error))`
-- Function `NewExpressRoutePortListResultPage` parameter(s) have been changed from `(func(context.Context, ExpressRoutePortListResult) (ExpressRoutePortListResult, error))` to `(ExpressRoutePortListResult, func(context.Context, ExpressRoutePortListResult) (ExpressRoutePortListResult, error))`
-- Function `NewLoadBalancerFrontendIPConfigurationListResultPage` parameter(s) have been changed from `(func(context.Context, LoadBalancerFrontendIPConfigurationListResult) (LoadBalancerFrontendIPConfigurationListResult, error))` to `(LoadBalancerFrontendIPConfigurationListResult, func(context.Context, LoadBalancerFrontendIPConfigurationListResult) (LoadBalancerFrontendIPConfigurationListResult, error))`
-- Function `NewListVpnSitesResultPage` parameter(s) have been changed from `(func(context.Context, ListVpnSitesResult) (ListVpnSitesResult, error))` to `(ListVpnSitesResult, func(context.Context, ListVpnSitesResult) (ListVpnSitesResult, error))`
-- Function `NewServiceEndpointPolicyListResultPage` parameter(s) have been changed from `(func(context.Context, ServiceEndpointPolicyListResult) (ServiceEndpointPolicyListResult, error))` to `(ServiceEndpointPolicyListResult, func(context.Context, ServiceEndpointPolicyListResult) (ServiceEndpointPolicyListResult, error))`
-- Function `NewExpressRouteServiceProviderListResultPage` parameter(s) have been changed from `(func(context.Context, ExpressRouteServiceProviderListResult) (ExpressRouteServiceProviderListResult, error))` to `(ExpressRouteServiceProviderListResult, func(context.Context, ExpressRouteServiceProviderListResult) (ExpressRouteServiceProviderListResult, error))`
-- Function `NewListVpnGatewaysResultPage` parameter(s) have been changed from `(func(context.Context, ListVpnGatewaysResult) (ListVpnGatewaysResult, error))` to `(ListVpnGatewaysResult, func(context.Context, ListVpnGatewaysResult) (ListVpnGatewaysResult, error))`
-- Function `NewApplicationSecurityGroupListResultPage` parameter(s) have been changed from `(func(context.Context, ApplicationSecurityGroupListResult) (ApplicationSecurityGroupListResult, error))` to `(ApplicationSecurityGroupListResult, func(context.Context, ApplicationSecurityGroupListResult) (ApplicationSecurityGroupListResult, error))`
-- Function `NewInterfaceTapConfigurationListResultPage` parameter(s) have been changed from `(func(context.Context, InterfaceTapConfigurationListResult) (InterfaceTapConfigurationListResult, error))` to `(InterfaceTapConfigurationListResult, func(context.Context, InterfaceTapConfigurationListResult) (InterfaceTapConfigurationListResult, error))`
-- Function `NewPublicIPPrefixListResultPage` parameter(s) have been changed from `(func(context.Context, PublicIPPrefixListResult) (PublicIPPrefixListResult, error))` to `(PublicIPPrefixListResult, func(context.Context, PublicIPPrefixListResult) (PublicIPPrefixListResult, error))`
-- Function `NewInterfaceIPConfigurationListResultPage` parameter(s) have been changed from `(func(context.Context, InterfaceIPConfigurationListResult) (InterfaceIPConfigurationListResult, error))` to `(InterfaceIPConfigurationListResult, func(context.Context, InterfaceIPConfigurationListResult) (InterfaceIPConfigurationListResult, error))`
-- Function `NewApplicationGatewayListResultPage` parameter(s) have been changed from `(func(context.Context, ApplicationGatewayListResult) (ApplicationGatewayListResult, error))` to `(ApplicationGatewayListResult, func(context.Context, ApplicationGatewayListResult) (ApplicationGatewayListResult, error))`
-- Function `NewLoadBalancerProbeListResultPage` parameter(s) have been changed from `(func(context.Context, LoadBalancerProbeListResult) (LoadBalancerProbeListResult, error))` to `(LoadBalancerProbeListResult, func(context.Context, LoadBalancerProbeListResult) (LoadBalancerProbeListResult, error))`
-- Function `NewVirtualNetworkPeeringListResultPage` parameter(s) have been changed from `(func(context.Context, VirtualNetworkPeeringListResult) (VirtualNetworkPeeringListResult, error))` to `(VirtualNetworkPeeringListResult, func(context.Context, VirtualNetworkPeeringListResult) (VirtualNetworkPeeringListResult, error))`
-- Function `NewRouteFilterRuleListResultPage` parameter(s) have been changed from `(func(context.Context, RouteFilterRuleListResult) (RouteFilterRuleListResult, error))` to `(RouteFilterRuleListResult, func(context.Context, RouteFilterRuleListResult) (RouteFilterRuleListResult, error))`
-- Function `NewInterfaceEndpointListResultPage` parameter(s) have been changed from `(func(context.Context, InterfaceEndpointListResult) (InterfaceEndpointListResult, error))` to `(InterfaceEndpointListResult, func(context.Context, InterfaceEndpointListResult) (InterfaceEndpointListResult, error))`
-- Function `NewExpressRouteCrossConnectionPeeringListPage` parameter(s) have been changed from `(func(context.Context, ExpressRouteCrossConnectionPeeringList) (ExpressRouteCrossConnectionPeeringList, error))` to `(ExpressRouteCrossConnectionPeeringList, func(context.Context, ExpressRouteCrossConnectionPeeringList) (ExpressRouteCrossConnectionPeeringList, error))`
-- Function `NewPublicIPAddressListResultPage` parameter(s) have been changed from `(func(context.Context, PublicIPAddressListResult) (PublicIPAddressListResult, error))` to `(PublicIPAddressListResult, func(context.Context, PublicIPAddressListResult) (PublicIPAddressListResult, error))`
-- Function `NewListVpnConnectionsResultPage` parameter(s) have been changed from `(func(context.Context, ListVpnConnectionsResult) (ListVpnConnectionsResult, error))` to `(ListVpnConnectionsResult, func(context.Context, ListVpnConnectionsResult) (ListVpnConnectionsResult, error))`
-- Function `NewInboundNatRuleListResultPage` parameter(s) have been changed from `(func(context.Context, InboundNatRuleListResult) (InboundNatRuleListResult, error))` to `(InboundNatRuleListResult, func(context.Context, InboundNatRuleListResult) (InboundNatRuleListResult, error))`
-- Function `NewServiceEndpointPolicyDefinitionListResultPage` parameter(s) have been changed from `(func(context.Context, ServiceEndpointPolicyDefinitionListResult) (ServiceEndpointPolicyDefinitionListResult, error))` to `(ServiceEndpointPolicyDefinitionListResult, func(context.Context, ServiceEndpointPolicyDefinitionListResult) (ServiceEndpointPolicyDefinitionListResult, error))`
-- Function `NewRouteListResultPage` parameter(s) have been changed from `(func(context.Context, RouteListResult) (RouteListResult, error))` to `(RouteListResult, func(context.Context, RouteListResult) (RouteListResult, error))`
-- Function `NewVirtualNetworkListUsageResultPage` parameter(s) have been changed from `(func(context.Context, VirtualNetworkListUsageResult) (VirtualNetworkListUsageResult, error))` to `(VirtualNetworkListUsageResult, func(context.Context, VirtualNetworkListUsageResult) (VirtualNetworkListUsageResult, error))`
-- Function `NewLoadBalancerOutboundRuleListResultPage` parameter(s) have been changed from `(func(context.Context, LoadBalancerOutboundRuleListResult) (LoadBalancerOutboundRuleListResult, error))` to `(LoadBalancerOutboundRuleListResult, func(context.Context, LoadBalancerOutboundRuleListResult) (LoadBalancerOutboundRuleListResult, error))`
-- Function `NewVirtualNetworkGatewayConnectionListResultPage` parameter(s) have been changed from `(func(context.Context, VirtualNetworkGatewayConnectionListResult) (VirtualNetworkGatewayConnectionListResult, error))` to `(VirtualNetworkGatewayConnectionListResult, func(context.Context, VirtualNetworkGatewayConnectionListResult) (VirtualNetworkGatewayConnectionListResult, error))`
-- Function `NewListVirtualWANsResultPage` parameter(s) have been changed from `(func(context.Context, ListVirtualWANsResult) (ListVirtualWANsResult, error))` to `(ListVirtualWANsResult, func(context.Context, ListVirtualWANsResult) (ListVirtualWANsResult, error))`
-- Function `NewEndpointServicesListResultPage` parameter(s) have been changed from `(func(context.Context, EndpointServicesListResult) (EndpointServicesListResult, error))` to `(EndpointServicesListResult, func(context.Context, EndpointServicesListResult) (EndpointServicesListResult, error))`
-- Function `NewApplicationGatewayAvailableSslPredefinedPoliciesPage` parameter(s) have been changed from `(func(context.Context, ApplicationGatewayAvailableSslPredefinedPolicies) (ApplicationGatewayAvailableSslPredefinedPolicies, error))` to `(ApplicationGatewayAvailableSslPredefinedPolicies, func(context.Context, ApplicationGatewayAvailableSslPredefinedPolicies) (ApplicationGatewayAvailableSslPredefinedPolicies, error))`
-- Function `NewInterfaceListResultPage` parameter(s) have been changed from `(func(context.Context, InterfaceListResult) (InterfaceListResult, error))` to `(InterfaceListResult, func(context.Context, InterfaceListResult) (InterfaceListResult, error))`
-- Function `NewExpressRouteLinkListResultPage` parameter(s) have been changed from `(func(context.Context, ExpressRouteLinkListResult) (ExpressRouteLinkListResult, error))` to `(ExpressRouteLinkListResult, func(context.Context, ExpressRouteLinkListResult) (ExpressRouteLinkListResult, error))`
-- Function `NewAzureFirewallFqdnTagListResultPage` parameter(s) have been changed from `(func(context.Context, AzureFirewallFqdnTagListResult) (AzureFirewallFqdnTagListResult, error))` to `(AzureFirewallFqdnTagListResult, func(context.Context, AzureFirewallFqdnTagListResult) (AzureFirewallFqdnTagListResult, error))`
-- Function `NewListP2SVpnServerConfigurationsResultPage` parameter(s) have been changed from `(func(context.Context, ListP2SVpnServerConfigurationsResult) (ListP2SVpnServerConfigurationsResult, error))` to `(ListP2SVpnServerConfigurationsResult, func(context.Context, ListP2SVpnServerConfigurationsResult) (ListP2SVpnServerConfigurationsResult, error))`
-- Function `NewProfileListResultPage` parameter(s) have been changed from `(func(context.Context, ProfileListResult) (ProfileListResult, error))` to `(ProfileListResult, func(context.Context, ProfileListResult) (ProfileListResult, error))`
-- Function `NewRouteTableListResultPage` parameter(s) have been changed from `(func(context.Context, RouteTableListResult) (RouteTableListResult, error))` to `(RouteTableListResult, func(context.Context, RouteTableListResult) (RouteTableListResult, error))`
-- Function `NewVirtualNetworkTapListResultPage` parameter(s) have been changed from `(func(context.Context, VirtualNetworkTapListResult) (VirtualNetworkTapListResult, error))` to `(VirtualNetworkTapListResult, func(context.Context, VirtualNetworkTapListResult) (VirtualNetworkTapListResult, error))`
-- Function `NewLoadBalancerListResultPage` parameter(s) have been changed from `(func(context.Context, LoadBalancerListResult) (LoadBalancerListResult, error))` to `(LoadBalancerListResult, func(context.Context, LoadBalancerListResult) (LoadBalancerListResult, error))`
-- Function `NewListVirtualHubsResultPage` parameter(s) have been changed from `(func(context.Context, ListVirtualHubsResult) (ListVirtualHubsResult, error))` to `(ListVirtualHubsResult, func(context.Context, ListVirtualHubsResult) (ListVirtualHubsResult, error))`
-- Function `NewBgpServiceCommunityListResultPage` parameter(s) have been changed from `(func(context.Context, BgpServiceCommunityListResult) (BgpServiceCommunityListResult, error))` to `(BgpServiceCommunityListResult, func(context.Context, BgpServiceCommunityListResult) (BgpServiceCommunityListResult, error))`
-- Function `NewAzureFirewallListResultPage` parameter(s) have been changed from `(func(context.Context, AzureFirewallListResult) (AzureFirewallListResult, error))` to `(AzureFirewallListResult, func(context.Context, AzureFirewallListResult) (AzureFirewallListResult, error))`
-- Function `NewVirtualNetworkGatewayListResultPage` parameter(s) have been changed from `(func(context.Context, VirtualNetworkGatewayListResult) (VirtualNetworkGatewayListResult, error))` to `(VirtualNetworkGatewayListResult, func(context.Context, VirtualNetworkGatewayListResult) (VirtualNetworkGatewayListResult, error))`
-- Function `NewExpressRouteCircuitListResultPage` parameter(s) have been changed from `(func(context.Context, ExpressRouteCircuitListResult) (ExpressRouteCircuitListResult, error))` to `(ExpressRouteCircuitListResult, func(context.Context, ExpressRouteCircuitListResult) (ExpressRouteCircuitListResult, error))`
-- Function `NewLoadBalancerBackendAddressPoolListResultPage` parameter(s) have been changed from `(func(context.Context, LoadBalancerBackendAddressPoolListResult) (LoadBalancerBackendAddressPoolListResult, error))` to `(LoadBalancerBackendAddressPoolListResult, func(context.Context, LoadBalancerBackendAddressPoolListResult) (LoadBalancerBackendAddressPoolListResult, error))`
-- Function `NewSubnetListResultPage` parameter(s) have been changed from `(func(context.Context, SubnetListResult) (SubnetListResult, error))` to `(SubnetListResult, func(context.Context, SubnetListResult) (SubnetListResult, error))`
-- Function `NewExpressRouteCircuitConnectionListResultPage` parameter(s) have been changed from `(func(context.Context, ExpressRouteCircuitConnectionListResult) (ExpressRouteCircuitConnectionListResult, error))` to `(ExpressRouteCircuitConnectionListResult, func(context.Context, ExpressRouteCircuitConnectionListResult) (ExpressRouteCircuitConnectionListResult, error))`
-- Function `NewListHubVirtualNetworkConnectionsResultPage` parameter(s) have been changed from `(func(context.Context, ListHubVirtualNetworkConnectionsResult) (ListHubVirtualNetworkConnectionsResult, error))` to `(ListHubVirtualNetworkConnectionsResult, func(context.Context, ListHubVirtualNetworkConnectionsResult) (ListHubVirtualNetworkConnectionsResult, error))`
-- Function `NewListP2SVpnGatewaysResultPage` parameter(s) have been changed from `(func(context.Context, ListP2SVpnGatewaysResult) (ListP2SVpnGatewaysResult, error))` to `(ListP2SVpnGatewaysResult, func(context.Context, ListP2SVpnGatewaysResult) (ListP2SVpnGatewaysResult, error))`
+1. ApplicationSecurityGroupPropertiesFormat.MarshalJSON() ([]byte, error)
+1. AzureFirewallFqdnTagPropertiesFormat.MarshalJSON() ([]byte, error)
+1. BgpPeerStatus.MarshalJSON() ([]byte, error)
+1. ConfigurationDiagnosticResponse.MarshalJSON() ([]byte, error)
+1. ConnectivityHop.MarshalJSON() ([]byte, error)
+1. ConnectivityInformation.MarshalJSON() ([]byte, error)
+1. ConnectivityIssue.MarshalJSON() ([]byte, error)
+1. ContainerNetworkInterfaceIPConfigurationPropertiesFormat.MarshalJSON() ([]byte, error)
+1. DdosProtectionPlanPropertiesFormat.MarshalJSON() ([]byte, error)
+1. ExpressRouteConnectionID.MarshalJSON() ([]byte, error)
+1. ExpressRoutePortsLocationBandwidths.MarshalJSON() ([]byte, error)
+1. GatewayRoute.MarshalJSON() ([]byte, error)
+1. ManagedServiceIdentityUserAssignedIdentitiesValue.MarshalJSON() ([]byte, error)
+1. TunnelConnectionHealth.MarshalJSON() ([]byte, error)
+1. VirtualNetworkUsage.MarshalJSON() ([]byte, error)
+1. VirtualNetworkUsageName.MarshalJSON() ([]byte, error)
+1. VpnSiteID.MarshalJSON() ([]byte, error)
+1. WatcherPropertiesFormat.MarshalJSON() ([]byte, error)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/_meta.json
new file mode 100644
index 000000000..d7647fabe
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/_meta.json
@@ -0,0 +1,11 @@
+{
+ "commit": "3c764635e7d442b3e74caf593029fcd440b3ef82",
+ "readme": "/_/azure-rest-api-specs/specification/network/resource-manager/readme.md",
+ "tag": "package-2018-10",
+ "use": "@microsoft.azure/autorest.go@2.1.183",
+ "repository_url": "https://github.com/Azure/azure-rest-api-specs.git",
+ "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.183 --tag=package-2018-10 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/network/resource-manager/readme.md",
+ "additional_properties": {
+ "additional_options": "--go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION"
+ }
+}
\ No newline at end of file
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/applicationgateways.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/applicationgateways.go
index c747ffa30..f0a6170ee 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/applicationgateways.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/applicationgateways.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -53,8 +42,8 @@ func (client ApplicationGatewaysClient) BackendHealth(ctx context.Context, resou
ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationGatewaysClient.BackendHealth")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -67,7 +56,7 @@ func (client ApplicationGatewaysClient) BackendHealth(ctx context.Context, resou
result, err = client.BackendHealthSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "BackendHealth", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "BackendHealth", nil, "Failure sending request")
return
}
@@ -106,7 +95,10 @@ func (client ApplicationGatewaysClient) BackendHealthSender(req *http.Request) (
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -132,8 +124,8 @@ func (client ApplicationGatewaysClient) CreateOrUpdate(ctx context.Context, reso
ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationGatewaysClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -174,7 +166,7 @@ func (client ApplicationGatewaysClient) CreateOrUpdate(ctx context.Context, reso
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -212,7 +204,10 @@ func (client ApplicationGatewaysClient) CreateOrUpdateSender(req *http.Request)
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -237,8 +232,8 @@ func (client ApplicationGatewaysClient) Delete(ctx context.Context, resourceGrou
ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationGatewaysClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -251,7 +246,7 @@ func (client ApplicationGatewaysClient) Delete(ctx context.Context, resourceGrou
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Delete", nil, "Failure sending request")
return
}
@@ -287,7 +282,10 @@ func (client ApplicationGatewaysClient) DeleteSender(req *http.Request) (future
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -487,6 +485,7 @@ func (client ApplicationGatewaysClient) List(ctx context.Context, resourceGroupN
}
if result.aglr.hasNextLink() && result.aglr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -547,7 +546,6 @@ func (client ApplicationGatewaysClient) listNextResults(ctx context.Context, las
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -601,6 +599,7 @@ func (client ApplicationGatewaysClient) ListAll(ctx context.Context) (result App
}
if result.aglr.hasNextLink() && result.aglr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -660,7 +659,6 @@ func (client ApplicationGatewaysClient) listAllNextResults(ctx context.Context,
result, err = client.ListAllResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "listAllNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -785,6 +783,7 @@ func (client ApplicationGatewaysClient) ListAvailableSslPredefinedPolicies(ctx c
}
if result.agaspp.hasNextLink() && result.agaspp.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -844,7 +843,6 @@ func (client ApplicationGatewaysClient) listAvailableSslPredefinedPoliciesNextRe
result, err = client.ListAvailableSslPredefinedPoliciesResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "listAvailableSslPredefinedPoliciesNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -945,8 +943,8 @@ func (client ApplicationGatewaysClient) Start(ctx context.Context, resourceGroup
ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationGatewaysClient.Start")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -959,7 +957,7 @@ func (client ApplicationGatewaysClient) Start(ctx context.Context, resourceGroup
result, err = client.StartSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Start", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Start", nil, "Failure sending request")
return
}
@@ -995,7 +993,10 @@ func (client ApplicationGatewaysClient) StartSender(req *http.Request) (future A
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -1019,8 +1020,8 @@ func (client ApplicationGatewaysClient) Stop(ctx context.Context, resourceGroupN
ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationGatewaysClient.Stop")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -1033,7 +1034,7 @@ func (client ApplicationGatewaysClient) Stop(ctx context.Context, resourceGroupN
result, err = client.StopSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Stop", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Stop", nil, "Failure sending request")
return
}
@@ -1069,7 +1070,10 @@ func (client ApplicationGatewaysClient) StopSender(req *http.Request) (future Ap
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -1094,8 +1098,8 @@ func (client ApplicationGatewaysClient) UpdateTags(ctx context.Context, resource
ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationGatewaysClient.UpdateTags")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -1108,7 +1112,7 @@ func (client ApplicationGatewaysClient) UpdateTags(ctx context.Context, resource
result, err = client.UpdateTagsSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "UpdateTags", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "UpdateTags", nil, "Failure sending request")
return
}
@@ -1146,7 +1150,10 @@ func (client ApplicationGatewaysClient) UpdateTagsSender(req *http.Request) (fut
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/applicationsecuritygroups.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/applicationsecuritygroups.go
index 830525307..830d2ed0f 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/applicationsecuritygroups.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/applicationsecuritygroups.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -52,8 +41,8 @@ func (client ApplicationSecurityGroupsClient) CreateOrUpdate(ctx context.Context
ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationSecurityGroupsClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -66,7 +55,7 @@ func (client ApplicationSecurityGroupsClient) CreateOrUpdate(ctx context.Context
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -105,7 +94,10 @@ func (client ApplicationSecurityGroupsClient) CreateOrUpdateSender(req *http.Req
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -130,8 +122,8 @@ func (client ApplicationSecurityGroupsClient) Delete(ctx context.Context, resour
ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationSecurityGroupsClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -144,7 +136,7 @@ func (client ApplicationSecurityGroupsClient) Delete(ctx context.Context, resour
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsClient", "Delete", nil, "Failure sending request")
return
}
@@ -180,7 +172,10 @@ func (client ApplicationSecurityGroupsClient) DeleteSender(req *http.Request) (f
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -306,6 +301,7 @@ func (client ApplicationSecurityGroupsClient) List(ctx context.Context, resource
}
if result.asglr.hasNextLink() && result.asglr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -366,7 +362,6 @@ func (client ApplicationSecurityGroupsClient) listNextResults(ctx context.Contex
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -420,6 +415,7 @@ func (client ApplicationSecurityGroupsClient) ListAll(ctx context.Context) (resu
}
if result.asglr.hasNextLink() && result.asglr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -479,7 +475,6 @@ func (client ApplicationSecurityGroupsClient) listAllNextResults(ctx context.Con
result, err = client.ListAllResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsClient", "listAllNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/availabledelegations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/availabledelegations.go
index d388f1ece..90a3d2b5c 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/availabledelegations.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/availabledelegations.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -77,6 +66,7 @@ func (client AvailableDelegationsClient) List(ctx context.Context, location stri
}
if result.adr.hasNextLink() && result.adr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -137,7 +127,6 @@ func (client AvailableDelegationsClient) listNextResults(ctx context.Context, la
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.AvailableDelegationsClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/availableendpointservices.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/availableendpointservices.go
index 1d95c5278..a9be34ca6 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/availableendpointservices.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/availableendpointservices.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -77,6 +66,7 @@ func (client AvailableEndpointServicesClient) List(ctx context.Context, location
}
if result.eslr.hasNextLink() && result.eslr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -137,7 +127,6 @@ func (client AvailableEndpointServicesClient) listNextResults(ctx context.Contex
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.AvailableEndpointServicesClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/availableresourcegroupdelegations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/availableresourcegroupdelegations.go
index bdc77825b..82dbc6b17 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/availableresourcegroupdelegations.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/availableresourcegroupdelegations.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -79,6 +68,7 @@ func (client AvailableResourceGroupDelegationsClient) List(ctx context.Context,
}
if result.adr.hasNextLink() && result.adr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -140,7 +130,6 @@ func (client AvailableResourceGroupDelegationsClient) listNextResults(ctx contex
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.AvailableResourceGroupDelegationsClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/azurefirewallfqdntags.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/azurefirewallfqdntags.go
index 695446135..b7e7da18b 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/azurefirewallfqdntags.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/azurefirewallfqdntags.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -75,6 +64,7 @@ func (client AzureFirewallFqdnTagsClient) ListAll(ctx context.Context) (result A
}
if result.afftlr.hasNextLink() && result.afftlr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -134,7 +124,6 @@ func (client AzureFirewallFqdnTagsClient) listAllNextResults(ctx context.Context
result, err = client.ListAllResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.AzureFirewallFqdnTagsClient", "listAllNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/azurefirewalls.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/azurefirewalls.go
index 6f457ce7c..01e444571 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/azurefirewalls.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/azurefirewalls.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -51,8 +40,8 @@ func (client AzureFirewallsClient) CreateOrUpdate(ctx context.Context, resourceG
ctx = tracing.StartSpan(ctx, fqdn+"/AzureFirewallsClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -65,7 +54,7 @@ func (client AzureFirewallsClient) CreateOrUpdate(ctx context.Context, resourceG
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.AzureFirewallsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.AzureFirewallsClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -104,7 +93,10 @@ func (client AzureFirewallsClient) CreateOrUpdateSender(req *http.Request) (futu
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -129,8 +121,8 @@ func (client AzureFirewallsClient) Delete(ctx context.Context, resourceGroupName
ctx = tracing.StartSpan(ctx, fqdn+"/AzureFirewallsClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -143,7 +135,7 @@ func (client AzureFirewallsClient) Delete(ctx context.Context, resourceGroupName
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.AzureFirewallsClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.AzureFirewallsClient", "Delete", nil, "Failure sending request")
return
}
@@ -179,7 +171,10 @@ func (client AzureFirewallsClient) DeleteSender(req *http.Request) (future Azure
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -305,6 +300,7 @@ func (client AzureFirewallsClient) List(ctx context.Context, resourceGroupName s
}
if result.aflr.hasNextLink() && result.aflr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -365,7 +361,6 @@ func (client AzureFirewallsClient) listNextResults(ctx context.Context, lastResu
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.AzureFirewallsClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -419,6 +414,7 @@ func (client AzureFirewallsClient) ListAll(ctx context.Context) (result AzureFir
}
if result.aflr.hasNextLink() && result.aflr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -478,7 +474,6 @@ func (client AzureFirewallsClient) listAllNextResults(ctx context.Context, lastR
result, err = client.ListAllResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.AzureFirewallsClient", "listAllNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/bgpservicecommunities.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/bgpservicecommunities.go
index d5d708062..bef8d21b8 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/bgpservicecommunities.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/bgpservicecommunities.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -75,6 +64,7 @@ func (client BgpServiceCommunitiesClient) List(ctx context.Context) (result BgpS
}
if result.bsclr.hasNextLink() && result.bsclr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -134,7 +124,6 @@ func (client BgpServiceCommunitiesClient) listNextResults(ctx context.Context, l
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.BgpServiceCommunitiesClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/client.go
index 04357463a..efb7a0f19 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/client.go
@@ -3,19 +3,8 @@
// Network Client
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/connectionmonitors.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/connectionmonitors.go
index e07868e25..1b2c2171c 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/connectionmonitors.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/connectionmonitors.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -54,8 +43,8 @@ func (client ConnectionMonitorsClient) CreateOrUpdate(ctx context.Context, resou
ctx = tracing.StartSpan(ctx, fqdn+"/ConnectionMonitorsClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -78,7 +67,7 @@ func (client ConnectionMonitorsClient) CreateOrUpdate(ctx context.Context, resou
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -117,7 +106,10 @@ func (client ConnectionMonitorsClient) CreateOrUpdateSender(req *http.Request) (
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -143,8 +135,8 @@ func (client ConnectionMonitorsClient) Delete(ctx context.Context, resourceGroup
ctx = tracing.StartSpan(ctx, fqdn+"/ConnectionMonitorsClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -157,7 +149,7 @@ func (client ConnectionMonitorsClient) Delete(ctx context.Context, resourceGroup
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsClient", "Delete", nil, "Failure sending request")
return
}
@@ -194,7 +186,10 @@ func (client ConnectionMonitorsClient) DeleteSender(req *http.Request) (future C
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -373,8 +368,8 @@ func (client ConnectionMonitorsClient) Query(ctx context.Context, resourceGroupN
ctx = tracing.StartSpan(ctx, fqdn+"/ConnectionMonitorsClient.Query")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -387,7 +382,7 @@ func (client ConnectionMonitorsClient) Query(ctx context.Context, resourceGroupN
result, err = client.QuerySender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsClient", "Query", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsClient", "Query", nil, "Failure sending request")
return
}
@@ -424,7 +419,10 @@ func (client ConnectionMonitorsClient) QuerySender(req *http.Request) (future Co
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -450,8 +448,8 @@ func (client ConnectionMonitorsClient) Start(ctx context.Context, resourceGroupN
ctx = tracing.StartSpan(ctx, fqdn+"/ConnectionMonitorsClient.Start")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -464,7 +462,7 @@ func (client ConnectionMonitorsClient) Start(ctx context.Context, resourceGroupN
result, err = client.StartSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsClient", "Start", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsClient", "Start", nil, "Failure sending request")
return
}
@@ -501,7 +499,10 @@ func (client ConnectionMonitorsClient) StartSender(req *http.Request) (future Co
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -526,8 +527,8 @@ func (client ConnectionMonitorsClient) Stop(ctx context.Context, resourceGroupNa
ctx = tracing.StartSpan(ctx, fqdn+"/ConnectionMonitorsClient.Stop")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -540,7 +541,7 @@ func (client ConnectionMonitorsClient) Stop(ctx context.Context, resourceGroupNa
result, err = client.StopSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsClient", "Stop", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsClient", "Stop", nil, "Failure sending request")
return
}
@@ -577,7 +578,10 @@ func (client ConnectionMonitorsClient) StopSender(req *http.Request) (future Con
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/ddosprotectionplans.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/ddosprotectionplans.go
index 69ed83d04..5ba2bdd03 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/ddosprotectionplans.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/ddosprotectionplans.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -52,8 +41,8 @@ func (client DdosProtectionPlansClient) CreateOrUpdate(ctx context.Context, reso
ctx = tracing.StartSpan(ctx, fqdn+"/DdosProtectionPlansClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -66,7 +55,7 @@ func (client DdosProtectionPlansClient) CreateOrUpdate(ctx context.Context, reso
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.DdosProtectionPlansClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.DdosProtectionPlansClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -108,7 +97,10 @@ func (client DdosProtectionPlansClient) CreateOrUpdateSender(req *http.Request)
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -133,8 +125,8 @@ func (client DdosProtectionPlansClient) Delete(ctx context.Context, resourceGrou
ctx = tracing.StartSpan(ctx, fqdn+"/DdosProtectionPlansClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -147,7 +139,7 @@ func (client DdosProtectionPlansClient) Delete(ctx context.Context, resourceGrou
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.DdosProtectionPlansClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.DdosProtectionPlansClient", "Delete", nil, "Failure sending request")
return
}
@@ -183,7 +175,10 @@ func (client DdosProtectionPlansClient) DeleteSender(req *http.Request) (future
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -307,6 +302,7 @@ func (client DdosProtectionPlansClient) List(ctx context.Context) (result DdosPr
}
if result.dpplr.hasNextLink() && result.dpplr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -366,7 +362,6 @@ func (client DdosProtectionPlansClient) listNextResults(ctx context.Context, las
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.DdosProtectionPlansClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -422,6 +417,7 @@ func (client DdosProtectionPlansClient) ListByResourceGroup(ctx context.Context,
}
if result.dpplr.hasNextLink() && result.dpplr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -482,7 +478,6 @@ func (client DdosProtectionPlansClient) listByResourceGroupNextResults(ctx conte
result, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.DdosProtectionPlansClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/defaultsecurityrules.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/defaultsecurityrules.go
index 9a2091635..4357a32ce 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/defaultsecurityrules.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/defaultsecurityrules.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -156,6 +145,7 @@ func (client DefaultSecurityRulesClient) List(ctx context.Context, resourceGroup
}
if result.srlr.hasNextLink() && result.srlr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -217,7 +207,6 @@ func (client DefaultSecurityRulesClient) listNextResults(ctx context.Context, la
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.DefaultSecurityRulesClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/enums.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/enums.go
index 9ae11f26a..0c115159d 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/enums.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/enums.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressroutecircuitauthorizations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressroutecircuitauthorizations.go
index 08d1b0ae4..d8853bf6b 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressroutecircuitauthorizations.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressroutecircuitauthorizations.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -55,8 +44,8 @@ func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdate(ctx context
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitAuthorizationsClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -69,7 +58,7 @@ func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdate(ctx context
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -109,7 +98,10 @@ func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdateSender(req *
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -135,8 +127,8 @@ func (client ExpressRouteCircuitAuthorizationsClient) Delete(ctx context.Context
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitAuthorizationsClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -149,7 +141,7 @@ func (client ExpressRouteCircuitAuthorizationsClient) Delete(ctx context.Context
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "Delete", nil, "Failure sending request")
return
}
@@ -186,7 +178,10 @@ func (client ExpressRouteCircuitAuthorizationsClient) DeleteSender(req *http.Req
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -315,6 +310,7 @@ func (client ExpressRouteCircuitAuthorizationsClient) List(ctx context.Context,
}
if result.alr.hasNextLink() && result.alr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -376,7 +372,6 @@ func (client ExpressRouteCircuitAuthorizationsClient) listNextResults(ctx contex
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressroutecircuitconnections.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressroutecircuitconnections.go
index 0693ef1dd..ebded0f13 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressroutecircuitconnections.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressroutecircuitconnections.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -55,8 +44,8 @@ func (client ExpressRouteCircuitConnectionsClient) CreateOrUpdate(ctx context.Co
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitConnectionsClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -69,7 +58,7 @@ func (client ExpressRouteCircuitConnectionsClient) CreateOrUpdate(ctx context.Co
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitConnectionsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitConnectionsClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -110,7 +99,10 @@ func (client ExpressRouteCircuitConnectionsClient) CreateOrUpdateSender(req *htt
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -137,8 +129,8 @@ func (client ExpressRouteCircuitConnectionsClient) Delete(ctx context.Context, r
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitConnectionsClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -151,7 +143,7 @@ func (client ExpressRouteCircuitConnectionsClient) Delete(ctx context.Context, r
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitConnectionsClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitConnectionsClient", "Delete", nil, "Failure sending request")
return
}
@@ -189,7 +181,10 @@ func (client ExpressRouteCircuitConnectionsClient) DeleteSender(req *http.Reques
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -321,6 +316,7 @@ func (client ExpressRouteCircuitConnectionsClient) List(ctx context.Context, res
}
if result.ercclr.hasNextLink() && result.ercclr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -383,7 +379,6 @@ func (client ExpressRouteCircuitConnectionsClient) listNextResults(ctx context.C
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitConnectionsClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressroutecircuitpeerings.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressroutecircuitpeerings.go
index 343817367..9edbe7e5b 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressroutecircuitpeerings.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressroutecircuitpeerings.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -54,8 +43,8 @@ func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdate(ctx context.Conte
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitPeeringsClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -79,7 +68,7 @@ func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdate(ctx context.Conte
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -119,7 +108,10 @@ func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdateSender(req *http.R
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -145,8 +137,8 @@ func (client ExpressRouteCircuitPeeringsClient) Delete(ctx context.Context, reso
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitPeeringsClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -159,7 +151,7 @@ func (client ExpressRouteCircuitPeeringsClient) Delete(ctx context.Context, reso
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "Delete", nil, "Failure sending request")
return
}
@@ -196,7 +188,10 @@ func (client ExpressRouteCircuitPeeringsClient) DeleteSender(req *http.Request)
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -325,6 +320,7 @@ func (client ExpressRouteCircuitPeeringsClient) List(ctx context.Context, resour
}
if result.ercplr.hasNextLink() && result.ercplr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -386,7 +382,6 @@ func (client ExpressRouteCircuitPeeringsClient) listNextResults(ctx context.Cont
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressroutecircuits.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressroutecircuits.go
index d40fc2327..f5d856a29 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressroutecircuits.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressroutecircuits.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -52,8 +41,8 @@ func (client ExpressRouteCircuitsClient) CreateOrUpdate(ctx context.Context, res
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -66,7 +55,7 @@ func (client ExpressRouteCircuitsClient) CreateOrUpdate(ctx context.Context, res
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -105,7 +94,10 @@ func (client ExpressRouteCircuitsClient) CreateOrUpdateSender(req *http.Request)
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -130,8 +122,8 @@ func (client ExpressRouteCircuitsClient) Delete(ctx context.Context, resourceGro
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -144,7 +136,7 @@ func (client ExpressRouteCircuitsClient) Delete(ctx context.Context, resourceGro
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Delete", nil, "Failure sending request")
return
}
@@ -180,7 +172,10 @@ func (client ExpressRouteCircuitsClient) DeleteSender(req *http.Request) (future
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -460,6 +455,7 @@ func (client ExpressRouteCircuitsClient) List(ctx context.Context, resourceGroup
}
if result.erclr.hasNextLink() && result.erclr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -520,7 +516,6 @@ func (client ExpressRouteCircuitsClient) listNextResults(ctx context.Context, la
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -574,6 +569,7 @@ func (client ExpressRouteCircuitsClient) ListAll(ctx context.Context) (result Ex
}
if result.erclr.hasNextLink() && result.erclr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -633,7 +629,6 @@ func (client ExpressRouteCircuitsClient) listAllNextResults(ctx context.Context,
result, err = client.ListAllResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "listAllNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -665,8 +660,8 @@ func (client ExpressRouteCircuitsClient) ListArpTable(ctx context.Context, resou
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsClient.ListArpTable")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -679,7 +674,7 @@ func (client ExpressRouteCircuitsClient) ListArpTable(ctx context.Context, resou
result, err = client.ListArpTableSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListArpTable", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListArpTable", nil, "Failure sending request")
return
}
@@ -717,7 +712,10 @@ func (client ExpressRouteCircuitsClient) ListArpTableSender(req *http.Request) (
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -745,8 +743,8 @@ func (client ExpressRouteCircuitsClient) ListRoutesTable(ctx context.Context, re
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsClient.ListRoutesTable")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -759,7 +757,7 @@ func (client ExpressRouteCircuitsClient) ListRoutesTable(ctx context.Context, re
result, err = client.ListRoutesTableSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTable", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTable", nil, "Failure sending request")
return
}
@@ -797,7 +795,10 @@ func (client ExpressRouteCircuitsClient) ListRoutesTableSender(req *http.Request
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -825,8 +826,8 @@ func (client ExpressRouteCircuitsClient) ListRoutesTableSummary(ctx context.Cont
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsClient.ListRoutesTableSummary")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -839,7 +840,7 @@ func (client ExpressRouteCircuitsClient) ListRoutesTableSummary(ctx context.Cont
result, err = client.ListRoutesTableSummarySender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTableSummary", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTableSummary", nil, "Failure sending request")
return
}
@@ -877,7 +878,10 @@ func (client ExpressRouteCircuitsClient) ListRoutesTableSummarySender(req *http.
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -903,8 +907,8 @@ func (client ExpressRouteCircuitsClient) UpdateTags(ctx context.Context, resourc
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsClient.UpdateTags")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -917,7 +921,7 @@ func (client ExpressRouteCircuitsClient) UpdateTags(ctx context.Context, resourc
result, err = client.UpdateTagsSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "UpdateTags", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "UpdateTags", nil, "Failure sending request")
return
}
@@ -955,7 +959,10 @@ func (client ExpressRouteCircuitsClient) UpdateTagsSender(req *http.Request) (fu
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressrouteconnections.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressrouteconnections.go
index 17ccd95ee..a3dfd826f 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressrouteconnections.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressrouteconnections.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -54,8 +43,8 @@ func (client ExpressRouteConnectionsClient) CreateOrUpdate(ctx context.Context,
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteConnectionsClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -76,7 +65,7 @@ func (client ExpressRouteConnectionsClient) CreateOrUpdate(ctx context.Context,
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ExpressRouteConnectionsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ExpressRouteConnectionsClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -115,7 +104,10 @@ func (client ExpressRouteConnectionsClient) CreateOrUpdateSender(req *http.Reque
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -141,8 +133,8 @@ func (client ExpressRouteConnectionsClient) Delete(ctx context.Context, resource
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteConnectionsClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -155,7 +147,7 @@ func (client ExpressRouteConnectionsClient) Delete(ctx context.Context, resource
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ExpressRouteConnectionsClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ExpressRouteConnectionsClient", "Delete", nil, "Failure sending request")
return
}
@@ -192,7 +184,10 @@ func (client ExpressRouteConnectionsClient) DeleteSender(req *http.Request) (fut
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressroutecrossconnectionpeerings.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressroutecrossconnectionpeerings.go
index 3f29d8b9d..4c3339543 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressroutecrossconnectionpeerings.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressroutecrossconnectionpeerings.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -56,8 +45,8 @@ func (client ExpressRouteCrossConnectionPeeringsClient) CreateOrUpdate(ctx conte
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCrossConnectionPeeringsClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -81,7 +70,7 @@ func (client ExpressRouteCrossConnectionPeeringsClient) CreateOrUpdate(ctx conte
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionPeeringsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionPeeringsClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -121,7 +110,10 @@ func (client ExpressRouteCrossConnectionPeeringsClient) CreateOrUpdateSender(req
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -147,8 +139,8 @@ func (client ExpressRouteCrossConnectionPeeringsClient) Delete(ctx context.Conte
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCrossConnectionPeeringsClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -161,7 +153,7 @@ func (client ExpressRouteCrossConnectionPeeringsClient) Delete(ctx context.Conte
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionPeeringsClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionPeeringsClient", "Delete", nil, "Failure sending request")
return
}
@@ -198,7 +190,10 @@ func (client ExpressRouteCrossConnectionPeeringsClient) DeleteSender(req *http.R
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -327,6 +322,7 @@ func (client ExpressRouteCrossConnectionPeeringsClient) List(ctx context.Context
}
if result.erccpl.hasNextLink() && result.erccpl.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -388,7 +384,6 @@ func (client ExpressRouteCrossConnectionPeeringsClient) listNextResults(ctx cont
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionPeeringsClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressroutecrossconnections.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressroutecrossconnections.go
index f4ee3ea20..8a98df8ed 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressroutecrossconnections.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressroutecrossconnections.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -52,8 +41,8 @@ func (client ExpressRouteCrossConnectionsClient) CreateOrUpdate(ctx context.Cont
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCrossConnectionsClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -66,7 +55,7 @@ func (client ExpressRouteCrossConnectionsClient) CreateOrUpdate(ctx context.Cont
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -105,7 +94,10 @@ func (client ExpressRouteCrossConnectionsClient) CreateOrUpdateSender(req *http.
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -230,6 +222,7 @@ func (client ExpressRouteCrossConnectionsClient) List(ctx context.Context) (resu
}
if result.ercclr.hasNextLink() && result.ercclr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -289,7 +282,6 @@ func (client ExpressRouteCrossConnectionsClient) listNextResults(ctx context.Con
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -322,8 +314,8 @@ func (client ExpressRouteCrossConnectionsClient) ListArpTable(ctx context.Contex
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCrossConnectionsClient.ListArpTable")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -336,7 +328,7 @@ func (client ExpressRouteCrossConnectionsClient) ListArpTable(ctx context.Contex
result, err = client.ListArpTableSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "ListArpTable", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "ListArpTable", nil, "Failure sending request")
return
}
@@ -374,7 +366,10 @@ func (client ExpressRouteCrossConnectionsClient) ListArpTableSender(req *http.Re
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -425,6 +420,7 @@ func (client ExpressRouteCrossConnectionsClient) ListByResourceGroup(ctx context
}
if result.ercclr.hasNextLink() && result.ercclr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -485,7 +481,6 @@ func (client ExpressRouteCrossConnectionsClient) listByResourceGroupNextResults(
result, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -518,8 +513,8 @@ func (client ExpressRouteCrossConnectionsClient) ListRoutesTable(ctx context.Con
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCrossConnectionsClient.ListRoutesTable")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -532,7 +527,7 @@ func (client ExpressRouteCrossConnectionsClient) ListRoutesTable(ctx context.Con
result, err = client.ListRoutesTableSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "ListRoutesTable", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "ListRoutesTable", nil, "Failure sending request")
return
}
@@ -570,7 +565,10 @@ func (client ExpressRouteCrossConnectionsClient) ListRoutesTableSender(req *http
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -598,8 +596,8 @@ func (client ExpressRouteCrossConnectionsClient) ListRoutesTableSummary(ctx cont
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCrossConnectionsClient.ListRoutesTableSummary")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -612,7 +610,7 @@ func (client ExpressRouteCrossConnectionsClient) ListRoutesTableSummary(ctx cont
result, err = client.ListRoutesTableSummarySender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "ListRoutesTableSummary", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "ListRoutesTableSummary", nil, "Failure sending request")
return
}
@@ -650,7 +648,10 @@ func (client ExpressRouteCrossConnectionsClient) ListRoutesTableSummarySender(re
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -676,8 +677,8 @@ func (client ExpressRouteCrossConnectionsClient) UpdateTags(ctx context.Context,
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCrossConnectionsClient.UpdateTags")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -690,7 +691,7 @@ func (client ExpressRouteCrossConnectionsClient) UpdateTags(ctx context.Context,
result, err = client.UpdateTagsSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "UpdateTags", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "UpdateTags", nil, "Failure sending request")
return
}
@@ -728,7 +729,10 @@ func (client ExpressRouteCrossConnectionsClient) UpdateTagsSender(req *http.Requ
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressroutegateways.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressroutegateways.go
index a50314170..0a248c731 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressroutegateways.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressroutegateways.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -53,8 +42,8 @@ func (client ExpressRouteGatewaysClient) CreateOrUpdate(ctx context.Context, res
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteGatewaysClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -74,7 +63,7 @@ func (client ExpressRouteGatewaysClient) CreateOrUpdate(ctx context.Context, res
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ExpressRouteGatewaysClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ExpressRouteGatewaysClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -113,7 +102,10 @@ func (client ExpressRouteGatewaysClient) CreateOrUpdateSender(req *http.Request)
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -139,8 +131,8 @@ func (client ExpressRouteGatewaysClient) Delete(ctx context.Context, resourceGro
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteGatewaysClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -153,7 +145,7 @@ func (client ExpressRouteGatewaysClient) Delete(ctx context.Context, resourceGro
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ExpressRouteGatewaysClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ExpressRouteGatewaysClient", "Delete", nil, "Failure sending request")
return
}
@@ -189,7 +181,10 @@ func (client ExpressRouteGatewaysClient) DeleteSender(req *http.Request) (future
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressroutelinks.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressroutelinks.go
index d9e3ac821..1c4d6060c 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressroutelinks.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressroutelinks.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -156,6 +145,7 @@ func (client ExpressRouteLinksClient) List(ctx context.Context, resourceGroupNam
}
if result.erllr.hasNextLink() && result.erllr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -217,7 +207,6 @@ func (client ExpressRouteLinksClient) listNextResults(ctx context.Context, lastR
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteLinksClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressrouteports.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressrouteports.go
index 4196cf9a2..874c9a0bb 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressrouteports.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressrouteports.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -52,8 +41,8 @@ func (client ExpressRoutePortsClient) CreateOrUpdate(ctx context.Context, resour
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRoutePortsClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -66,7 +55,7 @@ func (client ExpressRoutePortsClient) CreateOrUpdate(ctx context.Context, resour
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ExpressRoutePortsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ExpressRoutePortsClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -105,7 +94,10 @@ func (client ExpressRoutePortsClient) CreateOrUpdateSender(req *http.Request) (f
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -130,8 +122,8 @@ func (client ExpressRoutePortsClient) Delete(ctx context.Context, resourceGroupN
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRoutePortsClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -144,7 +136,7 @@ func (client ExpressRoutePortsClient) Delete(ctx context.Context, resourceGroupN
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ExpressRoutePortsClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ExpressRoutePortsClient", "Delete", nil, "Failure sending request")
return
}
@@ -180,7 +172,10 @@ func (client ExpressRoutePortsClient) DeleteSender(req *http.Request) (future Ex
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -304,6 +299,7 @@ func (client ExpressRoutePortsClient) List(ctx context.Context) (result ExpressR
}
if result.erplr.hasNextLink() && result.erplr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -363,7 +359,6 @@ func (client ExpressRoutePortsClient) listNextResults(ctx context.Context, lastR
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRoutePortsClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -419,6 +414,7 @@ func (client ExpressRoutePortsClient) ListByResourceGroup(ctx context.Context, r
}
if result.erplr.hasNextLink() && result.erplr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -479,7 +475,6 @@ func (client ExpressRoutePortsClient) listByResourceGroupNextResults(ctx context
result, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRoutePortsClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -510,8 +505,8 @@ func (client ExpressRoutePortsClient) UpdateTags(ctx context.Context, resourceGr
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRoutePortsClient.UpdateTags")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -524,7 +519,7 @@ func (client ExpressRoutePortsClient) UpdateTags(ctx context.Context, resourceGr
result, err = client.UpdateTagsSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ExpressRoutePortsClient", "UpdateTags", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ExpressRoutePortsClient", "UpdateTags", nil, "Failure sending request")
return
}
@@ -562,7 +557,10 @@ func (client ExpressRoutePortsClient) UpdateTagsSender(req *http.Request) (futur
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressrouteportslocations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressrouteportslocations.go
index 9742360f1..4a11798ad 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressrouteportslocations.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressrouteportslocations.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -151,6 +140,7 @@ func (client ExpressRoutePortsLocationsClient) List(ctx context.Context) (result
}
if result.erpllr.hasNextLink() && result.erpllr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -210,7 +200,6 @@ func (client ExpressRoutePortsLocationsClient) listNextResults(ctx context.Conte
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRoutePortsLocationsClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressrouteserviceproviders.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressrouteserviceproviders.go
index beb65bd21..48de50145 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressrouteserviceproviders.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/expressrouteserviceproviders.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -75,6 +64,7 @@ func (client ExpressRouteServiceProvidersClient) List(ctx context.Context) (resu
}
if result.ersplr.hasNextLink() && result.ersplr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -134,7 +124,6 @@ func (client ExpressRouteServiceProvidersClient) listNextResults(ctx context.Con
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteServiceProvidersClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/hubvirtualnetworkconnections.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/hubvirtualnetworkconnections.go
index cbdb7bcbb..7369edba5 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/hubvirtualnetworkconnections.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/hubvirtualnetworkconnections.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -156,6 +145,7 @@ func (client HubVirtualNetworkConnectionsClient) List(ctx context.Context, resou
}
if result.lhvncr.hasNextLink() && result.lhvncr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -217,7 +207,6 @@ func (client HubVirtualNetworkConnectionsClient) listNextResults(ctx context.Con
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.HubVirtualNetworkConnectionsClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/inboundnatrules.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/inboundnatrules.go
index 8ca0bb719..f54d1c29d 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/inboundnatrules.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/inboundnatrules.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -53,8 +42,8 @@ func (client InboundNatRulesClient) CreateOrUpdate(ctx context.Context, resource
ctx = tracing.StartSpan(ctx, fqdn+"/InboundNatRulesClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -86,7 +75,7 @@ func (client InboundNatRulesClient) CreateOrUpdate(ctx context.Context, resource
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.InboundNatRulesClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.InboundNatRulesClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -125,7 +114,10 @@ func (client InboundNatRulesClient) CreateOrUpdateSender(req *http.Request) (fut
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -151,8 +143,8 @@ func (client InboundNatRulesClient) Delete(ctx context.Context, resourceGroupNam
ctx = tracing.StartSpan(ctx, fqdn+"/InboundNatRulesClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -165,7 +157,7 @@ func (client InboundNatRulesClient) Delete(ctx context.Context, resourceGroupNam
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.InboundNatRulesClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.InboundNatRulesClient", "Delete", nil, "Failure sending request")
return
}
@@ -202,7 +194,10 @@ func (client InboundNatRulesClient) DeleteSender(req *http.Request) (future Inbo
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -335,6 +330,7 @@ func (client InboundNatRulesClient) List(ctx context.Context, resourceGroupName
}
if result.inrlr.hasNextLink() && result.inrlr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -396,7 +392,6 @@ func (client InboundNatRulesClient) listNextResults(ctx context.Context, lastRes
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InboundNatRulesClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/interfaceendpoints.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/interfaceendpoints.go
index d6d513140..d1a7db78f 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/interfaceendpoints.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/interfaceendpoints.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -52,8 +41,8 @@ func (client InterfaceEndpointsClient) CreateOrUpdate(ctx context.Context, resou
ctx = tracing.StartSpan(ctx, fqdn+"/InterfaceEndpointsClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -66,7 +55,7 @@ func (client InterfaceEndpointsClient) CreateOrUpdate(ctx context.Context, resou
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.InterfaceEndpointsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.InterfaceEndpointsClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -104,7 +93,10 @@ func (client InterfaceEndpointsClient) CreateOrUpdateSender(req *http.Request) (
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -129,8 +121,8 @@ func (client InterfaceEndpointsClient) Delete(ctx context.Context, resourceGroup
ctx = tracing.StartSpan(ctx, fqdn+"/InterfaceEndpointsClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -143,7 +135,7 @@ func (client InterfaceEndpointsClient) Delete(ctx context.Context, resourceGroup
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.InterfaceEndpointsClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.InterfaceEndpointsClient", "Delete", nil, "Failure sending request")
return
}
@@ -179,7 +171,10 @@ func (client InterfaceEndpointsClient) DeleteSender(req *http.Request) (future I
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -309,6 +304,7 @@ func (client InterfaceEndpointsClient) List(ctx context.Context, resourceGroupNa
}
if result.ielr.hasNextLink() && result.ielr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -369,7 +365,6 @@ func (client InterfaceEndpointsClient) listNextResults(ctx context.Context, last
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfaceEndpointsClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -423,6 +418,7 @@ func (client InterfaceEndpointsClient) ListBySubscription(ctx context.Context) (
}
if result.ielr.hasNextLink() && result.ielr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -482,7 +478,6 @@ func (client InterfaceEndpointsClient) listBySubscriptionNextResults(ctx context
result, err = client.ListBySubscriptionResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfaceEndpointsClient", "listBySubscriptionNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/interfaceipconfigurations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/interfaceipconfigurations.go
index 937e884c7..f3247fbd1 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/interfaceipconfigurations.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/interfaceipconfigurations.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -156,6 +145,7 @@ func (client InterfaceIPConfigurationsClient) List(ctx context.Context, resource
}
if result.iiclr.hasNextLink() && result.iiclr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -217,7 +207,6 @@ func (client InterfaceIPConfigurationsClient) listNextResults(ctx context.Contex
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfaceIPConfigurationsClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/interfaceloadbalancers.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/interfaceloadbalancers.go
index fe15a5f9d..77302fafb 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/interfaceloadbalancers.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/interfaceloadbalancers.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -78,6 +67,7 @@ func (client InterfaceLoadBalancersClient) List(ctx context.Context, resourceGro
}
if result.ilblr.hasNextLink() && result.ilblr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -139,7 +129,6 @@ func (client InterfaceLoadBalancersClient) listNextResults(ctx context.Context,
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfaceLoadBalancersClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/interfacesgroup.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/interfacesgroup.go
index e8c646a12..6dfe96914 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/interfacesgroup.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/interfacesgroup.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -51,8 +40,8 @@ func (client InterfacesClient) CreateOrUpdate(ctx context.Context, resourceGroup
ctx = tracing.StartSpan(ctx, fqdn+"/InterfacesClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -65,7 +54,7 @@ func (client InterfacesClient) CreateOrUpdate(ctx context.Context, resourceGroup
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.InterfacesClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.InterfacesClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -103,7 +92,10 @@ func (client InterfacesClient) CreateOrUpdateSender(req *http.Request) (future I
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -128,8 +120,8 @@ func (client InterfacesClient) Delete(ctx context.Context, resourceGroupName str
ctx = tracing.StartSpan(ctx, fqdn+"/InterfacesClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -142,7 +134,7 @@ func (client InterfacesClient) Delete(ctx context.Context, resourceGroupName str
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.InterfacesClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.InterfacesClient", "Delete", nil, "Failure sending request")
return
}
@@ -178,7 +170,10 @@ func (client InterfacesClient) DeleteSender(req *http.Request) (future Interface
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -282,8 +277,8 @@ func (client InterfacesClient) GetEffectiveRouteTable(ctx context.Context, resou
ctx = tracing.StartSpan(ctx, fqdn+"/InterfacesClient.GetEffectiveRouteTable")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -296,7 +291,7 @@ func (client InterfacesClient) GetEffectiveRouteTable(ctx context.Context, resou
result, err = client.GetEffectiveRouteTableSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.InterfacesClient", "GetEffectiveRouteTable", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.InterfacesClient", "GetEffectiveRouteTable", nil, "Failure sending request")
return
}
@@ -332,7 +327,10 @@ func (client InterfacesClient) GetEffectiveRouteTableSender(req *http.Request) (
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -554,6 +552,7 @@ func (client InterfacesClient) List(ctx context.Context, resourceGroupName strin
}
if result.ilr.hasNextLink() && result.ilr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -614,7 +613,6 @@ func (client InterfacesClient) listNextResults(ctx context.Context, lastResults
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -668,6 +666,7 @@ func (client InterfacesClient) ListAll(ctx context.Context) (result InterfaceLis
}
if result.ilr.hasNextLink() && result.ilr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -727,7 +726,6 @@ func (client InterfacesClient) listAllNextResults(ctx context.Context, lastResul
result, err = client.ListAllResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "listAllNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -757,8 +755,8 @@ func (client InterfacesClient) ListEffectiveNetworkSecurityGroups(ctx context.Co
ctx = tracing.StartSpan(ctx, fqdn+"/InterfacesClient.ListEffectiveNetworkSecurityGroups")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -771,7 +769,7 @@ func (client InterfacesClient) ListEffectiveNetworkSecurityGroups(ctx context.Co
result, err = client.ListEffectiveNetworkSecurityGroupsSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListEffectiveNetworkSecurityGroups", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListEffectiveNetworkSecurityGroups", nil, "Failure sending request")
return
}
@@ -807,7 +805,10 @@ func (client InterfacesClient) ListEffectiveNetworkSecurityGroupsSender(req *htt
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -863,6 +864,7 @@ func (client InterfacesClient) ListVirtualMachineScaleSetIPConfigurations(ctx co
}
if result.iiclr.hasNextLink() && result.iiclr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -929,7 +931,6 @@ func (client InterfacesClient) listVirtualMachineScaleSetIPConfigurationsNextRes
result, err = client.ListVirtualMachineScaleSetIPConfigurationsResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "listVirtualMachineScaleSetIPConfigurationsNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -986,6 +987,7 @@ func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfaces(ctx c
}
if result.ilr.hasNextLink() && result.ilr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -1047,7 +1049,6 @@ func (client InterfacesClient) listVirtualMachineScaleSetNetworkInterfacesNextRe
result, err = client.ListVirtualMachineScaleSetNetworkInterfacesResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "listVirtualMachineScaleSetNetworkInterfacesNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -1106,6 +1107,7 @@ func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfaces(ctx
}
if result.ilr.hasNextLink() && result.ilr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -1168,7 +1170,6 @@ func (client InterfacesClient) listVirtualMachineScaleSetVMNetworkInterfacesNext
result, err = client.ListVirtualMachineScaleSetVMNetworkInterfacesResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "listVirtualMachineScaleSetVMNetworkInterfacesNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -1199,8 +1200,8 @@ func (client InterfacesClient) UpdateTags(ctx context.Context, resourceGroupName
ctx = tracing.StartSpan(ctx, fqdn+"/InterfacesClient.UpdateTags")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -1213,7 +1214,7 @@ func (client InterfacesClient) UpdateTags(ctx context.Context, resourceGroupName
result, err = client.UpdateTagsSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.InterfacesClient", "UpdateTags", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.InterfacesClient", "UpdateTags", nil, "Failure sending request")
return
}
@@ -1251,7 +1252,10 @@ func (client InterfacesClient) UpdateTagsSender(req *http.Request) (future Inter
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/interfacetapconfigurations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/interfacetapconfigurations.go
index 7cfafabf9..0f52e8d81 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/interfacetapconfigurations.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/interfacetapconfigurations.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -54,8 +43,8 @@ func (client InterfaceTapConfigurationsClient) CreateOrUpdate(ctx context.Contex
ctx = tracing.StartSpan(ctx, fqdn+"/InterfaceTapConfigurationsClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -103,7 +92,7 @@ func (client InterfaceTapConfigurationsClient) CreateOrUpdate(ctx context.Contex
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.InterfaceTapConfigurationsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.InterfaceTapConfigurationsClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -143,7 +132,10 @@ func (client InterfaceTapConfigurationsClient) CreateOrUpdateSender(req *http.Re
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -169,8 +161,8 @@ func (client InterfaceTapConfigurationsClient) Delete(ctx context.Context, resou
ctx = tracing.StartSpan(ctx, fqdn+"/InterfaceTapConfigurationsClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -183,7 +175,7 @@ func (client InterfaceTapConfigurationsClient) Delete(ctx context.Context, resou
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.InterfaceTapConfigurationsClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.InterfaceTapConfigurationsClient", "Delete", nil, "Failure sending request")
return
}
@@ -220,7 +212,10 @@ func (client InterfaceTapConfigurationsClient) DeleteSender(req *http.Request) (
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -349,6 +344,7 @@ func (client InterfaceTapConfigurationsClient) List(ctx context.Context, resourc
}
if result.itclr.hasNextLink() && result.itclr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -410,7 +406,6 @@ func (client InterfaceTapConfigurationsClient) listNextResults(ctx context.Conte
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfaceTapConfigurationsClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/loadbalancerbackendaddresspools.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/loadbalancerbackendaddresspools.go
index a7336db59..9c1f5a9b9 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/loadbalancerbackendaddresspools.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/loadbalancerbackendaddresspools.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -156,6 +145,7 @@ func (client LoadBalancerBackendAddressPoolsClient) List(ctx context.Context, re
}
if result.lbbaplr.hasNextLink() && result.lbbaplr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -217,7 +207,6 @@ func (client LoadBalancerBackendAddressPoolsClient) listNextResults(ctx context.
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.LoadBalancerBackendAddressPoolsClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/loadbalancerfrontendipconfigurations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/loadbalancerfrontendipconfigurations.go
index f777854d3..514e14bf2 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/loadbalancerfrontendipconfigurations.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/loadbalancerfrontendipconfigurations.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -157,6 +146,7 @@ func (client LoadBalancerFrontendIPConfigurationsClient) List(ctx context.Contex
}
if result.lbficlr.hasNextLink() && result.lbficlr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -218,7 +208,6 @@ func (client LoadBalancerFrontendIPConfigurationsClient) listNextResults(ctx con
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.LoadBalancerFrontendIPConfigurationsClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/loadbalancerloadbalancingrules.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/loadbalancerloadbalancingrules.go
index b2d20a470..d74adc96d 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/loadbalancerloadbalancingrules.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/loadbalancerloadbalancingrules.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -156,6 +145,7 @@ func (client LoadBalancerLoadBalancingRulesClient) List(ctx context.Context, res
}
if result.lblbrlr.hasNextLink() && result.lblbrlr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -217,7 +207,6 @@ func (client LoadBalancerLoadBalancingRulesClient) listNextResults(ctx context.C
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.LoadBalancerLoadBalancingRulesClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/loadbalancernetworkinterfaces.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/loadbalancernetworkinterfaces.go
index 5cd443852..471399113 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/loadbalancernetworkinterfaces.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/loadbalancernetworkinterfaces.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -78,6 +67,7 @@ func (client LoadBalancerNetworkInterfacesClient) List(ctx context.Context, reso
}
if result.ilr.hasNextLink() && result.ilr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -139,7 +129,6 @@ func (client LoadBalancerNetworkInterfacesClient) listNextResults(ctx context.Co
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.LoadBalancerNetworkInterfacesClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/loadbalanceroutboundrules.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/loadbalanceroutboundrules.go
index 6162e1310..acfbed6ff 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/loadbalanceroutboundrules.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/loadbalanceroutboundrules.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -156,6 +145,7 @@ func (client LoadBalancerOutboundRulesClient) List(ctx context.Context, resource
}
if result.lborlr.hasNextLink() && result.lborlr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -217,7 +207,6 @@ func (client LoadBalancerOutboundRulesClient) listNextResults(ctx context.Contex
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.LoadBalancerOutboundRulesClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/loadbalancerprobes.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/loadbalancerprobes.go
index 8e6f98154..bcfce8e84 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/loadbalancerprobes.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/loadbalancerprobes.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -156,6 +145,7 @@ func (client LoadBalancerProbesClient) List(ctx context.Context, resourceGroupNa
}
if result.lbplr.hasNextLink() && result.lbplr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -217,7 +207,6 @@ func (client LoadBalancerProbesClient) listNextResults(ctx context.Context, last
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.LoadBalancerProbesClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/loadbalancers.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/loadbalancers.go
index 74dc90c87..0efe6b08d 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/loadbalancers.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/loadbalancers.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -51,8 +40,8 @@ func (client LoadBalancersClient) CreateOrUpdate(ctx context.Context, resourceGr
ctx = tracing.StartSpan(ctx, fqdn+"/LoadBalancersClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -65,7 +54,7 @@ func (client LoadBalancersClient) CreateOrUpdate(ctx context.Context, resourceGr
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -103,7 +92,10 @@ func (client LoadBalancersClient) CreateOrUpdateSender(req *http.Request) (futur
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -128,8 +120,8 @@ func (client LoadBalancersClient) Delete(ctx context.Context, resourceGroupName
ctx = tracing.StartSpan(ctx, fqdn+"/LoadBalancersClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -142,7 +134,7 @@ func (client LoadBalancersClient) Delete(ctx context.Context, resourceGroupName
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "Delete", nil, "Failure sending request")
return
}
@@ -178,7 +170,10 @@ func (client LoadBalancersClient) DeleteSender(req *http.Request) (future LoadBa
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -308,6 +303,7 @@ func (client LoadBalancersClient) List(ctx context.Context, resourceGroupName st
}
if result.lblr.hasNextLink() && result.lblr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -368,7 +364,6 @@ func (client LoadBalancersClient) listNextResults(ctx context.Context, lastResul
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -422,6 +417,7 @@ func (client LoadBalancersClient) ListAll(ctx context.Context) (result LoadBalan
}
if result.lblr.hasNextLink() && result.lblr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -481,7 +477,6 @@ func (client LoadBalancersClient) listAllNextResults(ctx context.Context, lastRe
result, err = client.ListAllResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "listAllNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -512,8 +507,8 @@ func (client LoadBalancersClient) UpdateTags(ctx context.Context, resourceGroupN
ctx = tracing.StartSpan(ctx, fqdn+"/LoadBalancersClient.UpdateTags")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -526,7 +521,7 @@ func (client LoadBalancersClient) UpdateTags(ctx context.Context, resourceGroupN
result, err = client.UpdateTagsSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "UpdateTags", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "UpdateTags", nil, "Failure sending request")
return
}
@@ -564,7 +559,10 @@ func (client LoadBalancersClient) UpdateTagsSender(req *http.Request) (future Lo
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/localnetworkgateways.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/localnetworkgateways.go
index ebf91600e..2cd980a88 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/localnetworkgateways.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/localnetworkgateways.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -53,8 +42,8 @@ func (client LocalNetworkGatewaysClient) CreateOrUpdate(ctx context.Context, res
ctx = tracing.StartSpan(ctx, fqdn+"/LocalNetworkGatewaysClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -75,7 +64,7 @@ func (client LocalNetworkGatewaysClient) CreateOrUpdate(ctx context.Context, res
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -113,7 +102,10 @@ func (client LocalNetworkGatewaysClient) CreateOrUpdateSender(req *http.Request)
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -138,8 +130,8 @@ func (client LocalNetworkGatewaysClient) Delete(ctx context.Context, resourceGro
ctx = tracing.StartSpan(ctx, fqdn+"/LocalNetworkGatewaysClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -158,7 +150,7 @@ func (client LocalNetworkGatewaysClient) Delete(ctx context.Context, resourceGro
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "Delete", nil, "Failure sending request")
return
}
@@ -194,7 +186,10 @@ func (client LocalNetworkGatewaysClient) DeleteSender(req *http.Request) (future
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -326,6 +321,7 @@ func (client LocalNetworkGatewaysClient) List(ctx context.Context, resourceGroup
}
if result.lnglr.hasNextLink() && result.lnglr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -386,7 +382,6 @@ func (client LocalNetworkGatewaysClient) listNextResults(ctx context.Context, la
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -417,8 +412,8 @@ func (client LocalNetworkGatewaysClient) UpdateTags(ctx context.Context, resourc
ctx = tracing.StartSpan(ctx, fqdn+"/LocalNetworkGatewaysClient.UpdateTags")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -437,7 +432,7 @@ func (client LocalNetworkGatewaysClient) UpdateTags(ctx context.Context, resourc
result, err = client.UpdateTagsSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "UpdateTags", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "UpdateTags", nil, "Failure sending request")
return
}
@@ -475,7 +470,10 @@ func (client LocalNetworkGatewaysClient) UpdateTagsSender(req *http.Request) (fu
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/models.go
index a90d994a3..4e296dc54 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/models.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/models.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -2323,12 +2312,25 @@ func (agrrspf ApplicationGatewayRewriteRuleSetPropertiesFormat) MarshalJSON() ([
// ApplicationGatewaysBackendHealthFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type ApplicationGatewaysBackendHealthFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ApplicationGatewaysClient) (ApplicationGatewayBackendHealth, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ApplicationGatewaysBackendHealthFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ApplicationGatewaysBackendHealthFuture) Result(client ApplicationGatewaysClient) (agbh ApplicationGatewayBackendHealth, err error) {
+// result is the default implementation for ApplicationGatewaysBackendHealthFuture.Result.
+func (future *ApplicationGatewaysBackendHealthFuture) result(client ApplicationGatewaysClient) (agbh ApplicationGatewayBackendHealth, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -2336,6 +2338,7 @@ func (future *ApplicationGatewaysBackendHealthFuture) Result(client ApplicationG
return
}
if !done {
+ agbh.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysBackendHealthFuture")
return
}
@@ -2352,12 +2355,25 @@ func (future *ApplicationGatewaysBackendHealthFuture) Result(client ApplicationG
// ApplicationGatewaysCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type ApplicationGatewaysCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ApplicationGatewaysClient) (ApplicationGateway, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ApplicationGatewaysCreateOrUpdateFuture) Result(client ApplicationGatewaysClient) (ag ApplicationGateway, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ApplicationGatewaysCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for ApplicationGatewaysCreateOrUpdateFuture.Result.
+func (future *ApplicationGatewaysCreateOrUpdateFuture) result(client ApplicationGatewaysClient) (ag ApplicationGateway, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -2365,6 +2381,7 @@ func (future *ApplicationGatewaysCreateOrUpdateFuture) Result(client Application
return
}
if !done {
+ ag.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysCreateOrUpdateFuture")
return
}
@@ -2381,12 +2398,25 @@ func (future *ApplicationGatewaysCreateOrUpdateFuture) Result(client Application
// ApplicationGatewaysDeleteFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type ApplicationGatewaysDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ApplicationGatewaysClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ApplicationGatewaysDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ApplicationGatewaysDeleteFuture) Result(client ApplicationGatewaysClient) (ar autorest.Response, err error) {
+// result is the default implementation for ApplicationGatewaysDeleteFuture.Result.
+func (future *ApplicationGatewaysDeleteFuture) result(client ApplicationGatewaysClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -2394,6 +2424,7 @@ func (future *ApplicationGatewaysDeleteFuture) Result(client ApplicationGateways
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysDeleteFuture")
return
}
@@ -2613,12 +2644,25 @@ type ApplicationGatewaySslPredefinedPolicyPropertiesFormat struct {
// ApplicationGatewaysStartFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type ApplicationGatewaysStartFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ApplicationGatewaysClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ApplicationGatewaysStartFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ApplicationGatewaysStartFuture) Result(client ApplicationGatewaysClient) (ar autorest.Response, err error) {
+// result is the default implementation for ApplicationGatewaysStartFuture.Result.
+func (future *ApplicationGatewaysStartFuture) result(client ApplicationGatewaysClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -2626,6 +2670,7 @@ func (future *ApplicationGatewaysStartFuture) Result(client ApplicationGatewaysC
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysStartFuture")
return
}
@@ -2636,12 +2681,25 @@ func (future *ApplicationGatewaysStartFuture) Result(client ApplicationGatewaysC
// ApplicationGatewaysStopFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type ApplicationGatewaysStopFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ApplicationGatewaysClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ApplicationGatewaysStopFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ApplicationGatewaysStopFuture) Result(client ApplicationGatewaysClient) (ar autorest.Response, err error) {
+// result is the default implementation for ApplicationGatewaysStopFuture.Result.
+func (future *ApplicationGatewaysStopFuture) result(client ApplicationGatewaysClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -2649,6 +2707,7 @@ func (future *ApplicationGatewaysStopFuture) Result(client ApplicationGatewaysCl
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysStopFuture")
return
}
@@ -2659,12 +2718,25 @@ func (future *ApplicationGatewaysStopFuture) Result(client ApplicationGatewaysCl
// ApplicationGatewaysUpdateTagsFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type ApplicationGatewaysUpdateTagsFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ApplicationGatewaysClient) (ApplicationGateway, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ApplicationGatewaysUpdateTagsFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ApplicationGatewaysUpdateTagsFuture) Result(client ApplicationGatewaysClient) (ag ApplicationGateway, err error) {
+// result is the default implementation for ApplicationGatewaysUpdateTagsFuture.Result.
+func (future *ApplicationGatewaysUpdateTagsFuture) result(client ApplicationGatewaysClient) (ag ApplicationGateway, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -2672,6 +2744,7 @@ func (future *ApplicationGatewaysUpdateTagsFuture) Result(client ApplicationGate
return
}
if !done {
+ ag.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysUpdateTagsFuture")
return
}
@@ -3218,15 +3291,34 @@ type ApplicationSecurityGroupPropertiesFormat struct {
ProvisioningState *string `json:"provisioningState,omitempty"`
}
+// MarshalJSON is the custom marshaler for ApplicationSecurityGroupPropertiesFormat.
+func (asgpf ApplicationSecurityGroupPropertiesFormat) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// ApplicationSecurityGroupsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results
// of a long-running operation.
type ApplicationSecurityGroupsCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ApplicationSecurityGroupsClient) (ApplicationSecurityGroup, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ApplicationSecurityGroupsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ApplicationSecurityGroupsCreateOrUpdateFuture) Result(client ApplicationSecurityGroupsClient) (asg ApplicationSecurityGroup, err error) {
+// result is the default implementation for ApplicationSecurityGroupsCreateOrUpdateFuture.Result.
+func (future *ApplicationSecurityGroupsCreateOrUpdateFuture) result(client ApplicationSecurityGroupsClient) (asg ApplicationSecurityGroup, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -3234,6 +3326,7 @@ func (future *ApplicationSecurityGroupsCreateOrUpdateFuture) Result(client Appli
return
}
if !done {
+ asg.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ApplicationSecurityGroupsCreateOrUpdateFuture")
return
}
@@ -3250,12 +3343,25 @@ func (future *ApplicationSecurityGroupsCreateOrUpdateFuture) Result(client Appli
// ApplicationSecurityGroupsDeleteFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type ApplicationSecurityGroupsDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ApplicationSecurityGroupsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ApplicationSecurityGroupsDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ApplicationSecurityGroupsDeleteFuture) Result(client ApplicationSecurityGroupsClient) (ar autorest.Response, err error) {
+// result is the default implementation for ApplicationSecurityGroupsDeleteFuture.Result.
+func (future *ApplicationSecurityGroupsDeleteFuture) result(client ApplicationSecurityGroupsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -3263,6 +3369,7 @@ func (future *ApplicationSecurityGroupsDeleteFuture) Result(client ApplicationSe
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ApplicationSecurityGroupsDeleteFuture")
return
}
@@ -4202,6 +4309,12 @@ type AzureFirewallFqdnTagPropertiesFormat struct {
FqdnTagName *string `json:"fqdnTagName,omitempty"`
}
+// MarshalJSON is the custom marshaler for AzureFirewallFqdnTagPropertiesFormat.
+func (afftpf AzureFirewallFqdnTagPropertiesFormat) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// AzureFirewallIPConfiguration IP configuration of an Azure Firewall.
type AzureFirewallIPConfiguration struct {
*AzureFirewallIPConfigurationPropertiesFormat `json:"properties,omitempty"`
@@ -4708,12 +4821,25 @@ type AzureFirewallRCAction struct {
// AzureFirewallsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type AzureFirewallsCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(AzureFirewallsClient) (AzureFirewall, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *AzureFirewallsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *AzureFirewallsCreateOrUpdateFuture) Result(client AzureFirewallsClient) (af AzureFirewall, err error) {
+// result is the default implementation for AzureFirewallsCreateOrUpdateFuture.Result.
+func (future *AzureFirewallsCreateOrUpdateFuture) result(client AzureFirewallsClient) (af AzureFirewall, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -4721,6 +4847,7 @@ func (future *AzureFirewallsCreateOrUpdateFuture) Result(client AzureFirewallsCl
return
}
if !done {
+ af.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.AzureFirewallsCreateOrUpdateFuture")
return
}
@@ -4737,12 +4864,25 @@ func (future *AzureFirewallsCreateOrUpdateFuture) Result(client AzureFirewallsCl
// AzureFirewallsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type AzureFirewallsDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(AzureFirewallsClient) (autorest.Response, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *AzureFirewallsDeleteFuture) Result(client AzureFirewallsClient) (ar autorest.Response, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *AzureFirewallsDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for AzureFirewallsDeleteFuture.Result.
+func (future *AzureFirewallsDeleteFuture) result(client AzureFirewallsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -4750,6 +4890,7 @@ func (future *AzureFirewallsDeleteFuture) Result(client AzureFirewallsClient) (a
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.AzureFirewallsDeleteFuture")
return
}
@@ -4949,6 +5090,12 @@ type BgpPeerStatus struct {
MessagesReceived *int64 `json:"messagesReceived,omitempty"`
}
+// MarshalJSON is the custom marshaler for BgpPeerStatus.
+func (bps BgpPeerStatus) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// BgpPeerStatusListResult response for list BGP peer status API service call
type BgpPeerStatusListResult struct {
autorest.Response `json:"-"`
@@ -5267,6 +5414,12 @@ type ConfigurationDiagnosticResponse struct {
Results *[]ConfigurationDiagnosticResult `json:"results,omitempty"`
}
+// MarshalJSON is the custom marshaler for ConfigurationDiagnosticResponse.
+func (cdr ConfigurationDiagnosticResponse) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// ConfigurationDiagnosticResult network configuration diagnostic result corresponded to provided traffic
// query.
type ConfigurationDiagnosticResult struct {
@@ -5508,12 +5661,25 @@ type ConnectionMonitorResultProperties struct {
// ConnectionMonitorsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type ConnectionMonitorsCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ConnectionMonitorsClient) (ConnectionMonitorResult, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ConnectionMonitorsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ConnectionMonitorsCreateOrUpdateFuture) Result(client ConnectionMonitorsClient) (cmr ConnectionMonitorResult, err error) {
+// result is the default implementation for ConnectionMonitorsCreateOrUpdateFuture.Result.
+func (future *ConnectionMonitorsCreateOrUpdateFuture) result(client ConnectionMonitorsClient) (cmr ConnectionMonitorResult, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -5521,6 +5687,7 @@ func (future *ConnectionMonitorsCreateOrUpdateFuture) Result(client ConnectionMo
return
}
if !done {
+ cmr.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ConnectionMonitorsCreateOrUpdateFuture")
return
}
@@ -5537,12 +5704,25 @@ func (future *ConnectionMonitorsCreateOrUpdateFuture) Result(client ConnectionMo
// ConnectionMonitorsDeleteFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type ConnectionMonitorsDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ConnectionMonitorsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ConnectionMonitorsDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ConnectionMonitorsDeleteFuture) Result(client ConnectionMonitorsClient) (ar autorest.Response, err error) {
+// result is the default implementation for ConnectionMonitorsDeleteFuture.Result.
+func (future *ConnectionMonitorsDeleteFuture) result(client ConnectionMonitorsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -5550,6 +5730,7 @@ func (future *ConnectionMonitorsDeleteFuture) Result(client ConnectionMonitorsCl
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ConnectionMonitorsDeleteFuture")
return
}
@@ -5568,12 +5749,25 @@ type ConnectionMonitorSource struct {
// ConnectionMonitorsQueryFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type ConnectionMonitorsQueryFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ConnectionMonitorsClient) (ConnectionMonitorQueryResult, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ConnectionMonitorsQueryFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ConnectionMonitorsQueryFuture) Result(client ConnectionMonitorsClient) (cmqr ConnectionMonitorQueryResult, err error) {
+// result is the default implementation for ConnectionMonitorsQueryFuture.Result.
+func (future *ConnectionMonitorsQueryFuture) result(client ConnectionMonitorsClient) (cmqr ConnectionMonitorQueryResult, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -5581,6 +5775,7 @@ func (future *ConnectionMonitorsQueryFuture) Result(client ConnectionMonitorsCli
return
}
if !done {
+ cmqr.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ConnectionMonitorsQueryFuture")
return
}
@@ -5597,12 +5792,25 @@ func (future *ConnectionMonitorsQueryFuture) Result(client ConnectionMonitorsCli
// ConnectionMonitorsStartFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type ConnectionMonitorsStartFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ConnectionMonitorsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ConnectionMonitorsStartFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ConnectionMonitorsStartFuture) Result(client ConnectionMonitorsClient) (ar autorest.Response, err error) {
+// result is the default implementation for ConnectionMonitorsStartFuture.Result.
+func (future *ConnectionMonitorsStartFuture) result(client ConnectionMonitorsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -5610,6 +5818,7 @@ func (future *ConnectionMonitorsStartFuture) Result(client ConnectionMonitorsCli
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ConnectionMonitorsStartFuture")
return
}
@@ -5620,12 +5829,25 @@ func (future *ConnectionMonitorsStartFuture) Result(client ConnectionMonitorsCli
// ConnectionMonitorsStopFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type ConnectionMonitorsStopFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ConnectionMonitorsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ConnectionMonitorsStopFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ConnectionMonitorsStopFuture) Result(client ConnectionMonitorsClient) (ar autorest.Response, err error) {
+// result is the default implementation for ConnectionMonitorsStopFuture.Result.
+func (future *ConnectionMonitorsStopFuture) result(client ConnectionMonitorsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -5633,6 +5855,7 @@ func (future *ConnectionMonitorsStopFuture) Result(client ConnectionMonitorsClie
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ConnectionMonitorsStopFuture")
return
}
@@ -5739,6 +5962,12 @@ type ConnectivityHop struct {
Issues *[]ConnectivityIssue `json:"issues,omitempty"`
}
+// MarshalJSON is the custom marshaler for ConnectivityHop.
+func (ch ConnectivityHop) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// ConnectivityInformation information on the connectivity status.
type ConnectivityInformation struct {
autorest.Response `json:"-"`
@@ -5758,6 +5987,12 @@ type ConnectivityInformation struct {
ProbesFailed *int32 `json:"probesFailed,omitempty"`
}
+// MarshalJSON is the custom marshaler for ConnectivityInformation.
+func (ci ConnectivityInformation) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// ConnectivityIssue information about an issue encountered in the process of checking for connectivity.
type ConnectivityIssue struct {
// Origin - READ-ONLY; The origin of the issue. Possible values include: 'OriginLocal', 'OriginInbound', 'OriginOutbound'
@@ -5770,6 +6005,12 @@ type ConnectivityIssue struct {
Context *[]map[string]*string `json:"context,omitempty"`
}
+// MarshalJSON is the custom marshaler for ConnectivityIssue.
+func (ci ConnectivityIssue) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// ConnectivityParameters parameters that determine how the connectivity check will be performed.
type ConnectivityParameters struct {
Source *ConnectivitySource `json:"source,omitempty"`
@@ -6085,6 +6326,12 @@ type ContainerNetworkInterfaceIPConfigurationPropertiesFormat struct {
ProvisioningState *string `json:"provisioningState,omitempty"`
}
+// MarshalJSON is the custom marshaler for ContainerNetworkInterfaceIPConfigurationPropertiesFormat.
+func (cniicpf ContainerNetworkInterfaceIPConfigurationPropertiesFormat) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// ContainerNetworkInterfacePropertiesFormat ...
type ContainerNetworkInterfacePropertiesFormat struct {
// ContainerNetworkInterfaceConfiguration - Container network interface configuration from which this container network interface is created.
@@ -6402,15 +6649,34 @@ type DdosProtectionPlanPropertiesFormat struct {
VirtualNetworks *[]SubResource `json:"virtualNetworks,omitempty"`
}
+// MarshalJSON is the custom marshaler for DdosProtectionPlanPropertiesFormat.
+func (dpppf DdosProtectionPlanPropertiesFormat) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// DdosProtectionPlansCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type DdosProtectionPlansCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(DdosProtectionPlansClient) (DdosProtectionPlan, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *DdosProtectionPlansCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *DdosProtectionPlansCreateOrUpdateFuture) Result(client DdosProtectionPlansClient) (dpp DdosProtectionPlan, err error) {
+// result is the default implementation for DdosProtectionPlansCreateOrUpdateFuture.Result.
+func (future *DdosProtectionPlansCreateOrUpdateFuture) result(client DdosProtectionPlansClient) (dpp DdosProtectionPlan, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -6418,6 +6684,7 @@ func (future *DdosProtectionPlansCreateOrUpdateFuture) Result(client DdosProtect
return
}
if !done {
+ dpp.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.DdosProtectionPlansCreateOrUpdateFuture")
return
}
@@ -6434,12 +6701,25 @@ func (future *DdosProtectionPlansCreateOrUpdateFuture) Result(client DdosProtect
// DdosProtectionPlansDeleteFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type DdosProtectionPlansDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(DdosProtectionPlansClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *DdosProtectionPlansDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *DdosProtectionPlansDeleteFuture) Result(client DdosProtectionPlansClient) (ar autorest.Response, err error) {
+// result is the default implementation for DdosProtectionPlansDeleteFuture.Result.
+func (future *DdosProtectionPlansDeleteFuture) result(client DdosProtectionPlansClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -6447,6 +6727,7 @@ func (future *DdosProtectionPlansDeleteFuture) Result(client DdosProtectionPlans
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.DdosProtectionPlansDeleteFuture")
return
}
@@ -7147,12 +7428,25 @@ func (erca *ExpressRouteCircuitAuthorization) UnmarshalJSON(body []byte) error {
// ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture an abstraction for monitoring and retrieving the
// results of a long-running operation.
type ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ExpressRouteCircuitAuthorizationsClient) (ExpressRouteCircuitAuthorization, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture) Result(client ExpressRouteCircuitAuthorizationsClient) (erca ExpressRouteCircuitAuthorization, err error) {
+// result is the default implementation for ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture.Result.
+func (future *ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture) result(client ExpressRouteCircuitAuthorizationsClient) (erca ExpressRouteCircuitAuthorization, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -7160,6 +7454,7 @@ func (future *ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture) Result(clie
return
}
if !done {
+ erca.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture")
return
}
@@ -7176,12 +7471,25 @@ func (future *ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture) Result(clie
// ExpressRouteCircuitAuthorizationsDeleteFuture an abstraction for monitoring and retrieving the results
// of a long-running operation.
type ExpressRouteCircuitAuthorizationsDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ExpressRouteCircuitAuthorizationsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ExpressRouteCircuitAuthorizationsDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ExpressRouteCircuitAuthorizationsDeleteFuture) Result(client ExpressRouteCircuitAuthorizationsClient) (ar autorest.Response, err error) {
+// result is the default implementation for ExpressRouteCircuitAuthorizationsDeleteFuture.Result.
+func (future *ExpressRouteCircuitAuthorizationsDeleteFuture) result(client ExpressRouteCircuitAuthorizationsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -7189,6 +7497,7 @@ func (future *ExpressRouteCircuitAuthorizationsDeleteFuture) Result(client Expre
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitAuthorizationsDeleteFuture")
return
}
@@ -7473,12 +7782,25 @@ func (erccpf ExpressRouteCircuitConnectionPropertiesFormat) MarshalJSON() ([]byt
// ExpressRouteCircuitConnectionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the
// results of a long-running operation.
type ExpressRouteCircuitConnectionsCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ExpressRouteCircuitConnectionsClient) (ExpressRouteCircuitConnection, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ExpressRouteCircuitConnectionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ExpressRouteCircuitConnectionsCreateOrUpdateFuture) Result(client ExpressRouteCircuitConnectionsClient) (ercc ExpressRouteCircuitConnection, err error) {
+// result is the default implementation for ExpressRouteCircuitConnectionsCreateOrUpdateFuture.Result.
+func (future *ExpressRouteCircuitConnectionsCreateOrUpdateFuture) result(client ExpressRouteCircuitConnectionsClient) (ercc ExpressRouteCircuitConnection, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -7486,6 +7808,7 @@ func (future *ExpressRouteCircuitConnectionsCreateOrUpdateFuture) Result(client
return
}
if !done {
+ ercc.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitConnectionsCreateOrUpdateFuture")
return
}
@@ -7502,12 +7825,25 @@ func (future *ExpressRouteCircuitConnectionsCreateOrUpdateFuture) Result(client
// ExpressRouteCircuitConnectionsDeleteFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type ExpressRouteCircuitConnectionsDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ExpressRouteCircuitConnectionsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ExpressRouteCircuitConnectionsDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ExpressRouteCircuitConnectionsDeleteFuture) Result(client ExpressRouteCircuitConnectionsClient) (ar autorest.Response, err error) {
+// result is the default implementation for ExpressRouteCircuitConnectionsDeleteFuture.Result.
+func (future *ExpressRouteCircuitConnectionsDeleteFuture) result(client ExpressRouteCircuitConnectionsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -7515,6 +7851,7 @@ func (future *ExpressRouteCircuitConnectionsDeleteFuture) Result(client ExpressR
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitConnectionsDeleteFuture")
return
}
@@ -7988,12 +8325,25 @@ type ExpressRouteCircuitPeeringPropertiesFormat struct {
// ExpressRouteCircuitPeeringsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results
// of a long-running operation.
type ExpressRouteCircuitPeeringsCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ExpressRouteCircuitPeeringsClient) (ExpressRouteCircuitPeering, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ExpressRouteCircuitPeeringsCreateOrUpdateFuture) Result(client ExpressRouteCircuitPeeringsClient) (ercp ExpressRouteCircuitPeering, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ExpressRouteCircuitPeeringsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for ExpressRouteCircuitPeeringsCreateOrUpdateFuture.Result.
+func (future *ExpressRouteCircuitPeeringsCreateOrUpdateFuture) result(client ExpressRouteCircuitPeeringsClient) (ercp ExpressRouteCircuitPeering, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -8001,6 +8351,7 @@ func (future *ExpressRouteCircuitPeeringsCreateOrUpdateFuture) Result(client Exp
return
}
if !done {
+ ercp.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitPeeringsCreateOrUpdateFuture")
return
}
@@ -8017,12 +8368,25 @@ func (future *ExpressRouteCircuitPeeringsCreateOrUpdateFuture) Result(client Exp
// ExpressRouteCircuitPeeringsDeleteFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type ExpressRouteCircuitPeeringsDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ExpressRouteCircuitPeeringsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ExpressRouteCircuitPeeringsDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ExpressRouteCircuitPeeringsDeleteFuture) Result(client ExpressRouteCircuitPeeringsClient) (ar autorest.Response, err error) {
+// result is the default implementation for ExpressRouteCircuitPeeringsDeleteFuture.Result.
+func (future *ExpressRouteCircuitPeeringsDeleteFuture) result(client ExpressRouteCircuitPeeringsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -8030,6 +8394,7 @@ func (future *ExpressRouteCircuitPeeringsDeleteFuture) Result(client ExpressRout
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitPeeringsDeleteFuture")
return
}
@@ -8161,12 +8526,25 @@ type ExpressRouteCircuitsArpTableListResult struct {
// ExpressRouteCircuitsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type ExpressRouteCircuitsCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ExpressRouteCircuitsClient) (ExpressRouteCircuit, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ExpressRouteCircuitsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ExpressRouteCircuitsCreateOrUpdateFuture) Result(client ExpressRouteCircuitsClient) (erc ExpressRouteCircuit, err error) {
+// result is the default implementation for ExpressRouteCircuitsCreateOrUpdateFuture.Result.
+func (future *ExpressRouteCircuitsCreateOrUpdateFuture) result(client ExpressRouteCircuitsClient) (erc ExpressRouteCircuit, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -8174,6 +8552,7 @@ func (future *ExpressRouteCircuitsCreateOrUpdateFuture) Result(client ExpressRou
return
}
if !done {
+ erc.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitsCreateOrUpdateFuture")
return
}
@@ -8190,12 +8569,25 @@ func (future *ExpressRouteCircuitsCreateOrUpdateFuture) Result(client ExpressRou
// ExpressRouteCircuitsDeleteFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type ExpressRouteCircuitsDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ExpressRouteCircuitsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ExpressRouteCircuitsDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ExpressRouteCircuitsDeleteFuture) Result(client ExpressRouteCircuitsClient) (ar autorest.Response, err error) {
+// result is the default implementation for ExpressRouteCircuitsDeleteFuture.Result.
+func (future *ExpressRouteCircuitsDeleteFuture) result(client ExpressRouteCircuitsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -8203,6 +8595,7 @@ func (future *ExpressRouteCircuitsDeleteFuture) Result(client ExpressRouteCircui
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitsDeleteFuture")
return
}
@@ -8234,12 +8627,25 @@ type ExpressRouteCircuitSku struct {
// ExpressRouteCircuitsListArpTableFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type ExpressRouteCircuitsListArpTableFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ExpressRouteCircuitsClient) (ExpressRouteCircuitsArpTableListResult, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ExpressRouteCircuitsListArpTableFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ExpressRouteCircuitsListArpTableFuture) Result(client ExpressRouteCircuitsClient) (ercatlr ExpressRouteCircuitsArpTableListResult, err error) {
+// result is the default implementation for ExpressRouteCircuitsListArpTableFuture.Result.
+func (future *ExpressRouteCircuitsListArpTableFuture) result(client ExpressRouteCircuitsClient) (ercatlr ExpressRouteCircuitsArpTableListResult, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -8247,6 +8653,7 @@ func (future *ExpressRouteCircuitsListArpTableFuture) Result(client ExpressRoute
return
}
if !done {
+ ercatlr.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitsListArpTableFuture")
return
}
@@ -8263,12 +8670,25 @@ func (future *ExpressRouteCircuitsListArpTableFuture) Result(client ExpressRoute
// ExpressRouteCircuitsListRoutesTableFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type ExpressRouteCircuitsListRoutesTableFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ExpressRouteCircuitsClient) (ExpressRouteCircuitsRoutesTableListResult, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ExpressRouteCircuitsListRoutesTableFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ExpressRouteCircuitsListRoutesTableFuture) Result(client ExpressRouteCircuitsClient) (ercrtlr ExpressRouteCircuitsRoutesTableListResult, err error) {
+// result is the default implementation for ExpressRouteCircuitsListRoutesTableFuture.Result.
+func (future *ExpressRouteCircuitsListRoutesTableFuture) result(client ExpressRouteCircuitsClient) (ercrtlr ExpressRouteCircuitsRoutesTableListResult, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -8276,6 +8696,7 @@ func (future *ExpressRouteCircuitsListRoutesTableFuture) Result(client ExpressRo
return
}
if !done {
+ ercrtlr.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitsListRoutesTableFuture")
return
}
@@ -8292,12 +8713,25 @@ func (future *ExpressRouteCircuitsListRoutesTableFuture) Result(client ExpressRo
// ExpressRouteCircuitsListRoutesTableSummaryFuture an abstraction for monitoring and retrieving the
// results of a long-running operation.
type ExpressRouteCircuitsListRoutesTableSummaryFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ExpressRouteCircuitsClient) (ExpressRouteCircuitsRoutesTableSummaryListResult, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ExpressRouteCircuitsListRoutesTableSummaryFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ExpressRouteCircuitsListRoutesTableSummaryFuture) Result(client ExpressRouteCircuitsClient) (ercrtslr ExpressRouteCircuitsRoutesTableSummaryListResult, err error) {
+// result is the default implementation for ExpressRouteCircuitsListRoutesTableSummaryFuture.Result.
+func (future *ExpressRouteCircuitsListRoutesTableSummaryFuture) result(client ExpressRouteCircuitsClient) (ercrtslr ExpressRouteCircuitsRoutesTableSummaryListResult, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -8305,6 +8739,7 @@ func (future *ExpressRouteCircuitsListRoutesTableSummaryFuture) Result(client Ex
return
}
if !done {
+ ercrtslr.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitsListRoutesTableSummaryFuture")
return
}
@@ -8354,12 +8789,25 @@ type ExpressRouteCircuitStats struct {
// ExpressRouteCircuitsUpdateTagsFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type ExpressRouteCircuitsUpdateTagsFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ExpressRouteCircuitsClient) (ExpressRouteCircuit, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ExpressRouteCircuitsUpdateTagsFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ExpressRouteCircuitsUpdateTagsFuture) Result(client ExpressRouteCircuitsClient) (erc ExpressRouteCircuit, err error) {
+// result is the default implementation for ExpressRouteCircuitsUpdateTagsFuture.Result.
+func (future *ExpressRouteCircuitsUpdateTagsFuture) result(client ExpressRouteCircuitsClient) (erc ExpressRouteCircuit, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -8367,6 +8815,7 @@ func (future *ExpressRouteCircuitsUpdateTagsFuture) Result(client ExpressRouteCi
return
}
if !done {
+ erc.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitsUpdateTagsFuture")
return
}
@@ -8453,6 +8902,12 @@ type ExpressRouteConnectionID struct {
ID *string `json:"id,omitempty"`
}
+// MarshalJSON is the custom marshaler for ExpressRouteConnectionID.
+func (erci ExpressRouteConnectionID) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// ExpressRouteConnectionList expressRouteConnection list
type ExpressRouteConnectionList struct {
autorest.Response `json:"-"`
@@ -8490,12 +8945,25 @@ func (ercp ExpressRouteConnectionProperties) MarshalJSON() ([]byte, error) {
// ExpressRouteConnectionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of
// a long-running operation.
type ExpressRouteConnectionsCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ExpressRouteConnectionsClient) (ExpressRouteConnection, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ExpressRouteConnectionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ExpressRouteConnectionsCreateOrUpdateFuture) Result(client ExpressRouteConnectionsClient) (erc ExpressRouteConnection, err error) {
+// result is the default implementation for ExpressRouteConnectionsCreateOrUpdateFuture.Result.
+func (future *ExpressRouteConnectionsCreateOrUpdateFuture) result(client ExpressRouteConnectionsClient) (erc ExpressRouteConnection, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -8503,6 +8971,7 @@ func (future *ExpressRouteConnectionsCreateOrUpdateFuture) Result(client Express
return
}
if !done {
+ erc.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ExpressRouteConnectionsCreateOrUpdateFuture")
return
}
@@ -8519,12 +8988,25 @@ func (future *ExpressRouteConnectionsCreateOrUpdateFuture) Result(client Express
// ExpressRouteConnectionsDeleteFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type ExpressRouteConnectionsDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ExpressRouteConnectionsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ExpressRouteConnectionsDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ExpressRouteConnectionsDeleteFuture) Result(client ExpressRouteConnectionsClient) (ar autorest.Response, err error) {
+// result is the default implementation for ExpressRouteConnectionsDeleteFuture.Result.
+func (future *ExpressRouteConnectionsDeleteFuture) result(client ExpressRouteConnectionsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -8532,6 +9014,7 @@ func (future *ExpressRouteConnectionsDeleteFuture) Result(client ExpressRouteCon
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ExpressRouteConnectionsDeleteFuture")
return
}
@@ -9146,12 +9629,25 @@ func (erccpp ExpressRouteCrossConnectionPeeringProperties) MarshalJSON() ([]byte
// ExpressRouteCrossConnectionPeeringsCreateOrUpdateFuture an abstraction for monitoring and retrieving the
// results of a long-running operation.
type ExpressRouteCrossConnectionPeeringsCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ExpressRouteCrossConnectionPeeringsClient) (ExpressRouteCrossConnectionPeering, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ExpressRouteCrossConnectionPeeringsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ExpressRouteCrossConnectionPeeringsCreateOrUpdateFuture) Result(client ExpressRouteCrossConnectionPeeringsClient) (erccp ExpressRouteCrossConnectionPeering, err error) {
+// result is the default implementation for ExpressRouteCrossConnectionPeeringsCreateOrUpdateFuture.Result.
+func (future *ExpressRouteCrossConnectionPeeringsCreateOrUpdateFuture) result(client ExpressRouteCrossConnectionPeeringsClient) (erccp ExpressRouteCrossConnectionPeering, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -9159,6 +9655,7 @@ func (future *ExpressRouteCrossConnectionPeeringsCreateOrUpdateFuture) Result(cl
return
}
if !done {
+ erccp.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionPeeringsCreateOrUpdateFuture")
return
}
@@ -9175,12 +9672,25 @@ func (future *ExpressRouteCrossConnectionPeeringsCreateOrUpdateFuture) Result(cl
// ExpressRouteCrossConnectionPeeringsDeleteFuture an abstraction for monitoring and retrieving the results
// of a long-running operation.
type ExpressRouteCrossConnectionPeeringsDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ExpressRouteCrossConnectionPeeringsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ExpressRouteCrossConnectionPeeringsDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ExpressRouteCrossConnectionPeeringsDeleteFuture) Result(client ExpressRouteCrossConnectionPeeringsClient) (ar autorest.Response, err error) {
+// result is the default implementation for ExpressRouteCrossConnectionPeeringsDeleteFuture.Result.
+func (future *ExpressRouteCrossConnectionPeeringsDeleteFuture) result(client ExpressRouteCrossConnectionPeeringsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -9188,6 +9698,7 @@ func (future *ExpressRouteCrossConnectionPeeringsDeleteFuture) Result(client Exp
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionPeeringsDeleteFuture")
return
}
@@ -9258,12 +9769,25 @@ type ExpressRouteCrossConnectionRoutesTableSummary struct {
// ExpressRouteCrossConnectionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the
// results of a long-running operation.
type ExpressRouteCrossConnectionsCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ExpressRouteCrossConnectionsClient) (ExpressRouteCrossConnection, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ExpressRouteCrossConnectionsCreateOrUpdateFuture) Result(client ExpressRouteCrossConnectionsClient) (ercc ExpressRouteCrossConnection, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ExpressRouteCrossConnectionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for ExpressRouteCrossConnectionsCreateOrUpdateFuture.Result.
+func (future *ExpressRouteCrossConnectionsCreateOrUpdateFuture) result(client ExpressRouteCrossConnectionsClient) (ercc ExpressRouteCrossConnection, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -9271,6 +9795,7 @@ func (future *ExpressRouteCrossConnectionsCreateOrUpdateFuture) Result(client Ex
return
}
if !done {
+ ercc.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionsCreateOrUpdateFuture")
return
}
@@ -9287,12 +9812,25 @@ func (future *ExpressRouteCrossConnectionsCreateOrUpdateFuture) Result(client Ex
// ExpressRouteCrossConnectionsListArpTableFuture an abstraction for monitoring and retrieving the results
// of a long-running operation.
type ExpressRouteCrossConnectionsListArpTableFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ExpressRouteCrossConnectionsClient) (ExpressRouteCircuitsArpTableListResult, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ExpressRouteCrossConnectionsListArpTableFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ExpressRouteCrossConnectionsListArpTableFuture) Result(client ExpressRouteCrossConnectionsClient) (ercatlr ExpressRouteCircuitsArpTableListResult, err error) {
+// result is the default implementation for ExpressRouteCrossConnectionsListArpTableFuture.Result.
+func (future *ExpressRouteCrossConnectionsListArpTableFuture) result(client ExpressRouteCrossConnectionsClient) (ercatlr ExpressRouteCircuitsArpTableListResult, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -9300,6 +9838,7 @@ func (future *ExpressRouteCrossConnectionsListArpTableFuture) Result(client Expr
return
}
if !done {
+ ercatlr.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionsListArpTableFuture")
return
}
@@ -9316,12 +9855,25 @@ func (future *ExpressRouteCrossConnectionsListArpTableFuture) Result(client Expr
// ExpressRouteCrossConnectionsListRoutesTableFuture an abstraction for monitoring and retrieving the
// results of a long-running operation.
type ExpressRouteCrossConnectionsListRoutesTableFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ExpressRouteCrossConnectionsClient) (ExpressRouteCircuitsRoutesTableListResult, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ExpressRouteCrossConnectionsListRoutesTableFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ExpressRouteCrossConnectionsListRoutesTableFuture) Result(client ExpressRouteCrossConnectionsClient) (ercrtlr ExpressRouteCircuitsRoutesTableListResult, err error) {
+// result is the default implementation for ExpressRouteCrossConnectionsListRoutesTableFuture.Result.
+func (future *ExpressRouteCrossConnectionsListRoutesTableFuture) result(client ExpressRouteCrossConnectionsClient) (ercrtlr ExpressRouteCircuitsRoutesTableListResult, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -9329,6 +9881,7 @@ func (future *ExpressRouteCrossConnectionsListRoutesTableFuture) Result(client E
return
}
if !done {
+ ercrtlr.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionsListRoutesTableFuture")
return
}
@@ -9345,12 +9898,25 @@ func (future *ExpressRouteCrossConnectionsListRoutesTableFuture) Result(client E
// ExpressRouteCrossConnectionsListRoutesTableSummaryFuture an abstraction for monitoring and retrieving
// the results of a long-running operation.
type ExpressRouteCrossConnectionsListRoutesTableSummaryFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ExpressRouteCrossConnectionsClient) (ExpressRouteCrossConnectionsRoutesTableSummaryListResult, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ExpressRouteCrossConnectionsListRoutesTableSummaryFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ExpressRouteCrossConnectionsListRoutesTableSummaryFuture) Result(client ExpressRouteCrossConnectionsClient) (erccrtslr ExpressRouteCrossConnectionsRoutesTableSummaryListResult, err error) {
+// result is the default implementation for ExpressRouteCrossConnectionsListRoutesTableSummaryFuture.Result.
+func (future *ExpressRouteCrossConnectionsListRoutesTableSummaryFuture) result(client ExpressRouteCrossConnectionsClient) (erccrtslr ExpressRouteCrossConnectionsRoutesTableSummaryListResult, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -9358,6 +9924,7 @@ func (future *ExpressRouteCrossConnectionsListRoutesTableSummaryFuture) Result(c
return
}
if !done {
+ erccrtslr.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionsListRoutesTableSummaryFuture")
return
}
@@ -9393,12 +9960,25 @@ func (erccrtslr ExpressRouteCrossConnectionsRoutesTableSummaryListResult) Marsha
// ExpressRouteCrossConnectionsUpdateTagsFuture an abstraction for monitoring and retrieving the results of
// a long-running operation.
type ExpressRouteCrossConnectionsUpdateTagsFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ExpressRouteCrossConnectionsClient) (ExpressRouteCrossConnection, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ExpressRouteCrossConnectionsUpdateTagsFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ExpressRouteCrossConnectionsUpdateTagsFuture) Result(client ExpressRouteCrossConnectionsClient) (ercc ExpressRouteCrossConnection, err error) {
+// result is the default implementation for ExpressRouteCrossConnectionsUpdateTagsFuture.Result.
+func (future *ExpressRouteCrossConnectionsUpdateTagsFuture) result(client ExpressRouteCrossConnectionsClient) (ercc ExpressRouteCrossConnection, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -9406,6 +9986,7 @@ func (future *ExpressRouteCrossConnectionsUpdateTagsFuture) Result(client Expres
return
}
if !done {
+ ercc.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionsUpdateTagsFuture")
return
}
@@ -9582,12 +10163,25 @@ type ExpressRouteGatewayPropertiesAutoScaleConfigurationBounds struct {
// ExpressRouteGatewaysCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type ExpressRouteGatewaysCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ExpressRouteGatewaysClient) (ExpressRouteGateway, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ExpressRouteGatewaysCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ExpressRouteGatewaysCreateOrUpdateFuture) Result(client ExpressRouteGatewaysClient) (erg ExpressRouteGateway, err error) {
+// result is the default implementation for ExpressRouteGatewaysCreateOrUpdateFuture.Result.
+func (future *ExpressRouteGatewaysCreateOrUpdateFuture) result(client ExpressRouteGatewaysClient) (erg ExpressRouteGateway, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -9595,6 +10189,7 @@ func (future *ExpressRouteGatewaysCreateOrUpdateFuture) Result(client ExpressRou
return
}
if !done {
+ erg.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ExpressRouteGatewaysCreateOrUpdateFuture")
return
}
@@ -9611,12 +10206,25 @@ func (future *ExpressRouteGatewaysCreateOrUpdateFuture) Result(client ExpressRou
// ExpressRouteGatewaysDeleteFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type ExpressRouteGatewaysDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ExpressRouteGatewaysClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ExpressRouteGatewaysDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ExpressRouteGatewaysDeleteFuture) Result(client ExpressRouteGatewaysClient) (ar autorest.Response, err error) {
+// result is the default implementation for ExpressRouteGatewaysDeleteFuture.Result.
+func (future *ExpressRouteGatewaysDeleteFuture) result(client ExpressRouteGatewaysClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -9624,6 +10232,7 @@ func (future *ExpressRouteGatewaysDeleteFuture) Result(client ExpressRouteGatewa
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ExpressRouteGatewaysDeleteFuture")
return
}
@@ -10220,12 +10829,25 @@ func (erppf ExpressRoutePortPropertiesFormat) MarshalJSON() ([]byte, error) {
// ExpressRoutePortsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type ExpressRoutePortsCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ExpressRoutePortsClient) (ExpressRoutePort, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ExpressRoutePortsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ExpressRoutePortsCreateOrUpdateFuture) Result(client ExpressRoutePortsClient) (erp ExpressRoutePort, err error) {
+// result is the default implementation for ExpressRoutePortsCreateOrUpdateFuture.Result.
+func (future *ExpressRoutePortsCreateOrUpdateFuture) result(client ExpressRoutePortsClient) (erp ExpressRoutePort, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -10233,6 +10855,7 @@ func (future *ExpressRoutePortsCreateOrUpdateFuture) Result(client ExpressRouteP
return
}
if !done {
+ erp.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ExpressRoutePortsCreateOrUpdateFuture")
return
}
@@ -10249,12 +10872,25 @@ func (future *ExpressRoutePortsCreateOrUpdateFuture) Result(client ExpressRouteP
// ExpressRoutePortsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type ExpressRoutePortsDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ExpressRoutePortsClient) (autorest.Response, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ExpressRoutePortsDeleteFuture) Result(client ExpressRoutePortsClient) (ar autorest.Response, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ExpressRoutePortsDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for ExpressRoutePortsDeleteFuture.Result.
+func (future *ExpressRoutePortsDeleteFuture) result(client ExpressRoutePortsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -10262,6 +10898,7 @@ func (future *ExpressRoutePortsDeleteFuture) Result(client ExpressRoutePortsClie
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ExpressRoutePortsDeleteFuture")
return
}
@@ -10381,9 +11018,15 @@ type ExpressRoutePortsLocationBandwidths struct {
ValueInGbps *int32 `json:"valueInGbps,omitempty"`
}
-// ExpressRoutePortsLocationListResult response for ListExpressRoutePortsLocations API service call.
-type ExpressRoutePortsLocationListResult struct {
- autorest.Response `json:"-"`
+// MarshalJSON is the custom marshaler for ExpressRoutePortsLocationBandwidths.
+func (erplb ExpressRoutePortsLocationBandwidths) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// ExpressRoutePortsLocationListResult response for ListExpressRoutePortsLocations API service call.
+type ExpressRoutePortsLocationListResult struct {
+ autorest.Response `json:"-"`
// Value - The list of all ExpressRoutePort peering locations.
Value *[]ExpressRoutePortsLocation `json:"value,omitempty"`
// NextLink - The URL to get the next set of results.
@@ -10566,12 +11209,25 @@ func (erplpf ExpressRoutePortsLocationPropertiesFormat) MarshalJSON() ([]byte, e
// ExpressRoutePortsUpdateTagsFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type ExpressRoutePortsUpdateTagsFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ExpressRoutePortsClient) (ExpressRoutePort, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ExpressRoutePortsUpdateTagsFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ExpressRoutePortsUpdateTagsFuture) Result(client ExpressRoutePortsClient) (erp ExpressRoutePort, err error) {
+// result is the default implementation for ExpressRoutePortsUpdateTagsFuture.Result.
+func (future *ExpressRoutePortsUpdateTagsFuture) result(client ExpressRoutePortsClient) (erp ExpressRoutePort, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -10579,6 +11235,7 @@ func (future *ExpressRoutePortsUpdateTagsFuture) Result(client ExpressRoutePorts
return
}
if !done {
+ erp.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ExpressRoutePortsUpdateTagsFuture")
return
}
@@ -11126,6 +11783,12 @@ type GatewayRoute struct {
Weight *int32 `json:"weight,omitempty"`
}
+// MarshalJSON is the custom marshaler for GatewayRoute.
+func (gr GatewayRoute) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// GatewayRouteListResult list of virtual network gateway routes
type GatewayRouteListResult struct {
autorest.Response `json:"-"`
@@ -11659,12 +12322,25 @@ func (inrpf InboundNatRulePropertiesFormat) MarshalJSON() ([]byte, error) {
// InboundNatRulesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type InboundNatRulesCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(InboundNatRulesClient) (InboundNatRule, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *InboundNatRulesCreateOrUpdateFuture) Result(client InboundNatRulesClient) (inr InboundNatRule, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *InboundNatRulesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for InboundNatRulesCreateOrUpdateFuture.Result.
+func (future *InboundNatRulesCreateOrUpdateFuture) result(client InboundNatRulesClient) (inr InboundNatRule, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -11672,6 +12348,7 @@ func (future *InboundNatRulesCreateOrUpdateFuture) Result(client InboundNatRules
return
}
if !done {
+ inr.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.InboundNatRulesCreateOrUpdateFuture")
return
}
@@ -11688,12 +12365,25 @@ func (future *InboundNatRulesCreateOrUpdateFuture) Result(client InboundNatRules
// InboundNatRulesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type InboundNatRulesDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(InboundNatRulesClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *InboundNatRulesDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *InboundNatRulesDeleteFuture) Result(client InboundNatRulesClient) (ar autorest.Response, err error) {
+// result is the default implementation for InboundNatRulesDeleteFuture.Result.
+func (future *InboundNatRulesDeleteFuture) result(client InboundNatRulesClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -11701,6 +12391,7 @@ func (future *InboundNatRulesDeleteFuture) Result(client InboundNatRulesClient)
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.InboundNatRulesDeleteFuture")
return
}
@@ -12177,12 +12868,25 @@ func (iep InterfaceEndpointProperties) MarshalJSON() ([]byte, error) {
// InterfaceEndpointsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type InterfaceEndpointsCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(InterfaceEndpointsClient) (InterfaceEndpoint, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *InterfaceEndpointsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *InterfaceEndpointsCreateOrUpdateFuture) Result(client InterfaceEndpointsClient) (ie InterfaceEndpoint, err error) {
+// result is the default implementation for InterfaceEndpointsCreateOrUpdateFuture.Result.
+func (future *InterfaceEndpointsCreateOrUpdateFuture) result(client InterfaceEndpointsClient) (ie InterfaceEndpoint, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -12190,6 +12894,7 @@ func (future *InterfaceEndpointsCreateOrUpdateFuture) Result(client InterfaceEnd
return
}
if !done {
+ ie.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.InterfaceEndpointsCreateOrUpdateFuture")
return
}
@@ -12206,12 +12911,25 @@ func (future *InterfaceEndpointsCreateOrUpdateFuture) Result(client InterfaceEnd
// InterfaceEndpointsDeleteFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type InterfaceEndpointsDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(InterfaceEndpointsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *InterfaceEndpointsDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *InterfaceEndpointsDeleteFuture) Result(client InterfaceEndpointsClient) (ar autorest.Response, err error) {
+// result is the default implementation for InterfaceEndpointsDeleteFuture.Result.
+func (future *InterfaceEndpointsDeleteFuture) result(client InterfaceEndpointsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -12219,6 +12937,7 @@ func (future *InterfaceEndpointsDeleteFuture) Result(client InterfaceEndpointsCl
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.InterfaceEndpointsDeleteFuture")
return
}
@@ -12910,12 +13629,25 @@ func (ipf InterfacePropertiesFormat) MarshalJSON() ([]byte, error) {
// InterfacesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type InterfacesCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(InterfacesClient) (Interface, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *InterfacesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *InterfacesCreateOrUpdateFuture) Result(client InterfacesClient) (i Interface, err error) {
+// result is the default implementation for InterfacesCreateOrUpdateFuture.Result.
+func (future *InterfacesCreateOrUpdateFuture) result(client InterfacesClient) (i Interface, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -12923,6 +13655,7 @@ func (future *InterfacesCreateOrUpdateFuture) Result(client InterfacesClient) (i
return
}
if !done {
+ i.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.InterfacesCreateOrUpdateFuture")
return
}
@@ -12939,12 +13672,25 @@ func (future *InterfacesCreateOrUpdateFuture) Result(client InterfacesClient) (i
// InterfacesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type InterfacesDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(InterfacesClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *InterfacesDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *InterfacesDeleteFuture) Result(client InterfacesClient) (ar autorest.Response, err error) {
+// result is the default implementation for InterfacesDeleteFuture.Result.
+func (future *InterfacesDeleteFuture) result(client InterfacesClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -12952,6 +13698,7 @@ func (future *InterfacesDeleteFuture) Result(client InterfacesClient) (ar autore
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.InterfacesDeleteFuture")
return
}
@@ -12962,12 +13709,25 @@ func (future *InterfacesDeleteFuture) Result(client InterfacesClient) (ar autore
// InterfacesGetEffectiveRouteTableFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type InterfacesGetEffectiveRouteTableFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(InterfacesClient) (EffectiveRouteListResult, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *InterfacesGetEffectiveRouteTableFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *InterfacesGetEffectiveRouteTableFuture) Result(client InterfacesClient) (erlr EffectiveRouteListResult, err error) {
+// result is the default implementation for InterfacesGetEffectiveRouteTableFuture.Result.
+func (future *InterfacesGetEffectiveRouteTableFuture) result(client InterfacesClient) (erlr EffectiveRouteListResult, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -12975,6 +13735,7 @@ func (future *InterfacesGetEffectiveRouteTableFuture) Result(client InterfacesCl
return
}
if !done {
+ erlr.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.InterfacesGetEffectiveRouteTableFuture")
return
}
@@ -12991,12 +13752,25 @@ func (future *InterfacesGetEffectiveRouteTableFuture) Result(client InterfacesCl
// InterfacesListEffectiveNetworkSecurityGroupsFuture an abstraction for monitoring and retrieving the
// results of a long-running operation.
type InterfacesListEffectiveNetworkSecurityGroupsFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(InterfacesClient) (EffectiveNetworkSecurityGroupListResult, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *InterfacesListEffectiveNetworkSecurityGroupsFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *InterfacesListEffectiveNetworkSecurityGroupsFuture) Result(client InterfacesClient) (ensglr EffectiveNetworkSecurityGroupListResult, err error) {
+// result is the default implementation for InterfacesListEffectiveNetworkSecurityGroupsFuture.Result.
+func (future *InterfacesListEffectiveNetworkSecurityGroupsFuture) result(client InterfacesClient) (ensglr EffectiveNetworkSecurityGroupListResult, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -13004,6 +13778,7 @@ func (future *InterfacesListEffectiveNetworkSecurityGroupsFuture) Result(client
return
}
if !done {
+ ensglr.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.InterfacesListEffectiveNetworkSecurityGroupsFuture")
return
}
@@ -13020,12 +13795,25 @@ func (future *InterfacesListEffectiveNetworkSecurityGroupsFuture) Result(client
// InterfacesUpdateTagsFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type InterfacesUpdateTagsFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(InterfacesClient) (Interface, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *InterfacesUpdateTagsFuture) Result(client InterfacesClient) (i Interface, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *InterfacesUpdateTagsFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for InterfacesUpdateTagsFuture.Result.
+func (future *InterfacesUpdateTagsFuture) result(client InterfacesClient) (i Interface, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -13033,6 +13821,7 @@ func (future *InterfacesUpdateTagsFuture) Result(client InterfacesClient) (i Int
return
}
if !done {
+ i.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.InterfacesUpdateTagsFuture")
return
}
@@ -13328,12 +14117,25 @@ func (itcpf InterfaceTapConfigurationPropertiesFormat) MarshalJSON() ([]byte, er
// InterfaceTapConfigurationsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results
// of a long-running operation.
type InterfaceTapConfigurationsCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(InterfaceTapConfigurationsClient) (InterfaceTapConfiguration, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *InterfaceTapConfigurationsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *InterfaceTapConfigurationsCreateOrUpdateFuture) Result(client InterfaceTapConfigurationsClient) (itc InterfaceTapConfiguration, err error) {
+// result is the default implementation for InterfaceTapConfigurationsCreateOrUpdateFuture.Result.
+func (future *InterfaceTapConfigurationsCreateOrUpdateFuture) result(client InterfaceTapConfigurationsClient) (itc InterfaceTapConfiguration, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -13341,6 +14143,7 @@ func (future *InterfaceTapConfigurationsCreateOrUpdateFuture) Result(client Inte
return
}
if !done {
+ itc.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.InterfaceTapConfigurationsCreateOrUpdateFuture")
return
}
@@ -13357,12 +14160,25 @@ func (future *InterfaceTapConfigurationsCreateOrUpdateFuture) Result(client Inte
// InterfaceTapConfigurationsDeleteFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type InterfaceTapConfigurationsDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(InterfaceTapConfigurationsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *InterfaceTapConfigurationsDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *InterfaceTapConfigurationsDeleteFuture) Result(client InterfaceTapConfigurationsClient) (ar autorest.Response, err error) {
+// result is the default implementation for InterfaceTapConfigurationsDeleteFuture.Result.
+func (future *InterfaceTapConfigurationsDeleteFuture) result(client InterfaceTapConfigurationsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -13370,6 +14186,7 @@ func (future *InterfaceTapConfigurationsDeleteFuture) Result(client InterfaceTap
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.InterfaceTapConfigurationsDeleteFuture")
return
}
@@ -16083,12 +16900,25 @@ type LoadBalancerPropertiesFormat struct {
// LoadBalancersCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type LoadBalancersCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(LoadBalancersClient) (LoadBalancer, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *LoadBalancersCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *LoadBalancersCreateOrUpdateFuture) Result(client LoadBalancersClient) (lb LoadBalancer, err error) {
+// result is the default implementation for LoadBalancersCreateOrUpdateFuture.Result.
+func (future *LoadBalancersCreateOrUpdateFuture) result(client LoadBalancersClient) (lb LoadBalancer, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -16096,6 +16926,7 @@ func (future *LoadBalancersCreateOrUpdateFuture) Result(client LoadBalancersClie
return
}
if !done {
+ lb.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.LoadBalancersCreateOrUpdateFuture")
return
}
@@ -16112,12 +16943,25 @@ func (future *LoadBalancersCreateOrUpdateFuture) Result(client LoadBalancersClie
// LoadBalancersDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type LoadBalancersDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(LoadBalancersClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *LoadBalancersDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *LoadBalancersDeleteFuture) Result(client LoadBalancersClient) (ar autorest.Response, err error) {
+// result is the default implementation for LoadBalancersDeleteFuture.Result.
+func (future *LoadBalancersDeleteFuture) result(client LoadBalancersClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -16125,6 +16969,7 @@ func (future *LoadBalancersDeleteFuture) Result(client LoadBalancersClient) (ar
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.LoadBalancersDeleteFuture")
return
}
@@ -16141,12 +16986,25 @@ type LoadBalancerSku struct {
// LoadBalancersUpdateTagsFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type LoadBalancersUpdateTagsFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(LoadBalancersClient) (LoadBalancer, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *LoadBalancersUpdateTagsFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *LoadBalancersUpdateTagsFuture) Result(client LoadBalancersClient) (lb LoadBalancer, err error) {
+// result is the default implementation for LoadBalancersUpdateTagsFuture.Result.
+func (future *LoadBalancersUpdateTagsFuture) result(client LoadBalancersClient) (lb LoadBalancer, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -16154,6 +17012,7 @@ func (future *LoadBalancersUpdateTagsFuture) Result(client LoadBalancersClient)
return
}
if !done {
+ lb.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.LoadBalancersUpdateTagsFuture")
return
}
@@ -16599,12 +17458,25 @@ func (lngpf LocalNetworkGatewayPropertiesFormat) MarshalJSON() ([]byte, error) {
// LocalNetworkGatewaysCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type LocalNetworkGatewaysCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(LocalNetworkGatewaysClient) (LocalNetworkGateway, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *LocalNetworkGatewaysCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *LocalNetworkGatewaysCreateOrUpdateFuture) Result(client LocalNetworkGatewaysClient) (lng LocalNetworkGateway, err error) {
+// result is the default implementation for LocalNetworkGatewaysCreateOrUpdateFuture.Result.
+func (future *LocalNetworkGatewaysCreateOrUpdateFuture) result(client LocalNetworkGatewaysClient) (lng LocalNetworkGateway, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -16612,6 +17484,7 @@ func (future *LocalNetworkGatewaysCreateOrUpdateFuture) Result(client LocalNetwo
return
}
if !done {
+ lng.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.LocalNetworkGatewaysCreateOrUpdateFuture")
return
}
@@ -16628,12 +17501,25 @@ func (future *LocalNetworkGatewaysCreateOrUpdateFuture) Result(client LocalNetwo
// LocalNetworkGatewaysDeleteFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type LocalNetworkGatewaysDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(LocalNetworkGatewaysClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *LocalNetworkGatewaysDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *LocalNetworkGatewaysDeleteFuture) Result(client LocalNetworkGatewaysClient) (ar autorest.Response, err error) {
+// result is the default implementation for LocalNetworkGatewaysDeleteFuture.Result.
+func (future *LocalNetworkGatewaysDeleteFuture) result(client LocalNetworkGatewaysClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -16641,6 +17527,7 @@ func (future *LocalNetworkGatewaysDeleteFuture) Result(client LocalNetworkGatewa
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.LocalNetworkGatewaysDeleteFuture")
return
}
@@ -16651,12 +17538,25 @@ func (future *LocalNetworkGatewaysDeleteFuture) Result(client LocalNetworkGatewa
// LocalNetworkGatewaysUpdateTagsFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type LocalNetworkGatewaysUpdateTagsFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(LocalNetworkGatewaysClient) (LocalNetworkGateway, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *LocalNetworkGatewaysUpdateTagsFuture) Result(client LocalNetworkGatewaysClient) (lng LocalNetworkGateway, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *LocalNetworkGatewaysUpdateTagsFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for LocalNetworkGatewaysUpdateTagsFuture.Result.
+func (future *LocalNetworkGatewaysUpdateTagsFuture) result(client LocalNetworkGatewaysClient) (lng LocalNetworkGateway, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -16664,6 +17564,7 @@ func (future *LocalNetworkGatewaysUpdateTagsFuture) Result(client LocalNetworkGa
return
}
if !done {
+ lng.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.LocalNetworkGatewaysUpdateTagsFuture")
return
}
@@ -16719,6 +17620,12 @@ type ManagedServiceIdentityUserAssignedIdentitiesValue struct {
ClientID *string `json:"clientId,omitempty"`
}
+// MarshalJSON is the custom marshaler for ManagedServiceIdentityUserAssignedIdentitiesValue.
+func (msiAiv ManagedServiceIdentityUserAssignedIdentitiesValue) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// MatchedRule matched rule.
type MatchedRule struct {
// RuleName - Name of the matched network security rule.
@@ -17303,12 +18210,25 @@ func (pvgp P2SVpnGatewayProperties) MarshalJSON() ([]byte, error) {
// P2sVpnGatewaysCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type P2sVpnGatewaysCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(P2sVpnGatewaysClient) (P2SVpnGateway, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for P2sVpnGatewaysCreateOrUpdateFuture.
+func (future *P2sVpnGatewaysCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *P2sVpnGatewaysCreateOrUpdateFuture) Result(client P2sVpnGatewaysClient) (pvg P2SVpnGateway, err error) {
+// result is the default implementation for P2sVpnGatewaysCreateOrUpdateFuture.Result.
+func (future *P2sVpnGatewaysCreateOrUpdateFuture) result(client P2sVpnGatewaysClient) (pvg P2SVpnGateway, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -17316,6 +18236,7 @@ func (future *P2sVpnGatewaysCreateOrUpdateFuture) Result(client P2sVpnGatewaysCl
return
}
if !done {
+ pvg.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.P2sVpnGatewaysCreateOrUpdateFuture")
return
}
@@ -17332,12 +18253,25 @@ func (future *P2sVpnGatewaysCreateOrUpdateFuture) Result(client P2sVpnGatewaysCl
// P2sVpnGatewaysDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type P2sVpnGatewaysDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(P2sVpnGatewaysClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for P2sVpnGatewaysDeleteFuture.
+func (future *P2sVpnGatewaysDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *P2sVpnGatewaysDeleteFuture) Result(client P2sVpnGatewaysClient) (ar autorest.Response, err error) {
+// result is the default implementation for P2sVpnGatewaysDeleteFuture.Result.
+func (future *P2sVpnGatewaysDeleteFuture) result(client P2sVpnGatewaysClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -17345,6 +18279,7 @@ func (future *P2sVpnGatewaysDeleteFuture) Result(client P2sVpnGatewaysClient) (a
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.P2sVpnGatewaysDeleteFuture")
return
}
@@ -17355,12 +18290,25 @@ func (future *P2sVpnGatewaysDeleteFuture) Result(client P2sVpnGatewaysClient) (a
// P2sVpnGatewaysGenerateVpnProfileFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type P2sVpnGatewaysGenerateVpnProfileFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(P2sVpnGatewaysClient) (VpnProfileResponse, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for P2sVpnGatewaysGenerateVpnProfileFuture.
+func (future *P2sVpnGatewaysGenerateVpnProfileFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *P2sVpnGatewaysGenerateVpnProfileFuture) Result(client P2sVpnGatewaysClient) (vpr VpnProfileResponse, err error) {
+// result is the default implementation for P2sVpnGatewaysGenerateVpnProfileFuture.Result.
+func (future *P2sVpnGatewaysGenerateVpnProfileFuture) result(client P2sVpnGatewaysClient) (vpr VpnProfileResponse, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -17368,6 +18316,7 @@ func (future *P2sVpnGatewaysGenerateVpnProfileFuture) Result(client P2sVpnGatewa
return
}
if !done {
+ vpr.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.P2sVpnGatewaysGenerateVpnProfileFuture")
return
}
@@ -17384,12 +18333,25 @@ func (future *P2sVpnGatewaysGenerateVpnProfileFuture) Result(client P2sVpnGatewa
// P2sVpnGatewaysUpdateTagsFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type P2sVpnGatewaysUpdateTagsFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(P2sVpnGatewaysClient) (P2SVpnGateway, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *P2sVpnGatewaysUpdateTagsFuture) Result(client P2sVpnGatewaysClient) (pvg P2SVpnGateway, err error) {
+// UnmarshalJSON is the custom unmarshaller for P2sVpnGatewaysUpdateTagsFuture.
+func (future *P2sVpnGatewaysUpdateTagsFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for P2sVpnGatewaysUpdateTagsFuture.Result.
+func (future *P2sVpnGatewaysUpdateTagsFuture) result(client P2sVpnGatewaysClient) (pvg P2SVpnGateway, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -17397,6 +18359,7 @@ func (future *P2sVpnGatewaysUpdateTagsFuture) Result(client P2sVpnGatewaysClient
return
}
if !done {
+ pvg.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.P2sVpnGatewaysUpdateTagsFuture")
return
}
@@ -17761,12 +18724,25 @@ func (pvscp P2SVpnServerConfigurationProperties) MarshalJSON() ([]byte, error) {
// P2sVpnServerConfigurationsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results
// of a long-running operation.
type P2sVpnServerConfigurationsCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(P2sVpnServerConfigurationsClient) (P2SVpnServerConfiguration, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for P2sVpnServerConfigurationsCreateOrUpdateFuture.
+func (future *P2sVpnServerConfigurationsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *P2sVpnServerConfigurationsCreateOrUpdateFuture) Result(client P2sVpnServerConfigurationsClient) (pvsc P2SVpnServerConfiguration, err error) {
+// result is the default implementation for P2sVpnServerConfigurationsCreateOrUpdateFuture.Result.
+func (future *P2sVpnServerConfigurationsCreateOrUpdateFuture) result(client P2sVpnServerConfigurationsClient) (pvsc P2SVpnServerConfiguration, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -17774,6 +18750,7 @@ func (future *P2sVpnServerConfigurationsCreateOrUpdateFuture) Result(client P2sV
return
}
if !done {
+ pvsc.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.P2sVpnServerConfigurationsCreateOrUpdateFuture")
return
}
@@ -17790,12 +18767,25 @@ func (future *P2sVpnServerConfigurationsCreateOrUpdateFuture) Result(client P2sV
// P2sVpnServerConfigurationsDeleteFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type P2sVpnServerConfigurationsDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(P2sVpnServerConfigurationsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for P2sVpnServerConfigurationsDeleteFuture.
+func (future *P2sVpnServerConfigurationsDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *P2sVpnServerConfigurationsDeleteFuture) Result(client P2sVpnServerConfigurationsClient) (ar autorest.Response, err error) {
+// result is the default implementation for P2sVpnServerConfigurationsDeleteFuture.Result.
+func (future *P2sVpnServerConfigurationsDeleteFuture) result(client P2sVpnServerConfigurationsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -17803,6 +18793,7 @@ func (future *P2sVpnServerConfigurationsDeleteFuture) Result(client P2sVpnServer
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.P2sVpnServerConfigurationsDeleteFuture")
return
}
@@ -18192,12 +19183,25 @@ type PacketCaptureResultProperties struct {
// PacketCapturesCreateFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type PacketCapturesCreateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(PacketCapturesClient) (PacketCaptureResult, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for PacketCapturesCreateFuture.
+func (future *PacketCapturesCreateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *PacketCapturesCreateFuture) Result(client PacketCapturesClient) (pcr PacketCaptureResult, err error) {
+// result is the default implementation for PacketCapturesCreateFuture.Result.
+func (future *PacketCapturesCreateFuture) result(client PacketCapturesClient) (pcr PacketCaptureResult, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -18205,6 +19209,7 @@ func (future *PacketCapturesCreateFuture) Result(client PacketCapturesClient) (p
return
}
if !done {
+ pcr.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.PacketCapturesCreateFuture")
return
}
@@ -18221,12 +19226,25 @@ func (future *PacketCapturesCreateFuture) Result(client PacketCapturesClient) (p
// PacketCapturesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type PacketCapturesDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(PacketCapturesClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for PacketCapturesDeleteFuture.
+func (future *PacketCapturesDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *PacketCapturesDeleteFuture) Result(client PacketCapturesClient) (ar autorest.Response, err error) {
+// result is the default implementation for PacketCapturesDeleteFuture.Result.
+func (future *PacketCapturesDeleteFuture) result(client PacketCapturesClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -18234,6 +19252,7 @@ func (future *PacketCapturesDeleteFuture) Result(client PacketCapturesClient) (a
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.PacketCapturesDeleteFuture")
return
}
@@ -18244,12 +19263,25 @@ func (future *PacketCapturesDeleteFuture) Result(client PacketCapturesClient) (a
// PacketCapturesGetStatusFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type PacketCapturesGetStatusFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(PacketCapturesClient) (PacketCaptureQueryStatusResult, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for PacketCapturesGetStatusFuture.
+func (future *PacketCapturesGetStatusFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *PacketCapturesGetStatusFuture) Result(client PacketCapturesClient) (pcqsr PacketCaptureQueryStatusResult, err error) {
+// result is the default implementation for PacketCapturesGetStatusFuture.Result.
+func (future *PacketCapturesGetStatusFuture) result(client PacketCapturesClient) (pcqsr PacketCaptureQueryStatusResult, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -18257,6 +19289,7 @@ func (future *PacketCapturesGetStatusFuture) Result(client PacketCapturesClient)
return
}
if !done {
+ pcqsr.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.PacketCapturesGetStatusFuture")
return
}
@@ -18273,12 +19306,25 @@ func (future *PacketCapturesGetStatusFuture) Result(client PacketCapturesClient)
// PacketCapturesStopFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type PacketCapturesStopFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(PacketCapturesClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for PacketCapturesStopFuture.
+func (future *PacketCapturesStopFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *PacketCapturesStopFuture) Result(client PacketCapturesClient) (ar autorest.Response, err error) {
+// result is the default implementation for PacketCapturesStopFuture.Result.
+func (future *PacketCapturesStopFuture) result(client PacketCapturesClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -18286,6 +19332,7 @@ func (future *PacketCapturesStopFuture) Result(client PacketCapturesClient) (ar
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.PacketCapturesStopFuture")
return
}
@@ -18904,12 +19951,25 @@ func (ppf ProfilePropertiesFormat) MarshalJSON() ([]byte, error) {
// ProfilesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type ProfilesDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ProfilesClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for ProfilesDeleteFuture.
+func (future *ProfilesDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ProfilesDeleteFuture) Result(client ProfilesClient) (ar autorest.Response, err error) {
+// result is the default implementation for ProfilesDeleteFuture.Result.
+func (future *ProfilesDeleteFuture) result(client ProfilesClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -18917,6 +19977,7 @@ func (future *ProfilesDeleteFuture) Result(client ProfilesClient) (ar autorest.R
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ProfilesDeleteFuture")
return
}
@@ -19088,12 +20149,25 @@ type PublicIPAddressDNSSettings struct {
// PublicIPAddressesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type PublicIPAddressesCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(PublicIPAddressesClient) (PublicIPAddress, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *PublicIPAddressesCreateOrUpdateFuture) Result(client PublicIPAddressesClient) (pia PublicIPAddress, err error) {
+// UnmarshalJSON is the custom unmarshaller for PublicIPAddressesCreateOrUpdateFuture.
+func (future *PublicIPAddressesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for PublicIPAddressesCreateOrUpdateFuture.Result.
+func (future *PublicIPAddressesCreateOrUpdateFuture) result(client PublicIPAddressesClient) (pia PublicIPAddress, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -19101,6 +20175,7 @@ func (future *PublicIPAddressesCreateOrUpdateFuture) Result(client PublicIPAddre
return
}
if !done {
+ pia.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.PublicIPAddressesCreateOrUpdateFuture")
return
}
@@ -19117,12 +20192,25 @@ func (future *PublicIPAddressesCreateOrUpdateFuture) Result(client PublicIPAddre
// PublicIPAddressesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type PublicIPAddressesDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(PublicIPAddressesClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for PublicIPAddressesDeleteFuture.
+func (future *PublicIPAddressesDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *PublicIPAddressesDeleteFuture) Result(client PublicIPAddressesClient) (ar autorest.Response, err error) {
+// result is the default implementation for PublicIPAddressesDeleteFuture.Result.
+func (future *PublicIPAddressesDeleteFuture) result(client PublicIPAddressesClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -19130,6 +20218,7 @@ func (future *PublicIPAddressesDeleteFuture) Result(client PublicIPAddressesClie
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.PublicIPAddressesDeleteFuture")
return
}
@@ -19140,12 +20229,25 @@ func (future *PublicIPAddressesDeleteFuture) Result(client PublicIPAddressesClie
// PublicIPAddressesUpdateTagsFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type PublicIPAddressesUpdateTagsFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(PublicIPAddressesClient) (PublicIPAddress, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for PublicIPAddressesUpdateTagsFuture.
+func (future *PublicIPAddressesUpdateTagsFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *PublicIPAddressesUpdateTagsFuture) Result(client PublicIPAddressesClient) (pia PublicIPAddress, err error) {
+// result is the default implementation for PublicIPAddressesUpdateTagsFuture.Result.
+func (future *PublicIPAddressesUpdateTagsFuture) result(client PublicIPAddressesClient) (pia PublicIPAddress, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -19153,6 +20255,7 @@ func (future *PublicIPAddressesUpdateTagsFuture) Result(client PublicIPAddresses
return
}
if !done {
+ pia.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.PublicIPAddressesUpdateTagsFuture")
return
}
@@ -19537,12 +20640,25 @@ func (pip *PublicIPPrefix) UnmarshalJSON(body []byte) error {
// PublicIPPrefixesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type PublicIPPrefixesCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(PublicIPPrefixesClient) (PublicIPPrefix, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for PublicIPPrefixesCreateOrUpdateFuture.
+func (future *PublicIPPrefixesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *PublicIPPrefixesCreateOrUpdateFuture) Result(client PublicIPPrefixesClient) (pip PublicIPPrefix, err error) {
+// result is the default implementation for PublicIPPrefixesCreateOrUpdateFuture.Result.
+func (future *PublicIPPrefixesCreateOrUpdateFuture) result(client PublicIPPrefixesClient) (pip PublicIPPrefix, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -19550,6 +20666,7 @@ func (future *PublicIPPrefixesCreateOrUpdateFuture) Result(client PublicIPPrefix
return
}
if !done {
+ pip.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.PublicIPPrefixesCreateOrUpdateFuture")
return
}
@@ -19566,12 +20683,25 @@ func (future *PublicIPPrefixesCreateOrUpdateFuture) Result(client PublicIPPrefix
// PublicIPPrefixesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type PublicIPPrefixesDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(PublicIPPrefixesClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for PublicIPPrefixesDeleteFuture.
+func (future *PublicIPPrefixesDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *PublicIPPrefixesDeleteFuture) Result(client PublicIPPrefixesClient) (ar autorest.Response, err error) {
+// result is the default implementation for PublicIPPrefixesDeleteFuture.Result.
+func (future *PublicIPPrefixesDeleteFuture) result(client PublicIPPrefixesClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -19579,6 +20709,7 @@ func (future *PublicIPPrefixesDeleteFuture) Result(client PublicIPPrefixesClient
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.PublicIPPrefixesDeleteFuture")
return
}
@@ -19589,12 +20720,25 @@ func (future *PublicIPPrefixesDeleteFuture) Result(client PublicIPPrefixesClient
// PublicIPPrefixesUpdateTagsFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type PublicIPPrefixesUpdateTagsFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(PublicIPPrefixesClient) (PublicIPPrefix, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for PublicIPPrefixesUpdateTagsFuture.
+func (future *PublicIPPrefixesUpdateTagsFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *PublicIPPrefixesUpdateTagsFuture) Result(client PublicIPPrefixesClient) (pip PublicIPPrefix, err error) {
+// result is the default implementation for PublicIPPrefixesUpdateTagsFuture.Result.
+func (future *PublicIPPrefixesUpdateTagsFuture) result(client PublicIPPrefixesClient) (pip PublicIPPrefix, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -19602,6 +20746,7 @@ func (future *PublicIPPrefixesUpdateTagsFuture) Result(client PublicIPPrefixesCl
return
}
if !done {
+ pip.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.PublicIPPrefixesUpdateTagsFuture")
return
}
@@ -20634,12 +21779,25 @@ func (rfrpf RouteFilterRulePropertiesFormat) MarshalJSON() ([]byte, error) {
// RouteFilterRulesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type RouteFilterRulesCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(RouteFilterRulesClient) (RouteFilterRule, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for RouteFilterRulesCreateOrUpdateFuture.
+func (future *RouteFilterRulesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *RouteFilterRulesCreateOrUpdateFuture) Result(client RouteFilterRulesClient) (rfr RouteFilterRule, err error) {
+// result is the default implementation for RouteFilterRulesCreateOrUpdateFuture.Result.
+func (future *RouteFilterRulesCreateOrUpdateFuture) result(client RouteFilterRulesClient) (rfr RouteFilterRule, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -20647,6 +21805,7 @@ func (future *RouteFilterRulesCreateOrUpdateFuture) Result(client RouteFilterRul
return
}
if !done {
+ rfr.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.RouteFilterRulesCreateOrUpdateFuture")
return
}
@@ -20663,12 +21822,25 @@ func (future *RouteFilterRulesCreateOrUpdateFuture) Result(client RouteFilterRul
// RouteFilterRulesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type RouteFilterRulesDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(RouteFilterRulesClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for RouteFilterRulesDeleteFuture.
+func (future *RouteFilterRulesDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *RouteFilterRulesDeleteFuture) Result(client RouteFilterRulesClient) (ar autorest.Response, err error) {
+// result is the default implementation for RouteFilterRulesDeleteFuture.Result.
+func (future *RouteFilterRulesDeleteFuture) result(client RouteFilterRulesClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -20676,6 +21848,7 @@ func (future *RouteFilterRulesDeleteFuture) Result(client RouteFilterRulesClient
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.RouteFilterRulesDeleteFuture")
return
}
@@ -20686,12 +21859,25 @@ func (future *RouteFilterRulesDeleteFuture) Result(client RouteFilterRulesClient
// RouteFilterRulesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type RouteFilterRulesUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(RouteFilterRulesClient) (RouteFilterRule, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *RouteFilterRulesUpdateFuture) Result(client RouteFilterRulesClient) (rfr RouteFilterRule, err error) {
+// UnmarshalJSON is the custom unmarshaller for RouteFilterRulesUpdateFuture.
+func (future *RouteFilterRulesUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for RouteFilterRulesUpdateFuture.Result.
+func (future *RouteFilterRulesUpdateFuture) result(client RouteFilterRulesClient) (rfr RouteFilterRule, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -20699,6 +21885,7 @@ func (future *RouteFilterRulesUpdateFuture) Result(client RouteFilterRulesClient
return
}
if !done {
+ rfr.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.RouteFilterRulesUpdateFuture")
return
}
@@ -20715,12 +21902,25 @@ func (future *RouteFilterRulesUpdateFuture) Result(client RouteFilterRulesClient
// RouteFiltersCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type RouteFiltersCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(RouteFiltersClient) (RouteFilter, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *RouteFiltersCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *RouteFiltersCreateOrUpdateFuture) Result(client RouteFiltersClient) (rf RouteFilter, err error) {
+// result is the default implementation for RouteFiltersCreateOrUpdateFuture.Result.
+func (future *RouteFiltersCreateOrUpdateFuture) result(client RouteFiltersClient) (rf RouteFilter, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -20728,6 +21928,7 @@ func (future *RouteFiltersCreateOrUpdateFuture) Result(client RouteFiltersClient
return
}
if !done {
+ rf.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.RouteFiltersCreateOrUpdateFuture")
return
}
@@ -20744,12 +21945,25 @@ func (future *RouteFiltersCreateOrUpdateFuture) Result(client RouteFiltersClient
// RouteFiltersDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type RouteFiltersDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(RouteFiltersClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *RouteFiltersDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *RouteFiltersDeleteFuture) Result(client RouteFiltersClient) (ar autorest.Response, err error) {
+// result is the default implementation for RouteFiltersDeleteFuture.Result.
+func (future *RouteFiltersDeleteFuture) result(client RouteFiltersClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -20757,6 +21971,7 @@ func (future *RouteFiltersDeleteFuture) Result(client RouteFiltersClient) (ar au
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.RouteFiltersDeleteFuture")
return
}
@@ -20767,12 +21982,25 @@ func (future *RouteFiltersDeleteFuture) Result(client RouteFiltersClient) (ar au
// RouteFiltersUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type RouteFiltersUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(RouteFiltersClient) (RouteFilter, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *RouteFiltersUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *RouteFiltersUpdateFuture) Result(client RouteFiltersClient) (rf RouteFilter, err error) {
+// result is the default implementation for RouteFiltersUpdateFuture.Result.
+func (future *RouteFiltersUpdateFuture) result(client RouteFiltersClient) (rf RouteFilter, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -20780,6 +22008,7 @@ func (future *RouteFiltersUpdateFuture) Result(client RouteFiltersClient) (rf Ro
return
}
if !done {
+ rf.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.RouteFiltersUpdateFuture")
return
}
@@ -20967,12 +22196,25 @@ type RoutePropertiesFormat struct {
// RoutesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type RoutesCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(RoutesClient) (Route, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *RoutesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *RoutesCreateOrUpdateFuture) Result(client RoutesClient) (r Route, err error) {
+// result is the default implementation for RoutesCreateOrUpdateFuture.Result.
+func (future *RoutesCreateOrUpdateFuture) result(client RoutesClient) (r Route, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -20980,6 +22222,7 @@ func (future *RoutesCreateOrUpdateFuture) Result(client RoutesClient) (r Route,
return
}
if !done {
+ r.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.RoutesCreateOrUpdateFuture")
return
}
@@ -20995,12 +22238,25 @@ func (future *RoutesCreateOrUpdateFuture) Result(client RoutesClient) (r Route,
// RoutesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation.
type RoutesDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(RoutesClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *RoutesDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *RoutesDeleteFuture) Result(client RoutesClient) (ar autorest.Response, err error) {
+// result is the default implementation for RoutesDeleteFuture.Result.
+func (future *RoutesDeleteFuture) result(client RoutesClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -21008,6 +22264,7 @@ func (future *RoutesDeleteFuture) Result(client RoutesClient) (ar autorest.Respo
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.RoutesDeleteFuture")
return
}
@@ -21322,12 +22579,25 @@ func (rtpf RouteTablePropertiesFormat) MarshalJSON() ([]byte, error) {
// RouteTablesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type RouteTablesCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(RouteTablesClient) (RouteTable, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *RouteTablesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *RouteTablesCreateOrUpdateFuture) Result(client RouteTablesClient) (rt RouteTable, err error) {
+// result is the default implementation for RouteTablesCreateOrUpdateFuture.Result.
+func (future *RouteTablesCreateOrUpdateFuture) result(client RouteTablesClient) (rt RouteTable, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -21335,6 +22605,7 @@ func (future *RouteTablesCreateOrUpdateFuture) Result(client RouteTablesClient)
return
}
if !done {
+ rt.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.RouteTablesCreateOrUpdateFuture")
return
}
@@ -21351,12 +22622,25 @@ func (future *RouteTablesCreateOrUpdateFuture) Result(client RouteTablesClient)
// RouteTablesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type RouteTablesDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(RouteTablesClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *RouteTablesDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *RouteTablesDeleteFuture) Result(client RouteTablesClient) (ar autorest.Response, err error) {
+// result is the default implementation for RouteTablesDeleteFuture.Result.
+func (future *RouteTablesDeleteFuture) result(client RouteTablesClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -21364,6 +22648,7 @@ func (future *RouteTablesDeleteFuture) Result(client RouteTablesClient) (ar auto
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.RouteTablesDeleteFuture")
return
}
@@ -21374,12 +22659,25 @@ func (future *RouteTablesDeleteFuture) Result(client RouteTablesClient) (ar auto
// RouteTablesUpdateTagsFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type RouteTablesUpdateTagsFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(RouteTablesClient) (RouteTable, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *RouteTablesUpdateTagsFuture) Result(client RouteTablesClient) (rt RouteTable, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *RouteTablesUpdateTagsFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for RouteTablesUpdateTagsFuture.Result.
+func (future *RouteTablesUpdateTagsFuture) result(client RouteTablesClient) (rt RouteTable, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -21387,6 +22685,7 @@ func (future *RouteTablesUpdateTagsFuture) Result(client RouteTablesClient) (rt
return
}
if !done {
+ rt.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.RouteTablesUpdateTagsFuture")
return
}
@@ -21738,12 +23037,25 @@ func (sgr SecurityGroupResult) MarshalJSON() ([]byte, error) {
// SecurityGroupsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type SecurityGroupsCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(SecurityGroupsClient) (SecurityGroup, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *SecurityGroupsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *SecurityGroupsCreateOrUpdateFuture) Result(client SecurityGroupsClient) (sg SecurityGroup, err error) {
+// result is the default implementation for SecurityGroupsCreateOrUpdateFuture.Result.
+func (future *SecurityGroupsCreateOrUpdateFuture) result(client SecurityGroupsClient) (sg SecurityGroup, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -21751,6 +23063,7 @@ func (future *SecurityGroupsCreateOrUpdateFuture) Result(client SecurityGroupsCl
return
}
if !done {
+ sg.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.SecurityGroupsCreateOrUpdateFuture")
return
}
@@ -21767,12 +23080,25 @@ func (future *SecurityGroupsCreateOrUpdateFuture) Result(client SecurityGroupsCl
// SecurityGroupsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type SecurityGroupsDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(SecurityGroupsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *SecurityGroupsDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *SecurityGroupsDeleteFuture) Result(client SecurityGroupsClient) (ar autorest.Response, err error) {
+// result is the default implementation for SecurityGroupsDeleteFuture.Result.
+func (future *SecurityGroupsDeleteFuture) result(client SecurityGroupsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -21780,6 +23106,7 @@ func (future *SecurityGroupsDeleteFuture) Result(client SecurityGroupsClient) (a
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.SecurityGroupsDeleteFuture")
return
}
@@ -21790,19 +23117,33 @@ func (future *SecurityGroupsDeleteFuture) Result(client SecurityGroupsClient) (a
// SecurityGroupsUpdateTagsFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type SecurityGroupsUpdateTagsFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(SecurityGroupsClient) (SecurityGroup, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *SecurityGroupsUpdateTagsFuture) Result(client SecurityGroupsClient) (sg SecurityGroup, err error) {
- var done bool
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *SecurityGroupsUpdateTagsFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for SecurityGroupsUpdateTagsFuture.Result.
+func (future *SecurityGroupsUpdateTagsFuture) result(client SecurityGroupsClient) (sg SecurityGroup, err error) {
+ var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
err = autorest.NewErrorWithError(err, "network.SecurityGroupsUpdateTagsFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
+ sg.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.SecurityGroupsUpdateTagsFuture")
return
}
@@ -22120,12 +23461,25 @@ type SecurityRulePropertiesFormat struct {
// SecurityRulesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type SecurityRulesCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(SecurityRulesClient) (SecurityRule, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *SecurityRulesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *SecurityRulesCreateOrUpdateFuture) Result(client SecurityRulesClient) (sr SecurityRule, err error) {
+// result is the default implementation for SecurityRulesCreateOrUpdateFuture.Result.
+func (future *SecurityRulesCreateOrUpdateFuture) result(client SecurityRulesClient) (sr SecurityRule, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -22133,6 +23487,7 @@ func (future *SecurityRulesCreateOrUpdateFuture) Result(client SecurityRulesClie
return
}
if !done {
+ sr.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.SecurityRulesCreateOrUpdateFuture")
return
}
@@ -22149,12 +23504,25 @@ func (future *SecurityRulesCreateOrUpdateFuture) Result(client SecurityRulesClie
// SecurityRulesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type SecurityRulesDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(SecurityRulesClient) (autorest.Response, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *SecurityRulesDeleteFuture) Result(client SecurityRulesClient) (ar autorest.Response, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *SecurityRulesDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for SecurityRulesDeleteFuture.Result.
+func (future *SecurityRulesDeleteFuture) result(client SecurityRulesClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -22162,6 +23530,7 @@ func (future *SecurityRulesDeleteFuture) Result(client SecurityRulesClient) (ar
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.SecurityRulesDeleteFuture")
return
}
@@ -22310,12 +23679,25 @@ func (sdpf ServiceDelegationPropertiesFormat) MarshalJSON() ([]byte, error) {
// ServiceEndpointPoliciesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of
// a long-running operation.
type ServiceEndpointPoliciesCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ServiceEndpointPoliciesClient) (ServiceEndpointPolicy, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ServiceEndpointPoliciesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ServiceEndpointPoliciesCreateOrUpdateFuture) Result(client ServiceEndpointPoliciesClient) (sep ServiceEndpointPolicy, err error) {
+// result is the default implementation for ServiceEndpointPoliciesCreateOrUpdateFuture.Result.
+func (future *ServiceEndpointPoliciesCreateOrUpdateFuture) result(client ServiceEndpointPoliciesClient) (sep ServiceEndpointPolicy, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -22323,6 +23705,7 @@ func (future *ServiceEndpointPoliciesCreateOrUpdateFuture) Result(client Service
return
}
if !done {
+ sep.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ServiceEndpointPoliciesCreateOrUpdateFuture")
return
}
@@ -22339,12 +23722,25 @@ func (future *ServiceEndpointPoliciesCreateOrUpdateFuture) Result(client Service
// ServiceEndpointPoliciesDeleteFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type ServiceEndpointPoliciesDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ServiceEndpointPoliciesClient) (autorest.Response, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ServiceEndpointPoliciesDeleteFuture) Result(client ServiceEndpointPoliciesClient) (ar autorest.Response, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ServiceEndpointPoliciesDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for ServiceEndpointPoliciesDeleteFuture.Result.
+func (future *ServiceEndpointPoliciesDeleteFuture) result(client ServiceEndpointPoliciesClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -22352,6 +23748,7 @@ func (future *ServiceEndpointPoliciesDeleteFuture) Result(client ServiceEndpoint
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ServiceEndpointPoliciesDeleteFuture")
return
}
@@ -22362,12 +23759,25 @@ func (future *ServiceEndpointPoliciesDeleteFuture) Result(client ServiceEndpoint
// ServiceEndpointPoliciesUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type ServiceEndpointPoliciesUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ServiceEndpointPoliciesClient) (ServiceEndpointPolicy, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ServiceEndpointPoliciesUpdateFuture) Result(client ServiceEndpointPoliciesClient) (sep ServiceEndpointPolicy, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ServiceEndpointPoliciesUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for ServiceEndpointPoliciesUpdateFuture.Result.
+func (future *ServiceEndpointPoliciesUpdateFuture) result(client ServiceEndpointPoliciesClient) (sep ServiceEndpointPolicy, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -22375,6 +23785,7 @@ func (future *ServiceEndpointPoliciesUpdateFuture) Result(client ServiceEndpoint
return
}
if !done {
+ sep.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ServiceEndpointPoliciesUpdateFuture")
return
}
@@ -22779,12 +24190,25 @@ func (sepdpf ServiceEndpointPolicyDefinitionPropertiesFormat) MarshalJSON() ([]b
// ServiceEndpointPolicyDefinitionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the
// results of a long-running operation.
type ServiceEndpointPolicyDefinitionsCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ServiceEndpointPolicyDefinitionsClient) (ServiceEndpointPolicyDefinition, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ServiceEndpointPolicyDefinitionsCreateOrUpdateFuture) Result(client ServiceEndpointPolicyDefinitionsClient) (sepd ServiceEndpointPolicyDefinition, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ServiceEndpointPolicyDefinitionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for ServiceEndpointPolicyDefinitionsCreateOrUpdateFuture.Result.
+func (future *ServiceEndpointPolicyDefinitionsCreateOrUpdateFuture) result(client ServiceEndpointPolicyDefinitionsClient) (sepd ServiceEndpointPolicyDefinition, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -22792,6 +24216,7 @@ func (future *ServiceEndpointPolicyDefinitionsCreateOrUpdateFuture) Result(clien
return
}
if !done {
+ sepd.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ServiceEndpointPolicyDefinitionsCreateOrUpdateFuture")
return
}
@@ -22808,12 +24233,25 @@ func (future *ServiceEndpointPolicyDefinitionsCreateOrUpdateFuture) Result(clien
// ServiceEndpointPolicyDefinitionsDeleteFuture an abstraction for monitoring and retrieving the results of
// a long-running operation.
type ServiceEndpointPolicyDefinitionsDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(ServiceEndpointPolicyDefinitionsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *ServiceEndpointPolicyDefinitionsDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *ServiceEndpointPolicyDefinitionsDeleteFuture) Result(client ServiceEndpointPolicyDefinitionsClient) (ar autorest.Response, err error) {
+// result is the default implementation for ServiceEndpointPolicyDefinitionsDeleteFuture.Result.
+func (future *ServiceEndpointPolicyDefinitionsDeleteFuture) result(client ServiceEndpointPolicyDefinitionsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -22821,6 +24259,7 @@ func (future *ServiceEndpointPolicyDefinitionsDeleteFuture) Result(client Servic
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.ServiceEndpointPolicyDefinitionsDeleteFuture")
return
}
@@ -23364,12 +24803,25 @@ func (spf SubnetPropertiesFormat) MarshalJSON() ([]byte, error) {
// SubnetsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type SubnetsCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(SubnetsClient) (Subnet, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *SubnetsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *SubnetsCreateOrUpdateFuture) Result(client SubnetsClient) (s Subnet, err error) {
+// result is the default implementation for SubnetsCreateOrUpdateFuture.Result.
+func (future *SubnetsCreateOrUpdateFuture) result(client SubnetsClient) (s Subnet, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -23377,6 +24829,7 @@ func (future *SubnetsCreateOrUpdateFuture) Result(client SubnetsClient) (s Subne
return
}
if !done {
+ s.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.SubnetsCreateOrUpdateFuture")
return
}
@@ -23393,12 +24846,25 @@ func (future *SubnetsCreateOrUpdateFuture) Result(client SubnetsClient) (s Subne
// SubnetsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type SubnetsDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(SubnetsClient) (autorest.Response, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *SubnetsDeleteFuture) Result(client SubnetsClient) (ar autorest.Response, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *SubnetsDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for SubnetsDeleteFuture.Result.
+func (future *SubnetsDeleteFuture) result(client SubnetsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -23406,6 +24872,7 @@ func (future *SubnetsDeleteFuture) Result(client SubnetsClient) (ar autorest.Res
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.SubnetsDeleteFuture")
return
}
@@ -23619,6 +25086,12 @@ type TunnelConnectionHealth struct {
LastConnectionEstablishedUtcTime *string `json:"lastConnectionEstablishedUtcTime,omitempty"`
}
+// MarshalJSON is the custom marshaler for TunnelConnectionHealth.
+func (tch TunnelConnectionHealth) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// Usage describes network resource usage.
type Usage struct {
// ID - READ-ONLY; Resource identifier.
@@ -24004,12 +25477,25 @@ type VirtualHubRouteTable struct {
// VirtualHubsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualHubsCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualHubsClient) (VirtualHub, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualHubsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualHubsCreateOrUpdateFuture) Result(client VirtualHubsClient) (vh VirtualHub, err error) {
+// result is the default implementation for VirtualHubsCreateOrUpdateFuture.Result.
+func (future *VirtualHubsCreateOrUpdateFuture) result(client VirtualHubsClient) (vh VirtualHub, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -24017,6 +25503,7 @@ func (future *VirtualHubsCreateOrUpdateFuture) Result(client VirtualHubsClient)
return
}
if !done {
+ vh.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VirtualHubsCreateOrUpdateFuture")
return
}
@@ -24033,12 +25520,25 @@ func (future *VirtualHubsCreateOrUpdateFuture) Result(client VirtualHubsClient)
// VirtualHubsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type VirtualHubsDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualHubsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualHubsDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualHubsDeleteFuture) Result(client VirtualHubsClient) (ar autorest.Response, err error) {
+// result is the default implementation for VirtualHubsDeleteFuture.Result.
+func (future *VirtualHubsDeleteFuture) result(client VirtualHubsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -24046,6 +25546,7 @@ func (future *VirtualHubsDeleteFuture) Result(client VirtualHubsClient) (ar auto
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VirtualHubsDeleteFuture")
return
}
@@ -24056,12 +25557,25 @@ func (future *VirtualHubsDeleteFuture) Result(client VirtualHubsClient) (ar auto
// VirtualHubsUpdateTagsFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type VirtualHubsUpdateTagsFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualHubsClient) (VirtualHub, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualHubsUpdateTagsFuture) Result(client VirtualHubsClient) (vh VirtualHub, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualHubsUpdateTagsFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualHubsUpdateTagsFuture.Result.
+func (future *VirtualHubsUpdateTagsFuture) result(client VirtualHubsClient) (vh VirtualHub, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -24069,6 +25583,7 @@ func (future *VirtualHubsUpdateTagsFuture) Result(client VirtualHubsClient) (vh
return
}
if !done {
+ vh.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VirtualHubsUpdateTagsFuture")
return
}
@@ -24913,12 +26428,25 @@ func (vngcpf VirtualNetworkGatewayConnectionPropertiesFormat) MarshalJSON() ([]b
// VirtualNetworkGatewayConnectionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the
// results of a long-running operation.
type VirtualNetworkGatewayConnectionsCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualNetworkGatewayConnectionsClient) (VirtualNetworkGatewayConnection, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualNetworkGatewayConnectionsCreateOrUpdateFuture) Result(client VirtualNetworkGatewayConnectionsClient) (vngc VirtualNetworkGatewayConnection, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualNetworkGatewayConnectionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualNetworkGatewayConnectionsCreateOrUpdateFuture.Result.
+func (future *VirtualNetworkGatewayConnectionsCreateOrUpdateFuture) result(client VirtualNetworkGatewayConnectionsClient) (vngc VirtualNetworkGatewayConnection, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -24926,6 +26454,7 @@ func (future *VirtualNetworkGatewayConnectionsCreateOrUpdateFuture) Result(clien
return
}
if !done {
+ vngc.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewayConnectionsCreateOrUpdateFuture")
return
}
@@ -24942,12 +26471,25 @@ func (future *VirtualNetworkGatewayConnectionsCreateOrUpdateFuture) Result(clien
// VirtualNetworkGatewayConnectionsDeleteFuture an abstraction for monitoring and retrieving the results of
// a long-running operation.
type VirtualNetworkGatewayConnectionsDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualNetworkGatewayConnectionsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualNetworkGatewayConnectionsDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualNetworkGatewayConnectionsDeleteFuture) Result(client VirtualNetworkGatewayConnectionsClient) (ar autorest.Response, err error) {
+// result is the default implementation for VirtualNetworkGatewayConnectionsDeleteFuture.Result.
+func (future *VirtualNetworkGatewayConnectionsDeleteFuture) result(client VirtualNetworkGatewayConnectionsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -24955,6 +26497,7 @@ func (future *VirtualNetworkGatewayConnectionsDeleteFuture) Result(client Virtua
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewayConnectionsDeleteFuture")
return
}
@@ -24965,12 +26508,25 @@ func (future *VirtualNetworkGatewayConnectionsDeleteFuture) Result(client Virtua
// VirtualNetworkGatewayConnectionsResetSharedKeyFuture an abstraction for monitoring and retrieving the
// results of a long-running operation.
type VirtualNetworkGatewayConnectionsResetSharedKeyFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualNetworkGatewayConnectionsClient) (ConnectionResetSharedKey, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualNetworkGatewayConnectionsResetSharedKeyFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualNetworkGatewayConnectionsResetSharedKeyFuture) Result(client VirtualNetworkGatewayConnectionsClient) (crsk ConnectionResetSharedKey, err error) {
+// result is the default implementation for VirtualNetworkGatewayConnectionsResetSharedKeyFuture.Result.
+func (future *VirtualNetworkGatewayConnectionsResetSharedKeyFuture) result(client VirtualNetworkGatewayConnectionsClient) (crsk ConnectionResetSharedKey, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -24978,6 +26534,7 @@ func (future *VirtualNetworkGatewayConnectionsResetSharedKeyFuture) Result(clien
return
}
if !done {
+ crsk.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewayConnectionsResetSharedKeyFuture")
return
}
@@ -24994,12 +26551,25 @@ func (future *VirtualNetworkGatewayConnectionsResetSharedKeyFuture) Result(clien
// VirtualNetworkGatewayConnectionsSetSharedKeyFuture an abstraction for monitoring and retrieving the
// results of a long-running operation.
type VirtualNetworkGatewayConnectionsSetSharedKeyFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualNetworkGatewayConnectionsClient) (ConnectionSharedKey, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualNetworkGatewayConnectionsSetSharedKeyFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualNetworkGatewayConnectionsSetSharedKeyFuture) Result(client VirtualNetworkGatewayConnectionsClient) (csk ConnectionSharedKey, err error) {
+// result is the default implementation for VirtualNetworkGatewayConnectionsSetSharedKeyFuture.Result.
+func (future *VirtualNetworkGatewayConnectionsSetSharedKeyFuture) result(client VirtualNetworkGatewayConnectionsClient) (csk ConnectionSharedKey, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -25007,6 +26577,7 @@ func (future *VirtualNetworkGatewayConnectionsSetSharedKeyFuture) Result(client
return
}
if !done {
+ csk.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewayConnectionsSetSharedKeyFuture")
return
}
@@ -25023,12 +26594,25 @@ func (future *VirtualNetworkGatewayConnectionsSetSharedKeyFuture) Result(client
// VirtualNetworkGatewayConnectionsUpdateTagsFuture an abstraction for monitoring and retrieving the
// results of a long-running operation.
type VirtualNetworkGatewayConnectionsUpdateTagsFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualNetworkGatewayConnectionsClient) (VirtualNetworkGatewayConnection, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualNetworkGatewayConnectionsUpdateTagsFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualNetworkGatewayConnectionsUpdateTagsFuture) Result(client VirtualNetworkGatewayConnectionsClient) (vngc VirtualNetworkGatewayConnection, err error) {
+// result is the default implementation for VirtualNetworkGatewayConnectionsUpdateTagsFuture.Result.
+func (future *VirtualNetworkGatewayConnectionsUpdateTagsFuture) result(client VirtualNetworkGatewayConnectionsClient) (vngc VirtualNetworkGatewayConnection, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -25036,6 +26620,7 @@ func (future *VirtualNetworkGatewayConnectionsUpdateTagsFuture) Result(client Vi
return
}
if !done {
+ vngc.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewayConnectionsUpdateTagsFuture")
return
}
@@ -25562,12 +27147,25 @@ func (vngpf VirtualNetworkGatewayPropertiesFormat) MarshalJSON() ([]byte, error)
// VirtualNetworkGatewaysCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualNetworkGatewaysCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualNetworkGatewaysClient) (VirtualNetworkGateway, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualNetworkGatewaysCreateOrUpdateFuture) Result(client VirtualNetworkGatewaysClient) (vng VirtualNetworkGateway, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualNetworkGatewaysCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualNetworkGatewaysCreateOrUpdateFuture.Result.
+func (future *VirtualNetworkGatewaysCreateOrUpdateFuture) result(client VirtualNetworkGatewaysClient) (vng VirtualNetworkGateway, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -25575,6 +27173,7 @@ func (future *VirtualNetworkGatewaysCreateOrUpdateFuture) Result(client VirtualN
return
}
if !done {
+ vng.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysCreateOrUpdateFuture")
return
}
@@ -25591,12 +27190,25 @@ func (future *VirtualNetworkGatewaysCreateOrUpdateFuture) Result(client VirtualN
// VirtualNetworkGatewaysDeleteFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualNetworkGatewaysDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualNetworkGatewaysClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualNetworkGatewaysDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualNetworkGatewaysDeleteFuture) Result(client VirtualNetworkGatewaysClient) (ar autorest.Response, err error) {
+// result is the default implementation for VirtualNetworkGatewaysDeleteFuture.Result.
+func (future *VirtualNetworkGatewaysDeleteFuture) result(client VirtualNetworkGatewaysClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -25604,6 +27216,7 @@ func (future *VirtualNetworkGatewaysDeleteFuture) Result(client VirtualNetworkGa
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysDeleteFuture")
return
}
@@ -25614,12 +27227,25 @@ func (future *VirtualNetworkGatewaysDeleteFuture) Result(client VirtualNetworkGa
// VirtualNetworkGatewaysGeneratevpnclientpackageFuture an abstraction for monitoring and retrieving the
// results of a long-running operation.
type VirtualNetworkGatewaysGeneratevpnclientpackageFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualNetworkGatewaysClient) (String, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualNetworkGatewaysGeneratevpnclientpackageFuture) Result(client VirtualNetworkGatewaysClient) (s String, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualNetworkGatewaysGeneratevpnclientpackageFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualNetworkGatewaysGeneratevpnclientpackageFuture.Result.
+func (future *VirtualNetworkGatewaysGeneratevpnclientpackageFuture) result(client VirtualNetworkGatewaysClient) (s String, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -25627,6 +27253,7 @@ func (future *VirtualNetworkGatewaysGeneratevpnclientpackageFuture) Result(clien
return
}
if !done {
+ s.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGeneratevpnclientpackageFuture")
return
}
@@ -25643,12 +27270,25 @@ func (future *VirtualNetworkGatewaysGeneratevpnclientpackageFuture) Result(clien
// VirtualNetworkGatewaysGenerateVpnProfileFuture an abstraction for monitoring and retrieving the results
// of a long-running operation.
type VirtualNetworkGatewaysGenerateVpnProfileFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualNetworkGatewaysClient) (String, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualNetworkGatewaysGenerateVpnProfileFuture) Result(client VirtualNetworkGatewaysClient) (s String, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualNetworkGatewaysGenerateVpnProfileFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualNetworkGatewaysGenerateVpnProfileFuture.Result.
+func (future *VirtualNetworkGatewaysGenerateVpnProfileFuture) result(client VirtualNetworkGatewaysClient) (s String, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -25656,6 +27296,7 @@ func (future *VirtualNetworkGatewaysGenerateVpnProfileFuture) Result(client Virt
return
}
if !done {
+ s.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGenerateVpnProfileFuture")
return
}
@@ -25672,12 +27313,25 @@ func (future *VirtualNetworkGatewaysGenerateVpnProfileFuture) Result(client Virt
// VirtualNetworkGatewaysGetAdvertisedRoutesFuture an abstraction for monitoring and retrieving the results
// of a long-running operation.
type VirtualNetworkGatewaysGetAdvertisedRoutesFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualNetworkGatewaysClient) (GatewayRouteListResult, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualNetworkGatewaysGetAdvertisedRoutesFuture) Result(client VirtualNetworkGatewaysClient) (grlr GatewayRouteListResult, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualNetworkGatewaysGetAdvertisedRoutesFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualNetworkGatewaysGetAdvertisedRoutesFuture.Result.
+func (future *VirtualNetworkGatewaysGetAdvertisedRoutesFuture) result(client VirtualNetworkGatewaysClient) (grlr GatewayRouteListResult, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -25685,6 +27339,7 @@ func (future *VirtualNetworkGatewaysGetAdvertisedRoutesFuture) Result(client Vir
return
}
if !done {
+ grlr.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGetAdvertisedRoutesFuture")
return
}
@@ -25701,12 +27356,25 @@ func (future *VirtualNetworkGatewaysGetAdvertisedRoutesFuture) Result(client Vir
// VirtualNetworkGatewaysGetBgpPeerStatusFuture an abstraction for monitoring and retrieving the results of
// a long-running operation.
type VirtualNetworkGatewaysGetBgpPeerStatusFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualNetworkGatewaysClient) (BgpPeerStatusListResult, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualNetworkGatewaysGetBgpPeerStatusFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualNetworkGatewaysGetBgpPeerStatusFuture) Result(client VirtualNetworkGatewaysClient) (bpslr BgpPeerStatusListResult, err error) {
+// result is the default implementation for VirtualNetworkGatewaysGetBgpPeerStatusFuture.Result.
+func (future *VirtualNetworkGatewaysGetBgpPeerStatusFuture) result(client VirtualNetworkGatewaysClient) (bpslr BgpPeerStatusListResult, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -25714,6 +27382,7 @@ func (future *VirtualNetworkGatewaysGetBgpPeerStatusFuture) Result(client Virtua
return
}
if !done {
+ bpslr.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGetBgpPeerStatusFuture")
return
}
@@ -25730,12 +27399,25 @@ func (future *VirtualNetworkGatewaysGetBgpPeerStatusFuture) Result(client Virtua
// VirtualNetworkGatewaysGetLearnedRoutesFuture an abstraction for monitoring and retrieving the results of
// a long-running operation.
type VirtualNetworkGatewaysGetLearnedRoutesFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualNetworkGatewaysClient) (GatewayRouteListResult, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualNetworkGatewaysGetLearnedRoutesFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualNetworkGatewaysGetLearnedRoutesFuture) Result(client VirtualNetworkGatewaysClient) (grlr GatewayRouteListResult, err error) {
+// result is the default implementation for VirtualNetworkGatewaysGetLearnedRoutesFuture.Result.
+func (future *VirtualNetworkGatewaysGetLearnedRoutesFuture) result(client VirtualNetworkGatewaysClient) (grlr GatewayRouteListResult, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -25743,6 +27425,7 @@ func (future *VirtualNetworkGatewaysGetLearnedRoutesFuture) Result(client Virtua
return
}
if !done {
+ grlr.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGetLearnedRoutesFuture")
return
}
@@ -25759,12 +27442,25 @@ func (future *VirtualNetworkGatewaysGetLearnedRoutesFuture) Result(client Virtua
// VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture an abstraction for monitoring and retrieving the
// results of a long-running operation.
type VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualNetworkGatewaysClient) (VpnClientIPsecParameters, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture) Result(client VirtualNetworkGatewaysClient) (vcipp VpnClientIPsecParameters, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture.Result.
+func (future *VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture) result(client VirtualNetworkGatewaysClient) (vcipp VpnClientIPsecParameters, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -25772,6 +27468,7 @@ func (future *VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture) Result(cl
return
}
if !done {
+ vcipp.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture")
return
}
@@ -25788,12 +27485,25 @@ func (future *VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture) Result(cl
// VirtualNetworkGatewaysGetVpnProfilePackageURLFuture an abstraction for monitoring and retrieving the
// results of a long-running operation.
type VirtualNetworkGatewaysGetVpnProfilePackageURLFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualNetworkGatewaysClient) (String, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualNetworkGatewaysGetVpnProfilePackageURLFuture) Result(client VirtualNetworkGatewaysClient) (s String, err error) {
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualNetworkGatewaysGetVpnProfilePackageURLFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualNetworkGatewaysGetVpnProfilePackageURLFuture.Result.
+func (future *VirtualNetworkGatewaysGetVpnProfilePackageURLFuture) result(client VirtualNetworkGatewaysClient) (s String, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -25801,6 +27511,7 @@ func (future *VirtualNetworkGatewaysGetVpnProfilePackageURLFuture) Result(client
return
}
if !done {
+ s.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGetVpnProfilePackageURLFuture")
return
}
@@ -25827,12 +27538,25 @@ type VirtualNetworkGatewaySku struct {
// VirtualNetworkGatewaysResetFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualNetworkGatewaysResetFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualNetworkGatewaysClient) (VirtualNetworkGateway, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *VirtualNetworkGatewaysResetFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualNetworkGatewaysResetFuture) Result(client VirtualNetworkGatewaysClient) (vng VirtualNetworkGateway, err error) {
+// result is the default implementation for VirtualNetworkGatewaysResetFuture.Result.
+func (future *VirtualNetworkGatewaysResetFuture) result(client VirtualNetworkGatewaysClient) (vng VirtualNetworkGateway, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -25840,6 +27564,7 @@ func (future *VirtualNetworkGatewaysResetFuture) Result(client VirtualNetworkGat
return
}
if !done {
+ vng.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysResetFuture")
return
}
@@ -25856,12 +27581,25 @@ func (future *VirtualNetworkGatewaysResetFuture) Result(client VirtualNetworkGat
// VirtualNetworkGatewaysResetVpnClientSharedKeyFuture an abstraction for monitoring and retrieving the
// results of a long-running operation.
type VirtualNetworkGatewaysResetVpnClientSharedKeyFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualNetworkGatewaysClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for VirtualNetworkGatewaysResetVpnClientSharedKeyFuture.
+func (future *VirtualNetworkGatewaysResetVpnClientSharedKeyFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualNetworkGatewaysResetVpnClientSharedKeyFuture) Result(client VirtualNetworkGatewaysClient) (ar autorest.Response, err error) {
+// result is the default implementation for VirtualNetworkGatewaysResetVpnClientSharedKeyFuture.Result.
+func (future *VirtualNetworkGatewaysResetVpnClientSharedKeyFuture) result(client VirtualNetworkGatewaysClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -25869,6 +27607,7 @@ func (future *VirtualNetworkGatewaysResetVpnClientSharedKeyFuture) Result(client
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysResetVpnClientSharedKeyFuture")
return
}
@@ -25879,12 +27618,25 @@ func (future *VirtualNetworkGatewaysResetVpnClientSharedKeyFuture) Result(client
// VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture an abstraction for monitoring and retrieving the
// results of a long-running operation.
type VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualNetworkGatewaysClient) (VpnClientIPsecParameters, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture) Result(client VirtualNetworkGatewaysClient) (vcipp VpnClientIPsecParameters, err error) {
+// UnmarshalJSON is the custom unmarshaller for VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture.
+func (future *VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture.Result.
+func (future *VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture) result(client VirtualNetworkGatewaysClient) (vcipp VpnClientIPsecParameters, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -25892,6 +27644,7 @@ func (future *VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture) Result(cl
return
}
if !done {
+ vcipp.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture")
return
}
@@ -25908,12 +27661,25 @@ func (future *VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture) Result(cl
// VirtualNetworkGatewaysUpdateTagsFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualNetworkGatewaysUpdateTagsFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualNetworkGatewaysClient) (VirtualNetworkGateway, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualNetworkGatewaysUpdateTagsFuture) Result(client VirtualNetworkGatewaysClient) (vng VirtualNetworkGateway, err error) {
+// UnmarshalJSON is the custom unmarshaller for VirtualNetworkGatewaysUpdateTagsFuture.
+func (future *VirtualNetworkGatewaysUpdateTagsFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualNetworkGatewaysUpdateTagsFuture.Result.
+func (future *VirtualNetworkGatewaysUpdateTagsFuture) result(client VirtualNetworkGatewaysClient) (vng VirtualNetworkGateway, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -25921,6 +27687,7 @@ func (future *VirtualNetworkGatewaysUpdateTagsFuture) Result(client VirtualNetwo
return
}
if !done {
+ vng.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysUpdateTagsFuture")
return
}
@@ -26528,12 +28295,25 @@ type VirtualNetworkPeeringPropertiesFormat struct {
// VirtualNetworkPeeringsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualNetworkPeeringsCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualNetworkPeeringsClient) (VirtualNetworkPeering, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for VirtualNetworkPeeringsCreateOrUpdateFuture.
+func (future *VirtualNetworkPeeringsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualNetworkPeeringsCreateOrUpdateFuture) Result(client VirtualNetworkPeeringsClient) (vnp VirtualNetworkPeering, err error) {
+// result is the default implementation for VirtualNetworkPeeringsCreateOrUpdateFuture.Result.
+func (future *VirtualNetworkPeeringsCreateOrUpdateFuture) result(client VirtualNetworkPeeringsClient) (vnp VirtualNetworkPeering, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -26541,6 +28321,7 @@ func (future *VirtualNetworkPeeringsCreateOrUpdateFuture) Result(client VirtualN
return
}
if !done {
+ vnp.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkPeeringsCreateOrUpdateFuture")
return
}
@@ -26557,12 +28338,25 @@ func (future *VirtualNetworkPeeringsCreateOrUpdateFuture) Result(client VirtualN
// VirtualNetworkPeeringsDeleteFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualNetworkPeeringsDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualNetworkPeeringsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for VirtualNetworkPeeringsDeleteFuture.
+func (future *VirtualNetworkPeeringsDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualNetworkPeeringsDeleteFuture) Result(client VirtualNetworkPeeringsClient) (ar autorest.Response, err error) {
+// result is the default implementation for VirtualNetworkPeeringsDeleteFuture.Result.
+func (future *VirtualNetworkPeeringsDeleteFuture) result(client VirtualNetworkPeeringsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -26570,6 +28364,7 @@ func (future *VirtualNetworkPeeringsDeleteFuture) Result(client VirtualNetworkPe
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkPeeringsDeleteFuture")
return
}
@@ -26602,12 +28397,25 @@ type VirtualNetworkPropertiesFormat struct {
// VirtualNetworksCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualNetworksCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualNetworksClient) (VirtualNetwork, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualNetworksCreateOrUpdateFuture) Result(client VirtualNetworksClient) (vn VirtualNetwork, err error) {
+// UnmarshalJSON is the custom unmarshaller for VirtualNetworksCreateOrUpdateFuture.
+func (future *VirtualNetworksCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualNetworksCreateOrUpdateFuture.Result.
+func (future *VirtualNetworksCreateOrUpdateFuture) result(client VirtualNetworksClient) (vn VirtualNetwork, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -26615,6 +28423,7 @@ func (future *VirtualNetworksCreateOrUpdateFuture) Result(client VirtualNetworks
return
}
if !done {
+ vn.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VirtualNetworksCreateOrUpdateFuture")
return
}
@@ -26631,12 +28440,25 @@ func (future *VirtualNetworksCreateOrUpdateFuture) Result(client VirtualNetworks
// VirtualNetworksDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type VirtualNetworksDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualNetworksClient) (autorest.Response, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualNetworksDeleteFuture) Result(client VirtualNetworksClient) (ar autorest.Response, err error) {
+// UnmarshalJSON is the custom unmarshaller for VirtualNetworksDeleteFuture.
+func (future *VirtualNetworksDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualNetworksDeleteFuture.Result.
+func (future *VirtualNetworksDeleteFuture) result(client VirtualNetworksClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -26644,6 +28466,7 @@ func (future *VirtualNetworksDeleteFuture) Result(client VirtualNetworksClient)
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VirtualNetworksDeleteFuture")
return
}
@@ -26654,12 +28477,25 @@ func (future *VirtualNetworksDeleteFuture) Result(client VirtualNetworksClient)
// VirtualNetworksUpdateTagsFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualNetworksUpdateTagsFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualNetworksClient) (VirtualNetwork, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for VirtualNetworksUpdateTagsFuture.
+func (future *VirtualNetworksUpdateTagsFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualNetworksUpdateTagsFuture) Result(client VirtualNetworksClient) (vn VirtualNetwork, err error) {
+// result is the default implementation for VirtualNetworksUpdateTagsFuture.Result.
+func (future *VirtualNetworksUpdateTagsFuture) result(client VirtualNetworksClient) (vn VirtualNetwork, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -26667,6 +28503,7 @@ func (future *VirtualNetworksUpdateTagsFuture) Result(client VirtualNetworksClie
return
}
if !done {
+ vn.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VirtualNetworksUpdateTagsFuture")
return
}
@@ -26991,12 +28828,25 @@ func (vntpf VirtualNetworkTapPropertiesFormat) MarshalJSON() ([]byte, error) {
// VirtualNetworkTapsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualNetworkTapsCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualNetworkTapsClient) (VirtualNetworkTap, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for VirtualNetworkTapsCreateOrUpdateFuture.
+func (future *VirtualNetworkTapsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualNetworkTapsCreateOrUpdateFuture) Result(client VirtualNetworkTapsClient) (vnt VirtualNetworkTap, err error) {
+// result is the default implementation for VirtualNetworkTapsCreateOrUpdateFuture.Result.
+func (future *VirtualNetworkTapsCreateOrUpdateFuture) result(client VirtualNetworkTapsClient) (vnt VirtualNetworkTap, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -27004,6 +28854,7 @@ func (future *VirtualNetworkTapsCreateOrUpdateFuture) Result(client VirtualNetwo
return
}
if !done {
+ vnt.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkTapsCreateOrUpdateFuture")
return
}
@@ -27020,12 +28871,25 @@ func (future *VirtualNetworkTapsCreateOrUpdateFuture) Result(client VirtualNetwo
// VirtualNetworkTapsDeleteFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualNetworkTapsDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualNetworkTapsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for VirtualNetworkTapsDeleteFuture.
+func (future *VirtualNetworkTapsDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualNetworkTapsDeleteFuture) Result(client VirtualNetworkTapsClient) (ar autorest.Response, err error) {
+// result is the default implementation for VirtualNetworkTapsDeleteFuture.Result.
+func (future *VirtualNetworkTapsDeleteFuture) result(client VirtualNetworkTapsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -27033,6 +28897,7 @@ func (future *VirtualNetworkTapsDeleteFuture) Result(client VirtualNetworkTapsCl
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkTapsDeleteFuture")
return
}
@@ -27043,12 +28908,25 @@ func (future *VirtualNetworkTapsDeleteFuture) Result(client VirtualNetworkTapsCl
// VirtualNetworkTapsUpdateTagsFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualNetworkTapsUpdateTagsFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualNetworkTapsClient) (VirtualNetworkTap, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for VirtualNetworkTapsUpdateTagsFuture.
+func (future *VirtualNetworkTapsUpdateTagsFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualNetworkTapsUpdateTagsFuture) Result(client VirtualNetworkTapsClient) (vnt VirtualNetworkTap, err error) {
+// result is the default implementation for VirtualNetworkTapsUpdateTagsFuture.Result.
+func (future *VirtualNetworkTapsUpdateTagsFuture) result(client VirtualNetworkTapsClient) (vnt VirtualNetworkTap, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -27056,6 +28934,7 @@ func (future *VirtualNetworkTapsUpdateTagsFuture) Result(client VirtualNetworkTa
return
}
if !done {
+ vnt.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkTapsUpdateTagsFuture")
return
}
@@ -27083,6 +28962,12 @@ type VirtualNetworkUsage struct {
Unit *string `json:"unit,omitempty"`
}
+// MarshalJSON is the custom marshaler for VirtualNetworkUsage.
+func (vnu VirtualNetworkUsage) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// VirtualNetworkUsageName usage strings container.
type VirtualNetworkUsageName struct {
// LocalizedValue - READ-ONLY; Localized subnet size and usage string.
@@ -27091,6 +28976,12 @@ type VirtualNetworkUsageName struct {
Value *string `json:"value,omitempty"`
}
+// MarshalJSON is the custom marshaler for VirtualNetworkUsageName.
+func (vnun VirtualNetworkUsageName) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// VirtualWAN virtualWAN Resource.
type VirtualWAN struct {
autorest.Response `json:"-"`
@@ -27257,12 +29148,25 @@ func (vwp VirtualWanProperties) MarshalJSON() ([]byte, error) {
// VirtualWansCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VirtualWansCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualWansClient) (VirtualWAN, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for VirtualWansCreateOrUpdateFuture.
+func (future *VirtualWansCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualWansCreateOrUpdateFuture) Result(client VirtualWansClient) (vw VirtualWAN, err error) {
+// result is the default implementation for VirtualWansCreateOrUpdateFuture.Result.
+func (future *VirtualWansCreateOrUpdateFuture) result(client VirtualWansClient) (vw VirtualWAN, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -27270,6 +29174,7 @@ func (future *VirtualWansCreateOrUpdateFuture) Result(client VirtualWansClient)
return
}
if !done {
+ vw.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VirtualWansCreateOrUpdateFuture")
return
}
@@ -27286,12 +29191,25 @@ func (future *VirtualWansCreateOrUpdateFuture) Result(client VirtualWansClient)
// VirtualWansDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type VirtualWansDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualWansClient) (autorest.Response, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualWansDeleteFuture) Result(client VirtualWansClient) (ar autorest.Response, err error) {
+// UnmarshalJSON is the custom unmarshaller for VirtualWansDeleteFuture.
+func (future *VirtualWansDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualWansDeleteFuture.Result.
+func (future *VirtualWansDeleteFuture) result(client VirtualWansClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -27299,6 +29217,7 @@ func (future *VirtualWansDeleteFuture) Result(client VirtualWansClient) (ar auto
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VirtualWansDeleteFuture")
return
}
@@ -27325,12 +29244,25 @@ type VirtualWanSecurityProviders struct {
// VirtualWansUpdateTagsFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type VirtualWansUpdateTagsFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VirtualWansClient) (VirtualWAN, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VirtualWansUpdateTagsFuture) Result(client VirtualWansClient) (vw VirtualWAN, err error) {
+// UnmarshalJSON is the custom unmarshaller for VirtualWansUpdateTagsFuture.
+func (future *VirtualWansUpdateTagsFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VirtualWansUpdateTagsFuture.Result.
+func (future *VirtualWansUpdateTagsFuture) result(client VirtualWansClient) (vw VirtualWAN, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -27338,6 +29270,7 @@ func (future *VirtualWansUpdateTagsFuture) Result(client VirtualWansClient) (vw
return
}
if !done {
+ vw.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VirtualWansUpdateTagsFuture")
return
}
@@ -27773,12 +29706,25 @@ func (vcp VpnConnectionProperties) MarshalJSON() ([]byte, error) {
// VpnConnectionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VpnConnectionsCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VpnConnectionsClient) (VpnConnection, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for VpnConnectionsCreateOrUpdateFuture.
+func (future *VpnConnectionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VpnConnectionsCreateOrUpdateFuture) Result(client VpnConnectionsClient) (vc VpnConnection, err error) {
+// result is the default implementation for VpnConnectionsCreateOrUpdateFuture.Result.
+func (future *VpnConnectionsCreateOrUpdateFuture) result(client VpnConnectionsClient) (vc VpnConnection, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -27786,6 +29732,7 @@ func (future *VpnConnectionsCreateOrUpdateFuture) Result(client VpnConnectionsCl
return
}
if !done {
+ vc.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VpnConnectionsCreateOrUpdateFuture")
return
}
@@ -27802,12 +29749,25 @@ func (future *VpnConnectionsCreateOrUpdateFuture) Result(client VpnConnectionsCl
// VpnConnectionsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type VpnConnectionsDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VpnConnectionsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for VpnConnectionsDeleteFuture.
+func (future *VpnConnectionsDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VpnConnectionsDeleteFuture) Result(client VpnConnectionsClient) (ar autorest.Response, err error) {
+// result is the default implementation for VpnConnectionsDeleteFuture.Result.
+func (future *VpnConnectionsDeleteFuture) result(client VpnConnectionsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -27815,6 +29775,7 @@ func (future *VpnConnectionsDeleteFuture) Result(client VpnConnectionsClient) (a
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VpnConnectionsDeleteFuture")
return
}
@@ -27963,12 +29924,25 @@ type VpnGatewayProperties struct {
// VpnGatewaysCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VpnGatewaysCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VpnGatewaysClient) (VpnGateway, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VpnGatewaysCreateOrUpdateFuture) Result(client VpnGatewaysClient) (vg VpnGateway, err error) {
+// UnmarshalJSON is the custom unmarshaller for VpnGatewaysCreateOrUpdateFuture.
+func (future *VpnGatewaysCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VpnGatewaysCreateOrUpdateFuture.Result.
+func (future *VpnGatewaysCreateOrUpdateFuture) result(client VpnGatewaysClient) (vg VpnGateway, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -27976,6 +29950,7 @@ func (future *VpnGatewaysCreateOrUpdateFuture) Result(client VpnGatewaysClient)
return
}
if !done {
+ vg.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VpnGatewaysCreateOrUpdateFuture")
return
}
@@ -27992,12 +29967,25 @@ func (future *VpnGatewaysCreateOrUpdateFuture) Result(client VpnGatewaysClient)
// VpnGatewaysDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type VpnGatewaysDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VpnGatewaysClient) (autorest.Response, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VpnGatewaysDeleteFuture) Result(client VpnGatewaysClient) (ar autorest.Response, err error) {
+// UnmarshalJSON is the custom unmarshaller for VpnGatewaysDeleteFuture.
+func (future *VpnGatewaysDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VpnGatewaysDeleteFuture.Result.
+func (future *VpnGatewaysDeleteFuture) result(client VpnGatewaysClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -28005,6 +29993,7 @@ func (future *VpnGatewaysDeleteFuture) Result(client VpnGatewaysClient) (ar auto
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VpnGatewaysDeleteFuture")
return
}
@@ -28015,12 +30004,25 @@ func (future *VpnGatewaysDeleteFuture) Result(client VpnGatewaysClient) (ar auto
// VpnGatewaysUpdateTagsFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type VpnGatewaysUpdateTagsFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VpnGatewaysClient) (VpnGateway, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for VpnGatewaysUpdateTagsFuture.
+func (future *VpnGatewaysUpdateTagsFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VpnGatewaysUpdateTagsFuture) Result(client VpnGatewaysClient) (vg VpnGateway, err error) {
+// result is the default implementation for VpnGatewaysUpdateTagsFuture.Result.
+func (future *VpnGatewaysUpdateTagsFuture) result(client VpnGatewaysClient) (vg VpnGateway, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -28028,6 +30030,7 @@ func (future *VpnGatewaysUpdateTagsFuture) Result(client VpnGatewaysClient) (vg
return
}
if !done {
+ vg.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VpnGatewaysUpdateTagsFuture")
return
}
@@ -28168,6 +30171,12 @@ type VpnSiteID struct {
VpnSite *string `json:"vpnSite,omitempty"`
}
+// MarshalJSON is the custom marshaler for VpnSiteID.
+func (vsi VpnSiteID) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// VpnSiteProperties parameters for VpnSite
type VpnSiteProperties struct {
// VirtualWan - The VirtualWAN to which the vpnSite belongs
@@ -28191,12 +30200,25 @@ type VpnSiteProperties struct {
// VpnSitesConfigurationDownloadFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type VpnSitesConfigurationDownloadFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VpnSitesConfigurationClient) (autorest.Response, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VpnSitesConfigurationDownloadFuture) Result(client VpnSitesConfigurationClient) (ar autorest.Response, err error) {
+// UnmarshalJSON is the custom unmarshaller for VpnSitesConfigurationDownloadFuture.
+func (future *VpnSitesConfigurationDownloadFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VpnSitesConfigurationDownloadFuture.Result.
+func (future *VpnSitesConfigurationDownloadFuture) result(client VpnSitesConfigurationClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -28204,6 +30226,7 @@ func (future *VpnSitesConfigurationDownloadFuture) Result(client VpnSitesConfigu
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VpnSitesConfigurationDownloadFuture")
return
}
@@ -28214,12 +30237,25 @@ func (future *VpnSitesConfigurationDownloadFuture) Result(client VpnSitesConfigu
// VpnSitesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type VpnSitesCreateOrUpdateFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VpnSitesClient) (VpnSite, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VpnSitesCreateOrUpdateFuture) Result(client VpnSitesClient) (vs VpnSite, err error) {
+// UnmarshalJSON is the custom unmarshaller for VpnSitesCreateOrUpdateFuture.
+func (future *VpnSitesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VpnSitesCreateOrUpdateFuture.Result.
+func (future *VpnSitesCreateOrUpdateFuture) result(client VpnSitesClient) (vs VpnSite, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -28227,6 +30263,7 @@ func (future *VpnSitesCreateOrUpdateFuture) Result(client VpnSitesClient) (vs Vp
return
}
if !done {
+ vs.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VpnSitesCreateOrUpdateFuture")
return
}
@@ -28243,12 +30280,25 @@ func (future *VpnSitesCreateOrUpdateFuture) Result(client VpnSitesClient) (vs Vp
// VpnSitesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type VpnSitesDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VpnSitesClient) (autorest.Response, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VpnSitesDeleteFuture) Result(client VpnSitesClient) (ar autorest.Response, err error) {
+// UnmarshalJSON is the custom unmarshaller for VpnSitesDeleteFuture.
+func (future *VpnSitesDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for VpnSitesDeleteFuture.Result.
+func (future *VpnSitesDeleteFuture) result(client VpnSitesClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -28256,6 +30306,7 @@ func (future *VpnSitesDeleteFuture) Result(client VpnSitesClient) (ar autorest.R
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VpnSitesDeleteFuture")
return
}
@@ -28266,12 +30317,25 @@ func (future *VpnSitesDeleteFuture) Result(client VpnSitesClient) (ar autorest.R
// VpnSitesUpdateTagsFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type VpnSitesUpdateTagsFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(VpnSitesClient) (VpnSite, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for VpnSitesUpdateTagsFuture.
+func (future *VpnSitesUpdateTagsFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *VpnSitesUpdateTagsFuture) Result(client VpnSitesClient) (vs VpnSite, err error) {
+// result is the default implementation for VpnSitesUpdateTagsFuture.Result.
+func (future *VpnSitesUpdateTagsFuture) result(client VpnSitesClient) (vs VpnSite, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -28279,6 +30343,7 @@ func (future *VpnSitesUpdateTagsFuture) Result(client VpnSitesClient) (vs VpnSit
return
}
if !done {
+ vs.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.VpnSitesUpdateTagsFuture")
return
}
@@ -28421,15 +30486,34 @@ type WatcherPropertiesFormat struct {
ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
}
+// MarshalJSON is the custom marshaler for WatcherPropertiesFormat.
+func (wpf WatcherPropertiesFormat) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// WatchersCheckConnectivityFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type WatchersCheckConnectivityFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(WatchersClient) (ConnectivityInformation, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *WatchersCheckConnectivityFuture) Result(client WatchersClient) (ci ConnectivityInformation, err error) {
+// UnmarshalJSON is the custom unmarshaller for WatchersCheckConnectivityFuture.
+func (future *WatchersCheckConnectivityFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for WatchersCheckConnectivityFuture.Result.
+func (future *WatchersCheckConnectivityFuture) result(client WatchersClient) (ci ConnectivityInformation, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -28437,6 +30521,7 @@ func (future *WatchersCheckConnectivityFuture) Result(client WatchersClient) (ci
return
}
if !done {
+ ci.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.WatchersCheckConnectivityFuture")
return
}
@@ -28453,12 +30538,25 @@ func (future *WatchersCheckConnectivityFuture) Result(client WatchersClient) (ci
// WatchersDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type WatchersDeleteFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(WatchersClient) (autorest.Response, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *WatchersDeleteFuture) Result(client WatchersClient) (ar autorest.Response, err error) {
+// UnmarshalJSON is the custom unmarshaller for WatchersDeleteFuture.
+func (future *WatchersDeleteFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for WatchersDeleteFuture.Result.
+func (future *WatchersDeleteFuture) result(client WatchersClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -28466,6 +30564,7 @@ func (future *WatchersDeleteFuture) Result(client WatchersClient) (ar autorest.R
return
}
if !done {
+ ar.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.WatchersDeleteFuture")
return
}
@@ -28476,12 +30575,25 @@ func (future *WatchersDeleteFuture) Result(client WatchersClient) (ar autorest.R
// WatchersGetAzureReachabilityReportFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type WatchersGetAzureReachabilityReportFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(WatchersClient) (AzureReachabilityReport, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for WatchersGetAzureReachabilityReportFuture.
+func (future *WatchersGetAzureReachabilityReportFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *WatchersGetAzureReachabilityReportFuture) Result(client WatchersClient) (arr AzureReachabilityReport, err error) {
+// result is the default implementation for WatchersGetAzureReachabilityReportFuture.Result.
+func (future *WatchersGetAzureReachabilityReportFuture) result(client WatchersClient) (arr AzureReachabilityReport, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -28489,6 +30601,7 @@ func (future *WatchersGetAzureReachabilityReportFuture) Result(client WatchersCl
return
}
if !done {
+ arr.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.WatchersGetAzureReachabilityReportFuture")
return
}
@@ -28505,12 +30618,25 @@ func (future *WatchersGetAzureReachabilityReportFuture) Result(client WatchersCl
// WatchersGetFlowLogStatusFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type WatchersGetFlowLogStatusFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(WatchersClient) (FlowLogInformation, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for WatchersGetFlowLogStatusFuture.
+func (future *WatchersGetFlowLogStatusFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *WatchersGetFlowLogStatusFuture) Result(client WatchersClient) (fli FlowLogInformation, err error) {
+// result is the default implementation for WatchersGetFlowLogStatusFuture.Result.
+func (future *WatchersGetFlowLogStatusFuture) result(client WatchersClient) (fli FlowLogInformation, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -28518,6 +30644,7 @@ func (future *WatchersGetFlowLogStatusFuture) Result(client WatchersClient) (fli
return
}
if !done {
+ fli.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.WatchersGetFlowLogStatusFuture")
return
}
@@ -28534,12 +30661,25 @@ func (future *WatchersGetFlowLogStatusFuture) Result(client WatchersClient) (fli
// WatchersGetNetworkConfigurationDiagnosticFuture an abstraction for monitoring and retrieving the results
// of a long-running operation.
type WatchersGetNetworkConfigurationDiagnosticFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(WatchersClient) (ConfigurationDiagnosticResponse, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *WatchersGetNetworkConfigurationDiagnosticFuture) Result(client WatchersClient) (cdr ConfigurationDiagnosticResponse, err error) {
+// UnmarshalJSON is the custom unmarshaller for WatchersGetNetworkConfigurationDiagnosticFuture.
+func (future *WatchersGetNetworkConfigurationDiagnosticFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for WatchersGetNetworkConfigurationDiagnosticFuture.Result.
+func (future *WatchersGetNetworkConfigurationDiagnosticFuture) result(client WatchersClient) (cdr ConfigurationDiagnosticResponse, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -28547,6 +30687,7 @@ func (future *WatchersGetNetworkConfigurationDiagnosticFuture) Result(client Wat
return
}
if !done {
+ cdr.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.WatchersGetNetworkConfigurationDiagnosticFuture")
return
}
@@ -28563,12 +30704,25 @@ func (future *WatchersGetNetworkConfigurationDiagnosticFuture) Result(client Wat
// WatchersGetNextHopFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type WatchersGetNextHopFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(WatchersClient) (NextHopResult, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *WatchersGetNextHopFuture) Result(client WatchersClient) (nhr NextHopResult, err error) {
+// UnmarshalJSON is the custom unmarshaller for WatchersGetNextHopFuture.
+func (future *WatchersGetNextHopFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for WatchersGetNextHopFuture.Result.
+func (future *WatchersGetNextHopFuture) result(client WatchersClient) (nhr NextHopResult, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -28576,6 +30730,7 @@ func (future *WatchersGetNextHopFuture) Result(client WatchersClient) (nhr NextH
return
}
if !done {
+ nhr.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.WatchersGetNextHopFuture")
return
}
@@ -28592,12 +30747,25 @@ func (future *WatchersGetNextHopFuture) Result(client WatchersClient) (nhr NextH
// WatchersGetTroubleshootingFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type WatchersGetTroubleshootingFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(WatchersClient) (TroubleshootingResult, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for WatchersGetTroubleshootingFuture.
+func (future *WatchersGetTroubleshootingFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *WatchersGetTroubleshootingFuture) Result(client WatchersClient) (tr TroubleshootingResult, err error) {
+// result is the default implementation for WatchersGetTroubleshootingFuture.Result.
+func (future *WatchersGetTroubleshootingFuture) result(client WatchersClient) (tr TroubleshootingResult, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -28605,6 +30773,7 @@ func (future *WatchersGetTroubleshootingFuture) Result(client WatchersClient) (t
return
}
if !done {
+ tr.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.WatchersGetTroubleshootingFuture")
return
}
@@ -28621,12 +30790,25 @@ func (future *WatchersGetTroubleshootingFuture) Result(client WatchersClient) (t
// WatchersGetTroubleshootingResultFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type WatchersGetTroubleshootingResultFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(WatchersClient) (TroubleshootingResult, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for WatchersGetTroubleshootingResultFuture.
+func (future *WatchersGetTroubleshootingResultFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *WatchersGetTroubleshootingResultFuture) Result(client WatchersClient) (tr TroubleshootingResult, err error) {
+// result is the default implementation for WatchersGetTroubleshootingResultFuture.Result.
+func (future *WatchersGetTroubleshootingResultFuture) result(client WatchersClient) (tr TroubleshootingResult, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -28634,6 +30816,7 @@ func (future *WatchersGetTroubleshootingResultFuture) Result(client WatchersClie
return
}
if !done {
+ tr.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.WatchersGetTroubleshootingResultFuture")
return
}
@@ -28650,12 +30833,25 @@ func (future *WatchersGetTroubleshootingResultFuture) Result(client WatchersClie
// WatchersGetVMSecurityRulesFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type WatchersGetVMSecurityRulesFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(WatchersClient) (SecurityGroupViewResult, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for WatchersGetVMSecurityRulesFuture.
+func (future *WatchersGetVMSecurityRulesFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *WatchersGetVMSecurityRulesFuture) Result(client WatchersClient) (sgvr SecurityGroupViewResult, err error) {
+// result is the default implementation for WatchersGetVMSecurityRulesFuture.Result.
+func (future *WatchersGetVMSecurityRulesFuture) result(client WatchersClient) (sgvr SecurityGroupViewResult, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -28663,6 +30859,7 @@ func (future *WatchersGetVMSecurityRulesFuture) Result(client WatchersClient) (s
return
}
if !done {
+ sgvr.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.WatchersGetVMSecurityRulesFuture")
return
}
@@ -28679,12 +30876,25 @@ func (future *WatchersGetVMSecurityRulesFuture) Result(client WatchersClient) (s
// WatchersListAvailableProvidersFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type WatchersListAvailableProvidersFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(WatchersClient) (AvailableProvidersList, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for WatchersListAvailableProvidersFuture.
+func (future *WatchersListAvailableProvidersFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *WatchersListAvailableProvidersFuture) Result(client WatchersClient) (apl AvailableProvidersList, err error) {
+// result is the default implementation for WatchersListAvailableProvidersFuture.Result.
+func (future *WatchersListAvailableProvidersFuture) result(client WatchersClient) (apl AvailableProvidersList, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -28692,6 +30902,7 @@ func (future *WatchersListAvailableProvidersFuture) Result(client WatchersClient
return
}
if !done {
+ apl.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.WatchersListAvailableProvidersFuture")
return
}
@@ -28708,12 +30919,25 @@ func (future *WatchersListAvailableProvidersFuture) Result(client WatchersClient
// WatchersSetFlowLogConfigurationFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type WatchersSetFlowLogConfigurationFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(WatchersClient) (FlowLogInformation, error)
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *WatchersSetFlowLogConfigurationFuture) Result(client WatchersClient) (fli FlowLogInformation, err error) {
+// UnmarshalJSON is the custom unmarshaller for WatchersSetFlowLogConfigurationFuture.
+func (future *WatchersSetFlowLogConfigurationFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for WatchersSetFlowLogConfigurationFuture.Result.
+func (future *WatchersSetFlowLogConfigurationFuture) result(client WatchersClient) (fli FlowLogInformation, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -28721,6 +30945,7 @@ func (future *WatchersSetFlowLogConfigurationFuture) Result(client WatchersClien
return
}
if !done {
+ fli.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.WatchersSetFlowLogConfigurationFuture")
return
}
@@ -28737,12 +30962,25 @@ func (future *WatchersSetFlowLogConfigurationFuture) Result(client WatchersClien
// WatchersVerifyIPFlowFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type WatchersVerifyIPFlowFuture struct {
- azure.Future
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(WatchersClient) (VerificationIPFlowResult, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for WatchersVerifyIPFlowFuture.
+func (future *WatchersVerifyIPFlowFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
}
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *WatchersVerifyIPFlowFuture) Result(client WatchersClient) (vifr VerificationIPFlowResult, err error) {
+// result is the default implementation for WatchersVerifyIPFlowFuture.Result.
+func (future *WatchersVerifyIPFlowFuture) result(client WatchersClient) (vifr VerificationIPFlowResult, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -28750,6 +30988,7 @@ func (future *WatchersVerifyIPFlowFuture) Result(client WatchersClient) (vifr Ve
return
}
if !done {
+ vifr.Response.Response = future.Response()
err = azure.NewAsyncOpIncompleteError("network.WatchersVerifyIPFlowFuture")
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/operations.go
index bb5860136..2460d3810 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/operations.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/operations.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -74,6 +63,7 @@ func (client OperationsClient) List(ctx context.Context) (result OperationListRe
}
if result.olr.hasNextLink() && result.olr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -129,7 +119,6 @@ func (client OperationsClient) listNextResults(ctx context.Context, lastResults
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.OperationsClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/p2svpngateways.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/p2svpngateways.go
index 9421447ee..8d3162591 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/p2svpngateways.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/p2svpngateways.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -51,8 +40,8 @@ func (client P2sVpnGatewaysClient) CreateOrUpdate(ctx context.Context, resourceG
ctx = tracing.StartSpan(ctx, fqdn+"/P2sVpnGatewaysClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -65,7 +54,7 @@ func (client P2sVpnGatewaysClient) CreateOrUpdate(ctx context.Context, resourceG
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -104,7 +93,10 @@ func (client P2sVpnGatewaysClient) CreateOrUpdateSender(req *http.Request) (futu
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -129,8 +121,8 @@ func (client P2sVpnGatewaysClient) Delete(ctx context.Context, resourceGroupName
ctx = tracing.StartSpan(ctx, fqdn+"/P2sVpnGatewaysClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -143,7 +135,7 @@ func (client P2sVpnGatewaysClient) Delete(ctx context.Context, resourceGroupName
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysClient", "Delete", nil, "Failure sending request")
return
}
@@ -179,7 +171,10 @@ func (client P2sVpnGatewaysClient) DeleteSender(req *http.Request) (future P2sVp
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -204,8 +199,8 @@ func (client P2sVpnGatewaysClient) GenerateVpnProfile(ctx context.Context, resou
ctx = tracing.StartSpan(ctx, fqdn+"/P2sVpnGatewaysClient.GenerateVpnProfile")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -218,7 +213,7 @@ func (client P2sVpnGatewaysClient) GenerateVpnProfile(ctx context.Context, resou
result, err = client.GenerateVpnProfileSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysClient", "GenerateVpnProfile", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysClient", "GenerateVpnProfile", nil, "Failure sending request")
return
}
@@ -256,7 +251,10 @@ func (client P2sVpnGatewaysClient) GenerateVpnProfileSender(req *http.Request) (
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -381,6 +379,7 @@ func (client P2sVpnGatewaysClient) List(ctx context.Context) (result ListP2SVpnG
}
if result.lpvgr.hasNextLink() && result.lpvgr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -440,7 +439,6 @@ func (client P2sVpnGatewaysClient) listNextResults(ctx context.Context, lastResu
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -496,6 +494,7 @@ func (client P2sVpnGatewaysClient) ListByResourceGroup(ctx context.Context, reso
}
if result.lpvgr.hasNextLink() && result.lpvgr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -556,7 +555,6 @@ func (client P2sVpnGatewaysClient) listByResourceGroupNextResults(ctx context.Co
result, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -587,8 +585,8 @@ func (client P2sVpnGatewaysClient) UpdateTags(ctx context.Context, resourceGroup
ctx = tracing.StartSpan(ctx, fqdn+"/P2sVpnGatewaysClient.UpdateTags")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -601,7 +599,7 @@ func (client P2sVpnGatewaysClient) UpdateTags(ctx context.Context, resourceGroup
result, err = client.UpdateTagsSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysClient", "UpdateTags", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysClient", "UpdateTags", nil, "Failure sending request")
return
}
@@ -639,7 +637,10 @@ func (client P2sVpnGatewaysClient) UpdateTagsSender(req *http.Request) (future P
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/p2svpnserverconfigurations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/p2svpnserverconfigurations.go
index 456208f5b..245702951 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/p2svpnserverconfigurations.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/p2svpnserverconfigurations.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -54,8 +43,8 @@ func (client P2sVpnServerConfigurationsClient) CreateOrUpdate(ctx context.Contex
ctx = tracing.StartSpan(ctx, fqdn+"/P2sVpnServerConfigurationsClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -68,7 +57,7 @@ func (client P2sVpnServerConfigurationsClient) CreateOrUpdate(ctx context.Contex
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.P2sVpnServerConfigurationsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.P2sVpnServerConfigurationsClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -108,7 +97,10 @@ func (client P2sVpnServerConfigurationsClient) CreateOrUpdateSender(req *http.Re
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -134,8 +126,8 @@ func (client P2sVpnServerConfigurationsClient) Delete(ctx context.Context, resou
ctx = tracing.StartSpan(ctx, fqdn+"/P2sVpnServerConfigurationsClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -148,7 +140,7 @@ func (client P2sVpnServerConfigurationsClient) Delete(ctx context.Context, resou
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.P2sVpnServerConfigurationsClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.P2sVpnServerConfigurationsClient", "Delete", nil, "Failure sending request")
return
}
@@ -185,7 +177,10 @@ func (client P2sVpnServerConfigurationsClient) DeleteSender(req *http.Request) (
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -314,6 +309,7 @@ func (client P2sVpnServerConfigurationsClient) ListByVirtualWan(ctx context.Cont
}
if result.lpvscr.hasNextLink() && result.lpvscr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -375,7 +371,6 @@ func (client P2sVpnServerConfigurationsClient) listByVirtualWanNextResults(ctx c
result, err = client.ListByVirtualWanResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.P2sVpnServerConfigurationsClient", "listByVirtualWanNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/packetcaptures.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/packetcaptures.go
index 025447840..67defe0b1 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/packetcaptures.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/packetcaptures.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -53,8 +42,8 @@ func (client PacketCapturesClient) Create(ctx context.Context, resourceGroupName
ctx = tracing.StartSpan(ctx, fqdn+"/PacketCapturesClient.Create")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -76,7 +65,7 @@ func (client PacketCapturesClient) Create(ctx context.Context, resourceGroupName
result, err = client.CreateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.PacketCapturesClient", "Create", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.PacketCapturesClient", "Create", nil, "Failure sending request")
return
}
@@ -115,7 +104,10 @@ func (client PacketCapturesClient) CreateSender(req *http.Request) (future Packe
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -141,8 +133,8 @@ func (client PacketCapturesClient) Delete(ctx context.Context, resourceGroupName
ctx = tracing.StartSpan(ctx, fqdn+"/PacketCapturesClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -155,7 +147,7 @@ func (client PacketCapturesClient) Delete(ctx context.Context, resourceGroupName
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.PacketCapturesClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.PacketCapturesClient", "Delete", nil, "Failure sending request")
return
}
@@ -192,7 +184,10 @@ func (client PacketCapturesClient) DeleteSender(req *http.Request) (future Packe
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -295,8 +290,8 @@ func (client PacketCapturesClient) GetStatus(ctx context.Context, resourceGroupN
ctx = tracing.StartSpan(ctx, fqdn+"/PacketCapturesClient.GetStatus")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -309,7 +304,7 @@ func (client PacketCapturesClient) GetStatus(ctx context.Context, resourceGroupN
result, err = client.GetStatusSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.PacketCapturesClient", "GetStatus", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.PacketCapturesClient", "GetStatus", nil, "Failure sending request")
return
}
@@ -346,7 +341,10 @@ func (client PacketCapturesClient) GetStatusSender(req *http.Request) (future Pa
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -448,8 +446,8 @@ func (client PacketCapturesClient) Stop(ctx context.Context, resourceGroupName s
ctx = tracing.StartSpan(ctx, fqdn+"/PacketCapturesClient.Stop")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -462,7 +460,7 @@ func (client PacketCapturesClient) Stop(ctx context.Context, resourceGroupName s
result, err = client.StopSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.PacketCapturesClient", "Stop", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.PacketCapturesClient", "Stop", nil, "Failure sending request")
return
}
@@ -499,7 +497,10 @@ func (client PacketCapturesClient) StopSender(req *http.Request) (future PacketC
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/profiles.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/profiles.go
index b97a92468..19d1399dd 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/profiles.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/profiles.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -129,8 +118,8 @@ func (client ProfilesClient) Delete(ctx context.Context, resourceGroupName strin
ctx = tracing.StartSpan(ctx, fqdn+"/ProfilesClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -143,7 +132,7 @@ func (client ProfilesClient) Delete(ctx context.Context, resourceGroupName strin
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ProfilesClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ProfilesClient", "Delete", nil, "Failure sending request")
return
}
@@ -179,7 +168,10 @@ func (client ProfilesClient) DeleteSender(req *http.Request) (future ProfilesDel
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -309,6 +301,7 @@ func (client ProfilesClient) List(ctx context.Context, resourceGroupName string)
}
if result.plr.hasNextLink() && result.plr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -369,7 +362,6 @@ func (client ProfilesClient) listNextResults(ctx context.Context, lastResults Pr
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ProfilesClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -423,6 +415,7 @@ func (client ProfilesClient) ListAll(ctx context.Context) (result ProfileListRes
}
if result.plr.hasNextLink() && result.plr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -482,7 +475,6 @@ func (client ProfilesClient) listAllNextResults(ctx context.Context, lastResults
result, err = client.ListAllResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ProfilesClient", "listAllNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/publicipaddresses.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/publicipaddresses.go
index fba0280b4..ecac8ee4d 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/publicipaddresses.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/publicipaddresses.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -53,8 +42,8 @@ func (client PublicIPAddressesClient) CreateOrUpdate(ctx context.Context, resour
ctx = tracing.StartSpan(ctx, fqdn+"/PublicIPAddressesClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -78,7 +67,7 @@ func (client PublicIPAddressesClient) CreateOrUpdate(ctx context.Context, resour
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -116,7 +105,10 @@ func (client PublicIPAddressesClient) CreateOrUpdateSender(req *http.Request) (f
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -141,8 +133,8 @@ func (client PublicIPAddressesClient) Delete(ctx context.Context, resourceGroupN
ctx = tracing.StartSpan(ctx, fqdn+"/PublicIPAddressesClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -155,7 +147,7 @@ func (client PublicIPAddressesClient) Delete(ctx context.Context, resourceGroupN
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "Delete", nil, "Failure sending request")
return
}
@@ -191,7 +183,10 @@ func (client PublicIPAddressesClient) DeleteSender(req *http.Request) (future Pu
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -409,6 +404,7 @@ func (client PublicIPAddressesClient) List(ctx context.Context, resourceGroupNam
}
if result.pialr.hasNextLink() && result.pialr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -469,7 +465,6 @@ func (client PublicIPAddressesClient) listNextResults(ctx context.Context, lastR
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -523,6 +518,7 @@ func (client PublicIPAddressesClient) ListAll(ctx context.Context) (result Publi
}
if result.pialr.hasNextLink() && result.pialr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -582,7 +578,6 @@ func (client PublicIPAddressesClient) listAllNextResults(ctx context.Context, la
result, err = client.ListAllResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "listAllNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -640,6 +635,7 @@ func (client PublicIPAddressesClient) ListVirtualMachineScaleSetPublicIPAddresse
}
if result.pialr.hasNextLink() && result.pialr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -701,7 +697,6 @@ func (client PublicIPAddressesClient) listVirtualMachineScaleSetPublicIPAddresse
result, err = client.ListVirtualMachineScaleSetPublicIPAddressesResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "listVirtualMachineScaleSetPublicIPAddressesNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -762,6 +757,7 @@ func (client PublicIPAddressesClient) ListVirtualMachineScaleSetVMPublicIPAddres
}
if result.pialr.hasNextLink() && result.pialr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -826,7 +822,6 @@ func (client PublicIPAddressesClient) listVirtualMachineScaleSetVMPublicIPAddres
result, err = client.ListVirtualMachineScaleSetVMPublicIPAddressesResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "listVirtualMachineScaleSetVMPublicIPAddressesNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -857,8 +852,8 @@ func (client PublicIPAddressesClient) UpdateTags(ctx context.Context, resourceGr
ctx = tracing.StartSpan(ctx, fqdn+"/PublicIPAddressesClient.UpdateTags")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -871,7 +866,7 @@ func (client PublicIPAddressesClient) UpdateTags(ctx context.Context, resourceGr
result, err = client.UpdateTagsSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "UpdateTags", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "UpdateTags", nil, "Failure sending request")
return
}
@@ -909,7 +904,10 @@ func (client PublicIPAddressesClient) UpdateTagsSender(req *http.Request) (futur
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/publicipprefixes.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/publicipprefixes.go
index 6f017a4db..f02f88b38 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/publicipprefixes.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/publicipprefixes.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -52,8 +41,8 @@ func (client PublicIPPrefixesClient) CreateOrUpdate(ctx context.Context, resourc
ctx = tracing.StartSpan(ctx, fqdn+"/PublicIPPrefixesClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -66,7 +55,7 @@ func (client PublicIPPrefixesClient) CreateOrUpdate(ctx context.Context, resourc
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.PublicIPPrefixesClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.PublicIPPrefixesClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -104,7 +93,10 @@ func (client PublicIPPrefixesClient) CreateOrUpdateSender(req *http.Request) (fu
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -129,8 +121,8 @@ func (client PublicIPPrefixesClient) Delete(ctx context.Context, resourceGroupNa
ctx = tracing.StartSpan(ctx, fqdn+"/PublicIPPrefixesClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -143,7 +135,7 @@ func (client PublicIPPrefixesClient) Delete(ctx context.Context, resourceGroupNa
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.PublicIPPrefixesClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.PublicIPPrefixesClient", "Delete", nil, "Failure sending request")
return
}
@@ -179,7 +171,10 @@ func (client PublicIPPrefixesClient) DeleteSender(req *http.Request) (future Pub
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -309,6 +304,7 @@ func (client PublicIPPrefixesClient) List(ctx context.Context, resourceGroupName
}
if result.piplr.hasNextLink() && result.piplr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -369,7 +365,6 @@ func (client PublicIPPrefixesClient) listNextResults(ctx context.Context, lastRe
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.PublicIPPrefixesClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -423,6 +418,7 @@ func (client PublicIPPrefixesClient) ListAll(ctx context.Context) (result Public
}
if result.piplr.hasNextLink() && result.piplr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -482,7 +478,6 @@ func (client PublicIPPrefixesClient) listAllNextResults(ctx context.Context, las
result, err = client.ListAllResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.PublicIPPrefixesClient", "listAllNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -513,8 +508,8 @@ func (client PublicIPPrefixesClient) UpdateTags(ctx context.Context, resourceGro
ctx = tracing.StartSpan(ctx, fqdn+"/PublicIPPrefixesClient.UpdateTags")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -527,7 +522,7 @@ func (client PublicIPPrefixesClient) UpdateTags(ctx context.Context, resourceGro
result, err = client.UpdateTagsSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.PublicIPPrefixesClient", "UpdateTags", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.PublicIPPrefixesClient", "UpdateTags", nil, "Failure sending request")
return
}
@@ -565,7 +560,10 @@ func (client PublicIPPrefixesClient) UpdateTagsSender(req *http.Request) (future
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/routefilterrules.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/routefilterrules.go
index 2d2866b55..c152fe31c 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/routefilterrules.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/routefilterrules.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -54,8 +43,8 @@ func (client RouteFilterRulesClient) CreateOrUpdate(ctx context.Context, resourc
ctx = tracing.StartSpan(ctx, fqdn+"/RouteFilterRulesClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -77,7 +66,7 @@ func (client RouteFilterRulesClient) CreateOrUpdate(ctx context.Context, resourc
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.RouteFilterRulesClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.RouteFilterRulesClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -117,7 +106,10 @@ func (client RouteFilterRulesClient) CreateOrUpdateSender(req *http.Request) (fu
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -143,8 +135,8 @@ func (client RouteFilterRulesClient) Delete(ctx context.Context, resourceGroupNa
ctx = tracing.StartSpan(ctx, fqdn+"/RouteFilterRulesClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -157,7 +149,7 @@ func (client RouteFilterRulesClient) Delete(ctx context.Context, resourceGroupNa
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.RouteFilterRulesClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.RouteFilterRulesClient", "Delete", nil, "Failure sending request")
return
}
@@ -194,7 +186,10 @@ func (client RouteFilterRulesClient) DeleteSender(req *http.Request) (future Rou
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -323,6 +318,7 @@ func (client RouteFilterRulesClient) ListByRouteFilter(ctx context.Context, reso
}
if result.rfrlr.hasNextLink() && result.rfrlr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -384,7 +380,6 @@ func (client RouteFilterRulesClient) listByRouteFilterNextResults(ctx context.Co
result, err = client.ListByRouteFilterResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.RouteFilterRulesClient", "listByRouteFilterNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -416,8 +411,8 @@ func (client RouteFilterRulesClient) Update(ctx context.Context, resourceGroupNa
ctx = tracing.StartSpan(ctx, fqdn+"/RouteFilterRulesClient.Update")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -430,7 +425,7 @@ func (client RouteFilterRulesClient) Update(ctx context.Context, resourceGroupNa
result, err = client.UpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.RouteFilterRulesClient", "Update", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.RouteFilterRulesClient", "Update", nil, "Failure sending request")
return
}
@@ -471,7 +466,10 @@ func (client RouteFilterRulesClient) UpdateSender(req *http.Request) (future Rou
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/routefilters.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/routefilters.go
index ff6aa9b0d..bf209a260 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/routefilters.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/routefilters.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -51,8 +40,8 @@ func (client RouteFiltersClient) CreateOrUpdate(ctx context.Context, resourceGro
ctx = tracing.StartSpan(ctx, fqdn+"/RouteFiltersClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -65,7 +54,7 @@ func (client RouteFiltersClient) CreateOrUpdate(ctx context.Context, resourceGro
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.RouteFiltersClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.RouteFiltersClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -104,7 +93,10 @@ func (client RouteFiltersClient) CreateOrUpdateSender(req *http.Request) (future
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -129,8 +121,8 @@ func (client RouteFiltersClient) Delete(ctx context.Context, resourceGroupName s
ctx = tracing.StartSpan(ctx, fqdn+"/RouteFiltersClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -143,7 +135,7 @@ func (client RouteFiltersClient) Delete(ctx context.Context, resourceGroupName s
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.RouteFiltersClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.RouteFiltersClient", "Delete", nil, "Failure sending request")
return
}
@@ -179,7 +171,10 @@ func (client RouteFiltersClient) DeleteSender(req *http.Request) (future RouteFi
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -307,6 +302,7 @@ func (client RouteFiltersClient) List(ctx context.Context) (result RouteFilterLi
}
if result.rflr.hasNextLink() && result.rflr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -366,7 +362,6 @@ func (client RouteFiltersClient) listNextResults(ctx context.Context, lastResult
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.RouteFiltersClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -422,6 +417,7 @@ func (client RouteFiltersClient) ListByResourceGroup(ctx context.Context, resour
}
if result.rflr.hasNextLink() && result.rflr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -482,7 +478,6 @@ func (client RouteFiltersClient) listByResourceGroupNextResults(ctx context.Cont
result, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.RouteFiltersClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -513,8 +508,8 @@ func (client RouteFiltersClient) Update(ctx context.Context, resourceGroupName s
ctx = tracing.StartSpan(ctx, fqdn+"/RouteFiltersClient.Update")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -527,7 +522,7 @@ func (client RouteFiltersClient) Update(ctx context.Context, resourceGroupName s
result, err = client.UpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.RouteFiltersClient", "Update", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.RouteFiltersClient", "Update", nil, "Failure sending request")
return
}
@@ -568,7 +563,10 @@ func (client RouteFiltersClient) UpdateSender(req *http.Request) (future RouteFi
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/routes.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/routes.go
index e97ede08f..5a4d23e51 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/routes.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/routes.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -52,8 +41,8 @@ func (client RoutesClient) CreateOrUpdate(ctx context.Context, resourceGroupName
ctx = tracing.StartSpan(ctx, fqdn+"/RoutesClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -66,7 +55,7 @@ func (client RoutesClient) CreateOrUpdate(ctx context.Context, resourceGroupName
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.RoutesClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.RoutesClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -105,7 +94,10 @@ func (client RoutesClient) CreateOrUpdateSender(req *http.Request) (future Route
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -131,8 +123,8 @@ func (client RoutesClient) Delete(ctx context.Context, resourceGroupName string,
ctx = tracing.StartSpan(ctx, fqdn+"/RoutesClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -145,7 +137,7 @@ func (client RoutesClient) Delete(ctx context.Context, resourceGroupName string,
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.RoutesClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.RoutesClient", "Delete", nil, "Failure sending request")
return
}
@@ -182,7 +174,10 @@ func (client RoutesClient) DeleteSender(req *http.Request) (future RoutesDeleteF
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -311,6 +306,7 @@ func (client RoutesClient) List(ctx context.Context, resourceGroupName string, r
}
if result.rlr.hasNextLink() && result.rlr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -372,7 +368,6 @@ func (client RoutesClient) listNextResults(ctx context.Context, lastResults Rout
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.RoutesClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/routetables.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/routetables.go
index 919a9b425..dbe91404b 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/routetables.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/routetables.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -51,8 +40,8 @@ func (client RouteTablesClient) CreateOrUpdate(ctx context.Context, resourceGrou
ctx = tracing.StartSpan(ctx, fqdn+"/RouteTablesClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -65,7 +54,7 @@ func (client RouteTablesClient) CreateOrUpdate(ctx context.Context, resourceGrou
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -103,7 +92,10 @@ func (client RouteTablesClient) CreateOrUpdateSender(req *http.Request) (future
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -128,8 +120,8 @@ func (client RouteTablesClient) Delete(ctx context.Context, resourceGroupName st
ctx = tracing.StartSpan(ctx, fqdn+"/RouteTablesClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -142,7 +134,7 @@ func (client RouteTablesClient) Delete(ctx context.Context, resourceGroupName st
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "Delete", nil, "Failure sending request")
return
}
@@ -178,7 +170,10 @@ func (client RouteTablesClient) DeleteSender(req *http.Request) (future RouteTab
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -308,6 +303,7 @@ func (client RouteTablesClient) List(ctx context.Context, resourceGroupName stri
}
if result.rtlr.hasNextLink() && result.rtlr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -368,7 +364,6 @@ func (client RouteTablesClient) listNextResults(ctx context.Context, lastResults
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -422,6 +417,7 @@ func (client RouteTablesClient) ListAll(ctx context.Context) (result RouteTableL
}
if result.rtlr.hasNextLink() && result.rtlr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -481,7 +477,6 @@ func (client RouteTablesClient) listAllNextResults(ctx context.Context, lastResu
result, err = client.ListAllResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "listAllNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -512,8 +507,8 @@ func (client RouteTablesClient) UpdateTags(ctx context.Context, resourceGroupNam
ctx = tracing.StartSpan(ctx, fqdn+"/RouteTablesClient.UpdateTags")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -526,7 +521,7 @@ func (client RouteTablesClient) UpdateTags(ctx context.Context, resourceGroupNam
result, err = client.UpdateTagsSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "UpdateTags", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "UpdateTags", nil, "Failure sending request")
return
}
@@ -564,7 +559,10 @@ func (client RouteTablesClient) UpdateTagsSender(req *http.Request) (future Rout
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/securitygroups.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/securitygroups.go
index c2efb6ddc..323aa65ad 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/securitygroups.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/securitygroups.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -51,8 +40,8 @@ func (client SecurityGroupsClient) CreateOrUpdate(ctx context.Context, resourceG
ctx = tracing.StartSpan(ctx, fqdn+"/SecurityGroupsClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -65,7 +54,7 @@ func (client SecurityGroupsClient) CreateOrUpdate(ctx context.Context, resourceG
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -103,7 +92,10 @@ func (client SecurityGroupsClient) CreateOrUpdateSender(req *http.Request) (futu
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -128,8 +120,8 @@ func (client SecurityGroupsClient) Delete(ctx context.Context, resourceGroupName
ctx = tracing.StartSpan(ctx, fqdn+"/SecurityGroupsClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -142,7 +134,7 @@ func (client SecurityGroupsClient) Delete(ctx context.Context, resourceGroupName
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Delete", nil, "Failure sending request")
return
}
@@ -178,7 +170,10 @@ func (client SecurityGroupsClient) DeleteSender(req *http.Request) (future Secur
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -308,6 +303,7 @@ func (client SecurityGroupsClient) List(ctx context.Context, resourceGroupName s
}
if result.sglr.hasNextLink() && result.sglr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -368,7 +364,6 @@ func (client SecurityGroupsClient) listNextResults(ctx context.Context, lastResu
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -422,6 +417,7 @@ func (client SecurityGroupsClient) ListAll(ctx context.Context) (result Security
}
if result.sglr.hasNextLink() && result.sglr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -481,7 +477,6 @@ func (client SecurityGroupsClient) listAllNextResults(ctx context.Context, lastR
result, err = client.ListAllResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "listAllNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -512,8 +507,8 @@ func (client SecurityGroupsClient) UpdateTags(ctx context.Context, resourceGroup
ctx = tracing.StartSpan(ctx, fqdn+"/SecurityGroupsClient.UpdateTags")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -526,7 +521,7 @@ func (client SecurityGroupsClient) UpdateTags(ctx context.Context, resourceGroup
result, err = client.UpdateTagsSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "UpdateTags", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "UpdateTags", nil, "Failure sending request")
return
}
@@ -564,7 +559,10 @@ func (client SecurityGroupsClient) UpdateTagsSender(req *http.Request) (future S
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/securityrules.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/securityrules.go
index bc18b6d48..67e2abb64 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/securityrules.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/securityrules.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -52,8 +41,8 @@ func (client SecurityRulesClient) CreateOrUpdate(ctx context.Context, resourceGr
ctx = tracing.StartSpan(ctx, fqdn+"/SecurityRulesClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -66,7 +55,7 @@ func (client SecurityRulesClient) CreateOrUpdate(ctx context.Context, resourceGr
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -105,7 +94,10 @@ func (client SecurityRulesClient) CreateOrUpdateSender(req *http.Request) (futur
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -131,8 +123,8 @@ func (client SecurityRulesClient) Delete(ctx context.Context, resourceGroupName
ctx = tracing.StartSpan(ctx, fqdn+"/SecurityRulesClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -145,7 +137,7 @@ func (client SecurityRulesClient) Delete(ctx context.Context, resourceGroupName
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "Delete", nil, "Failure sending request")
return
}
@@ -182,7 +174,10 @@ func (client SecurityRulesClient) DeleteSender(req *http.Request) (future Securi
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -311,6 +306,7 @@ func (client SecurityRulesClient) List(ctx context.Context, resourceGroupName st
}
if result.srlr.hasNextLink() && result.srlr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -372,7 +368,6 @@ func (client SecurityRulesClient) listNextResults(ctx context.Context, lastResul
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/serviceendpointpolicies.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/serviceendpointpolicies.go
index b1415e6eb..8c543cae4 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/serviceendpointpolicies.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/serviceendpointpolicies.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -52,8 +41,8 @@ func (client ServiceEndpointPoliciesClient) CreateOrUpdate(ctx context.Context,
ctx = tracing.StartSpan(ctx, fqdn+"/ServiceEndpointPoliciesClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -66,7 +55,7 @@ func (client ServiceEndpointPoliciesClient) CreateOrUpdate(ctx context.Context,
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -104,7 +93,10 @@ func (client ServiceEndpointPoliciesClient) CreateOrUpdateSender(req *http.Reque
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -129,8 +121,8 @@ func (client ServiceEndpointPoliciesClient) Delete(ctx context.Context, resource
ctx = tracing.StartSpan(ctx, fqdn+"/ServiceEndpointPoliciesClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -143,7 +135,7 @@ func (client ServiceEndpointPoliciesClient) Delete(ctx context.Context, resource
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesClient", "Delete", nil, "Failure sending request")
return
}
@@ -179,7 +171,10 @@ func (client ServiceEndpointPoliciesClient) DeleteSender(req *http.Request) (fut
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -307,6 +302,7 @@ func (client ServiceEndpointPoliciesClient) List(ctx context.Context) (result Se
}
if result.seplr.hasNextLink() && result.seplr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -366,7 +362,6 @@ func (client ServiceEndpointPoliciesClient) listNextResults(ctx context.Context,
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -422,6 +417,7 @@ func (client ServiceEndpointPoliciesClient) ListByResourceGroup(ctx context.Cont
}
if result.seplr.hasNextLink() && result.seplr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -482,7 +478,6 @@ func (client ServiceEndpointPoliciesClient) listByResourceGroupNextResults(ctx c
result, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -513,8 +508,8 @@ func (client ServiceEndpointPoliciesClient) Update(ctx context.Context, resource
ctx = tracing.StartSpan(ctx, fqdn+"/ServiceEndpointPoliciesClient.Update")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -527,7 +522,7 @@ func (client ServiceEndpointPoliciesClient) Update(ctx context.Context, resource
result, err = client.UpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesClient", "Update", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesClient", "Update", nil, "Failure sending request")
return
}
@@ -565,7 +560,10 @@ func (client ServiceEndpointPoliciesClient) UpdateSender(req *http.Request) (fut
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/serviceendpointpolicydefinitions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/serviceendpointpolicydefinitions.go
index e4d530e6c..090727f76 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/serviceendpointpolicydefinitions.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/serviceendpointpolicydefinitions.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -54,8 +43,8 @@ func (client ServiceEndpointPolicyDefinitionsClient) CreateOrUpdate(ctx context.
ctx = tracing.StartSpan(ctx, fqdn+"/ServiceEndpointPolicyDefinitionsClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -68,7 +57,7 @@ func (client ServiceEndpointPolicyDefinitionsClient) CreateOrUpdate(ctx context.
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ServiceEndpointPolicyDefinitionsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ServiceEndpointPolicyDefinitionsClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -107,7 +96,10 @@ func (client ServiceEndpointPolicyDefinitionsClient) CreateOrUpdateSender(req *h
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -133,8 +125,8 @@ func (client ServiceEndpointPolicyDefinitionsClient) Delete(ctx context.Context,
ctx = tracing.StartSpan(ctx, fqdn+"/ServiceEndpointPolicyDefinitionsClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -147,7 +139,7 @@ func (client ServiceEndpointPolicyDefinitionsClient) Delete(ctx context.Context,
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.ServiceEndpointPolicyDefinitionsClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.ServiceEndpointPolicyDefinitionsClient", "Delete", nil, "Failure sending request")
return
}
@@ -184,7 +176,10 @@ func (client ServiceEndpointPolicyDefinitionsClient) DeleteSender(req *http.Requ
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -313,6 +308,7 @@ func (client ServiceEndpointPolicyDefinitionsClient) ListByResourceGroup(ctx con
}
if result.sepdlr.hasNextLink() && result.sepdlr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -374,7 +370,6 @@ func (client ServiceEndpointPolicyDefinitionsClient) listByResourceGroupNextResu
result, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ServiceEndpointPolicyDefinitionsClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/subnets.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/subnets.go
index 86ea02a0c..34ed22a53 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/subnets.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/subnets.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -52,8 +41,8 @@ func (client SubnetsClient) CreateOrUpdate(ctx context.Context, resourceGroupNam
ctx = tracing.StartSpan(ctx, fqdn+"/SubnetsClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -66,7 +55,7 @@ func (client SubnetsClient) CreateOrUpdate(ctx context.Context, resourceGroupNam
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.SubnetsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.SubnetsClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -105,7 +94,10 @@ func (client SubnetsClient) CreateOrUpdateSender(req *http.Request) (future Subn
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -131,8 +123,8 @@ func (client SubnetsClient) Delete(ctx context.Context, resourceGroupName string
ctx = tracing.StartSpan(ctx, fqdn+"/SubnetsClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -145,7 +137,7 @@ func (client SubnetsClient) Delete(ctx context.Context, resourceGroupName string
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.SubnetsClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.SubnetsClient", "Delete", nil, "Failure sending request")
return
}
@@ -182,7 +174,10 @@ func (client SubnetsClient) DeleteSender(req *http.Request) (future SubnetsDelet
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -315,6 +310,7 @@ func (client SubnetsClient) List(ctx context.Context, resourceGroupName string,
}
if result.slr.hasNextLink() && result.slr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -376,7 +372,6 @@ func (client SubnetsClient) listNextResults(ctx context.Context, lastResults Sub
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.SubnetsClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/usages.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/usages.go
index 56feb4112..7c0d39b64 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/usages.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/usages.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -83,6 +72,7 @@ func (client UsagesClient) List(ctx context.Context, location string) (result Us
}
if result.ulr.hasNextLink() && result.ulr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -143,7 +133,6 @@ func (client UsagesClient) listNextResults(ctx context.Context, lastResults Usag
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.UsagesClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/version.go
index 07e2dd0c8..aea31f6ca 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/version.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/version.go
@@ -2,19 +2,8 @@ package network
import "github.com/Azure/azure-sdk-for-go/version"
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/virtualhubs.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/virtualhubs.go
index 4aa572275..f8c6a6cec 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/virtualhubs.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/virtualhubs.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -51,8 +40,8 @@ func (client VirtualHubsClient) CreateOrUpdate(ctx context.Context, resourceGrou
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualHubsClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -65,7 +54,7 @@ func (client VirtualHubsClient) CreateOrUpdate(ctx context.Context, resourceGrou
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VirtualHubsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VirtualHubsClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -104,7 +93,10 @@ func (client VirtualHubsClient) CreateOrUpdateSender(req *http.Request) (future
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -129,8 +121,8 @@ func (client VirtualHubsClient) Delete(ctx context.Context, resourceGroupName st
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualHubsClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -143,7 +135,7 @@ func (client VirtualHubsClient) Delete(ctx context.Context, resourceGroupName st
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VirtualHubsClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VirtualHubsClient", "Delete", nil, "Failure sending request")
return
}
@@ -179,7 +171,10 @@ func (client VirtualHubsClient) DeleteSender(req *http.Request) (future VirtualH
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -303,6 +298,7 @@ func (client VirtualHubsClient) List(ctx context.Context) (result ListVirtualHub
}
if result.lvhr.hasNextLink() && result.lvhr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -362,7 +358,6 @@ func (client VirtualHubsClient) listNextResults(ctx context.Context, lastResults
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VirtualHubsClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -418,6 +413,7 @@ func (client VirtualHubsClient) ListByResourceGroup(ctx context.Context, resourc
}
if result.lvhr.hasNextLink() && result.lvhr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -478,7 +474,6 @@ func (client VirtualHubsClient) listByResourceGroupNextResults(ctx context.Conte
result, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VirtualHubsClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -509,8 +504,8 @@ func (client VirtualHubsClient) UpdateTags(ctx context.Context, resourceGroupNam
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualHubsClient.UpdateTags")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -523,7 +518,7 @@ func (client VirtualHubsClient) UpdateTags(ctx context.Context, resourceGroupNam
result, err = client.UpdateTagsSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VirtualHubsClient", "UpdateTags", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VirtualHubsClient", "UpdateTags", nil, "Failure sending request")
return
}
@@ -561,7 +556,10 @@ func (client VirtualHubsClient) UpdateTagsSender(req *http.Request) (future Virt
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/virtualnetworkgatewayconnections.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/virtualnetworkgatewayconnections.go
index 225e0263e..dc62f4c0b 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/virtualnetworkgatewayconnections.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/virtualnetworkgatewayconnections.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -53,8 +42,8 @@ func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdate(ctx context.
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewayConnectionsClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -80,7 +69,7 @@ func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdate(ctx context.
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -118,7 +107,10 @@ func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdateSender(req *h
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -143,8 +135,8 @@ func (client VirtualNetworkGatewayConnectionsClient) Delete(ctx context.Context,
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewayConnectionsClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -157,7 +149,7 @@ func (client VirtualNetworkGatewayConnectionsClient) Delete(ctx context.Context,
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "Delete", nil, "Failure sending request")
return
}
@@ -193,7 +185,10 @@ func (client VirtualNetworkGatewayConnectionsClient) DeleteSender(req *http.Requ
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -397,6 +392,7 @@ func (client VirtualNetworkGatewayConnectionsClient) List(ctx context.Context, r
}
if result.vngclr.hasNextLink() && result.vngclr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -457,7 +453,6 @@ func (client VirtualNetworkGatewayConnectionsClient) listNextResults(ctx context
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -491,8 +486,8 @@ func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKey(ctx context.
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewayConnectionsClient.ResetSharedKey")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -514,7 +509,7 @@ func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKey(ctx context.
result, err = client.ResetSharedKeySender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "ResetSharedKey", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "ResetSharedKey", nil, "Failure sending request")
return
}
@@ -552,7 +547,10 @@ func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKeySender(req *h
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -581,8 +579,8 @@ func (client VirtualNetworkGatewayConnectionsClient) SetSharedKey(ctx context.Co
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewayConnectionsClient.SetSharedKey")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -601,7 +599,7 @@ func (client VirtualNetworkGatewayConnectionsClient) SetSharedKey(ctx context.Co
result, err = client.SetSharedKeySender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "SetSharedKey", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "SetSharedKey", nil, "Failure sending request")
return
}
@@ -639,7 +637,10 @@ func (client VirtualNetworkGatewayConnectionsClient) SetSharedKeySender(req *htt
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -665,8 +666,8 @@ func (client VirtualNetworkGatewayConnectionsClient) UpdateTags(ctx context.Cont
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewayConnectionsClient.UpdateTags")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -679,7 +680,7 @@ func (client VirtualNetworkGatewayConnectionsClient) UpdateTags(ctx context.Cont
result, err = client.UpdateTagsSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "UpdateTags", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "UpdateTags", nil, "Failure sending request")
return
}
@@ -717,7 +718,10 @@ func (client VirtualNetworkGatewayConnectionsClient) UpdateTagsSender(req *http.
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/virtualnetworkgateways.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/virtualnetworkgateways.go
index edb7aa4a3..d6ccc5e48 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/virtualnetworkgateways.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/virtualnetworkgateways.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -53,8 +42,8 @@ func (client VirtualNetworkGatewaysClient) CreateOrUpdate(ctx context.Context, r
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewaysClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -73,7 +62,7 @@ func (client VirtualNetworkGatewaysClient) CreateOrUpdate(ctx context.Context, r
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -111,7 +100,10 @@ func (client VirtualNetworkGatewaysClient) CreateOrUpdateSender(req *http.Reques
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -136,8 +128,8 @@ func (client VirtualNetworkGatewaysClient) Delete(ctx context.Context, resourceG
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewaysClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -150,7 +142,7 @@ func (client VirtualNetworkGatewaysClient) Delete(ctx context.Context, resourceG
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Delete", nil, "Failure sending request")
return
}
@@ -186,7 +178,10 @@ func (client VirtualNetworkGatewaysClient) DeleteSender(req *http.Request) (futu
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -212,8 +207,8 @@ func (client VirtualNetworkGatewaysClient) Generatevpnclientpackage(ctx context.
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewaysClient.Generatevpnclientpackage")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -226,7 +221,7 @@ func (client VirtualNetworkGatewaysClient) Generatevpnclientpackage(ctx context.
result, err = client.GeneratevpnclientpackageSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Generatevpnclientpackage", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Generatevpnclientpackage", nil, "Failure sending request")
return
}
@@ -264,7 +259,10 @@ func (client VirtualNetworkGatewaysClient) GeneratevpnclientpackageSender(req *h
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -291,8 +289,8 @@ func (client VirtualNetworkGatewaysClient) GenerateVpnProfile(ctx context.Contex
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewaysClient.GenerateVpnProfile")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -305,7 +303,7 @@ func (client VirtualNetworkGatewaysClient) GenerateVpnProfile(ctx context.Contex
result, err = client.GenerateVpnProfileSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "GenerateVpnProfile", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "GenerateVpnProfile", nil, "Failure sending request")
return
}
@@ -343,7 +341,10 @@ func (client VirtualNetworkGatewaysClient) GenerateVpnProfileSender(req *http.Re
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -446,8 +447,8 @@ func (client VirtualNetworkGatewaysClient) GetAdvertisedRoutes(ctx context.Conte
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewaysClient.GetAdvertisedRoutes")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -460,7 +461,7 @@ func (client VirtualNetworkGatewaysClient) GetAdvertisedRoutes(ctx context.Conte
result, err = client.GetAdvertisedRoutesSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "GetAdvertisedRoutes", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "GetAdvertisedRoutes", nil, "Failure sending request")
return
}
@@ -497,7 +498,10 @@ func (client VirtualNetworkGatewaysClient) GetAdvertisedRoutesSender(req *http.R
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -523,8 +527,8 @@ func (client VirtualNetworkGatewaysClient) GetBgpPeerStatus(ctx context.Context,
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewaysClient.GetBgpPeerStatus")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -537,7 +541,7 @@ func (client VirtualNetworkGatewaysClient) GetBgpPeerStatus(ctx context.Context,
result, err = client.GetBgpPeerStatusSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "GetBgpPeerStatus", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "GetBgpPeerStatus", nil, "Failure sending request")
return
}
@@ -576,7 +580,10 @@ func (client VirtualNetworkGatewaysClient) GetBgpPeerStatusSender(req *http.Requ
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -602,8 +609,8 @@ func (client VirtualNetworkGatewaysClient) GetLearnedRoutes(ctx context.Context,
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewaysClient.GetLearnedRoutes")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -616,7 +623,7 @@ func (client VirtualNetworkGatewaysClient) GetLearnedRoutes(ctx context.Context,
result, err = client.GetLearnedRoutesSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "GetLearnedRoutes", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "GetLearnedRoutes", nil, "Failure sending request")
return
}
@@ -652,7 +659,10 @@ func (client VirtualNetworkGatewaysClient) GetLearnedRoutesSender(req *http.Requ
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -679,8 +689,8 @@ func (client VirtualNetworkGatewaysClient) GetVpnclientIpsecParameters(ctx conte
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewaysClient.GetVpnclientIpsecParameters")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -693,7 +703,7 @@ func (client VirtualNetworkGatewaysClient) GetVpnclientIpsecParameters(ctx conte
result, err = client.GetVpnclientIpsecParametersSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "GetVpnclientIpsecParameters", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "GetVpnclientIpsecParameters", nil, "Failure sending request")
return
}
@@ -729,7 +739,10 @@ func (client VirtualNetworkGatewaysClient) GetVpnclientIpsecParametersSender(req
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -755,8 +768,8 @@ func (client VirtualNetworkGatewaysClient) GetVpnProfilePackageURL(ctx context.C
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewaysClient.GetVpnProfilePackageURL")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -769,7 +782,7 @@ func (client VirtualNetworkGatewaysClient) GetVpnProfilePackageURL(ctx context.C
result, err = client.GetVpnProfilePackageURLSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "GetVpnProfilePackageURL", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "GetVpnProfilePackageURL", nil, "Failure sending request")
return
}
@@ -805,7 +818,10 @@ func (client VirtualNetworkGatewaysClient) GetVpnProfilePackageURLSender(req *ht
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -856,6 +872,7 @@ func (client VirtualNetworkGatewaysClient) List(ctx context.Context, resourceGro
}
if result.vnglr.hasNextLink() && result.vnglr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -916,7 +933,6 @@ func (client VirtualNetworkGatewaysClient) listNextResults(ctx context.Context,
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -973,6 +989,7 @@ func (client VirtualNetworkGatewaysClient) ListConnections(ctx context.Context,
}
if result.vnglcr.hasNextLink() && result.vnglcr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -1034,7 +1051,6 @@ func (client VirtualNetworkGatewaysClient) listConnectionsNextResults(ctx contex
result, err = client.ListConnectionsResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "listConnectionsNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -1066,8 +1082,8 @@ func (client VirtualNetworkGatewaysClient) Reset(ctx context.Context, resourceGr
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewaysClient.Reset")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -1080,7 +1096,7 @@ func (client VirtualNetworkGatewaysClient) Reset(ctx context.Context, resourceGr
result, err = client.ResetSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Reset", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Reset", nil, "Failure sending request")
return
}
@@ -1119,7 +1135,10 @@ func (client VirtualNetworkGatewaysClient) ResetSender(req *http.Request) (futur
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -1145,8 +1164,8 @@ func (client VirtualNetworkGatewaysClient) ResetVpnClientSharedKey(ctx context.C
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewaysClient.ResetVpnClientSharedKey")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -1159,7 +1178,7 @@ func (client VirtualNetworkGatewaysClient) ResetVpnClientSharedKey(ctx context.C
result, err = client.ResetVpnClientSharedKeySender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "ResetVpnClientSharedKey", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "ResetVpnClientSharedKey", nil, "Failure sending request")
return
}
@@ -1195,7 +1214,10 @@ func (client VirtualNetworkGatewaysClient) ResetVpnClientSharedKeySender(req *ht
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -1222,8 +1244,8 @@ func (client VirtualNetworkGatewaysClient) SetVpnclientIpsecParameters(ctx conte
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewaysClient.SetVpnclientIpsecParameters")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -1243,7 +1265,7 @@ func (client VirtualNetworkGatewaysClient) SetVpnclientIpsecParameters(ctx conte
result, err = client.SetVpnclientIpsecParametersSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "SetVpnclientIpsecParameters", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "SetVpnclientIpsecParameters", nil, "Failure sending request")
return
}
@@ -1281,7 +1303,10 @@ func (client VirtualNetworkGatewaysClient) SetVpnclientIpsecParametersSender(req
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -1383,8 +1408,8 @@ func (client VirtualNetworkGatewaysClient) UpdateTags(ctx context.Context, resou
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewaysClient.UpdateTags")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -1397,7 +1422,7 @@ func (client VirtualNetworkGatewaysClient) UpdateTags(ctx context.Context, resou
result, err = client.UpdateTagsSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "UpdateTags", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "UpdateTags", nil, "Failure sending request")
return
}
@@ -1435,7 +1460,10 @@ func (client VirtualNetworkGatewaysClient) UpdateTagsSender(req *http.Request) (
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/virtualnetworkpeerings.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/virtualnetworkpeerings.go
index fe04e3bd2..8cff63869 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/virtualnetworkpeerings.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/virtualnetworkpeerings.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -54,8 +43,8 @@ func (client VirtualNetworkPeeringsClient) CreateOrUpdate(ctx context.Context, r
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkPeeringsClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -68,7 +57,7 @@ func (client VirtualNetworkPeeringsClient) CreateOrUpdate(ctx context.Context, r
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -107,7 +96,10 @@ func (client VirtualNetworkPeeringsClient) CreateOrUpdateSender(req *http.Reques
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -133,8 +125,8 @@ func (client VirtualNetworkPeeringsClient) Delete(ctx context.Context, resourceG
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkPeeringsClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -147,7 +139,7 @@ func (client VirtualNetworkPeeringsClient) Delete(ctx context.Context, resourceG
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "Delete", nil, "Failure sending request")
return
}
@@ -184,7 +176,10 @@ func (client VirtualNetworkPeeringsClient) DeleteSender(req *http.Request) (futu
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -313,6 +308,7 @@ func (client VirtualNetworkPeeringsClient) List(ctx context.Context, resourceGro
}
if result.vnplr.hasNextLink() && result.vnplr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -374,7 +370,6 @@ func (client VirtualNetworkPeeringsClient) listNextResults(ctx context.Context,
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/virtualnetworks.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/virtualnetworks.go
index 2871c8ae1..2c5fa168e 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/virtualnetworks.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/virtualnetworks.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -129,8 +118,8 @@ func (client VirtualNetworksClient) CreateOrUpdate(ctx context.Context, resource
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworksClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -143,7 +132,7 @@ func (client VirtualNetworksClient) CreateOrUpdate(ctx context.Context, resource
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -181,7 +170,10 @@ func (client VirtualNetworksClient) CreateOrUpdateSender(req *http.Request) (fut
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -206,8 +198,8 @@ func (client VirtualNetworksClient) Delete(ctx context.Context, resourceGroupNam
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworksClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -220,7 +212,7 @@ func (client VirtualNetworksClient) Delete(ctx context.Context, resourceGroupNam
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Delete", nil, "Failure sending request")
return
}
@@ -256,7 +248,10 @@ func (client VirtualNetworksClient) DeleteSender(req *http.Request) (future Virt
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -386,6 +381,7 @@ func (client VirtualNetworksClient) List(ctx context.Context, resourceGroupName
}
if result.vnlr.hasNextLink() && result.vnlr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -446,7 +442,6 @@ func (client VirtualNetworksClient) listNextResults(ctx context.Context, lastRes
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -500,6 +495,7 @@ func (client VirtualNetworksClient) ListAll(ctx context.Context) (result Virtual
}
if result.vnlr.hasNextLink() && result.vnlr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -559,7 +555,6 @@ func (client VirtualNetworksClient) listAllNextResults(ctx context.Context, last
result, err = client.ListAllResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "listAllNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -616,6 +611,7 @@ func (client VirtualNetworksClient) ListUsage(ctx context.Context, resourceGroup
}
if result.vnlur.hasNextLink() && result.vnlur.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -677,7 +673,6 @@ func (client VirtualNetworksClient) listUsageNextResults(ctx context.Context, la
result, err = client.ListUsageResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "listUsageNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -708,8 +703,8 @@ func (client VirtualNetworksClient) UpdateTags(ctx context.Context, resourceGrou
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworksClient.UpdateTags")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -722,7 +717,7 @@ func (client VirtualNetworksClient) UpdateTags(ctx context.Context, resourceGrou
result, err = client.UpdateTagsSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "UpdateTags", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "UpdateTags", nil, "Failure sending request")
return
}
@@ -760,7 +755,10 @@ func (client VirtualNetworksClient) UpdateTagsSender(req *http.Request) (future
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/virtualnetworktaps.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/virtualnetworktaps.go
index 7a74e9aa6..06cb25449 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/virtualnetworktaps.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/virtualnetworktaps.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -53,8 +42,8 @@ func (client VirtualNetworkTapsClient) CreateOrUpdate(ctx context.Context, resou
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkTapsClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -98,7 +87,7 @@ func (client VirtualNetworkTapsClient) CreateOrUpdate(ctx context.Context, resou
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VirtualNetworkTapsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VirtualNetworkTapsClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -136,7 +125,10 @@ func (client VirtualNetworkTapsClient) CreateOrUpdateSender(req *http.Request) (
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -161,8 +153,8 @@ func (client VirtualNetworkTapsClient) Delete(ctx context.Context, resourceGroup
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkTapsClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -175,7 +167,7 @@ func (client VirtualNetworkTapsClient) Delete(ctx context.Context, resourceGroup
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VirtualNetworkTapsClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VirtualNetworkTapsClient", "Delete", nil, "Failure sending request")
return
}
@@ -211,7 +203,10 @@ func (client VirtualNetworkTapsClient) DeleteSender(req *http.Request) (future V
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -335,6 +330,7 @@ func (client VirtualNetworkTapsClient) ListAll(ctx context.Context) (result Virt
}
if result.vntlr.hasNextLink() && result.vntlr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -394,7 +390,6 @@ func (client VirtualNetworkTapsClient) listAllNextResults(ctx context.Context, l
result, err = client.ListAllResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VirtualNetworkTapsClient", "listAllNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -450,6 +445,7 @@ func (client VirtualNetworkTapsClient) ListByResourceGroup(ctx context.Context,
}
if result.vntlr.hasNextLink() && result.vntlr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -510,7 +506,6 @@ func (client VirtualNetworkTapsClient) listByResourceGroupNextResults(ctx contex
result, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VirtualNetworkTapsClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -541,8 +536,8 @@ func (client VirtualNetworkTapsClient) UpdateTags(ctx context.Context, resourceG
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkTapsClient.UpdateTags")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -555,7 +550,7 @@ func (client VirtualNetworkTapsClient) UpdateTags(ctx context.Context, resourceG
result, err = client.UpdateTagsSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VirtualNetworkTapsClient", "UpdateTags", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VirtualNetworkTapsClient", "UpdateTags", nil, "Failure sending request")
return
}
@@ -593,7 +588,10 @@ func (client VirtualNetworkTapsClient) UpdateTagsSender(req *http.Request) (futu
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/virtualwans.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/virtualwans.go
index 36c29289f..72f17ab14 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/virtualwans.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/virtualwans.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -51,8 +40,8 @@ func (client VirtualWansClient) CreateOrUpdate(ctx context.Context, resourceGrou
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualWansClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -65,7 +54,7 @@ func (client VirtualWansClient) CreateOrUpdate(ctx context.Context, resourceGrou
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VirtualWansClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VirtualWansClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -104,7 +93,10 @@ func (client VirtualWansClient) CreateOrUpdateSender(req *http.Request) (future
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -129,8 +121,8 @@ func (client VirtualWansClient) Delete(ctx context.Context, resourceGroupName st
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualWansClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -143,7 +135,7 @@ func (client VirtualWansClient) Delete(ctx context.Context, resourceGroupName st
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VirtualWansClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VirtualWansClient", "Delete", nil, "Failure sending request")
return
}
@@ -179,7 +171,10 @@ func (client VirtualWansClient) DeleteSender(req *http.Request) (future VirtualW
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -303,6 +298,7 @@ func (client VirtualWansClient) List(ctx context.Context) (result ListVirtualWAN
}
if result.lvwnr.hasNextLink() && result.lvwnr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -362,7 +358,6 @@ func (client VirtualWansClient) listNextResults(ctx context.Context, lastResults
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VirtualWansClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -418,6 +413,7 @@ func (client VirtualWansClient) ListByResourceGroup(ctx context.Context, resourc
}
if result.lvwnr.hasNextLink() && result.lvwnr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -478,7 +474,6 @@ func (client VirtualWansClient) listByResourceGroupNextResults(ctx context.Conte
result, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VirtualWansClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -509,8 +504,8 @@ func (client VirtualWansClient) UpdateTags(ctx context.Context, resourceGroupNam
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualWansClient.UpdateTags")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -523,7 +518,7 @@ func (client VirtualWansClient) UpdateTags(ctx context.Context, resourceGroupNam
result, err = client.UpdateTagsSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VirtualWansClient", "UpdateTags", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VirtualWansClient", "UpdateTags", nil, "Failure sending request")
return
}
@@ -561,7 +556,10 @@ func (client VirtualWansClient) UpdateTagsSender(req *http.Request) (future Virt
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/vpnconnections.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/vpnconnections.go
index b80468ac2..b912a341f 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/vpnconnections.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/vpnconnections.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -53,8 +42,8 @@ func (client VpnConnectionsClient) CreateOrUpdate(ctx context.Context, resourceG
ctx = tracing.StartSpan(ctx, fqdn+"/VpnConnectionsClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -67,7 +56,7 @@ func (client VpnConnectionsClient) CreateOrUpdate(ctx context.Context, resourceG
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VpnConnectionsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VpnConnectionsClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -107,7 +96,10 @@ func (client VpnConnectionsClient) CreateOrUpdateSender(req *http.Request) (futu
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -133,8 +125,8 @@ func (client VpnConnectionsClient) Delete(ctx context.Context, resourceGroupName
ctx = tracing.StartSpan(ctx, fqdn+"/VpnConnectionsClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -147,7 +139,7 @@ func (client VpnConnectionsClient) Delete(ctx context.Context, resourceGroupName
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VpnConnectionsClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VpnConnectionsClient", "Delete", nil, "Failure sending request")
return
}
@@ -184,7 +176,10 @@ func (client VpnConnectionsClient) DeleteSender(req *http.Request) (future VpnCo
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -313,6 +308,7 @@ func (client VpnConnectionsClient) ListByVpnGateway(ctx context.Context, resourc
}
if result.lvcr.hasNextLink() && result.lvcr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -374,7 +370,6 @@ func (client VpnConnectionsClient) listByVpnGatewayNextResults(ctx context.Conte
result, err = client.ListByVpnGatewayResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VpnConnectionsClient", "listByVpnGatewayNextResults", resp, "Failure responding to next results request")
- return
}
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/vpngateways.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/vpngateways.go
index 6c6529069..926e6fc1b 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/vpngateways.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/vpngateways.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -51,8 +40,8 @@ func (client VpnGatewaysClient) CreateOrUpdate(ctx context.Context, resourceGrou
ctx = tracing.StartSpan(ctx, fqdn+"/VpnGatewaysClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -65,7 +54,7 @@ func (client VpnGatewaysClient) CreateOrUpdate(ctx context.Context, resourceGrou
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VpnGatewaysClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VpnGatewaysClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -104,7 +93,10 @@ func (client VpnGatewaysClient) CreateOrUpdateSender(req *http.Request) (future
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -129,8 +121,8 @@ func (client VpnGatewaysClient) Delete(ctx context.Context, resourceGroupName st
ctx = tracing.StartSpan(ctx, fqdn+"/VpnGatewaysClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -143,7 +135,7 @@ func (client VpnGatewaysClient) Delete(ctx context.Context, resourceGroupName st
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VpnGatewaysClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VpnGatewaysClient", "Delete", nil, "Failure sending request")
return
}
@@ -179,7 +171,10 @@ func (client VpnGatewaysClient) DeleteSender(req *http.Request) (future VpnGatew
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -303,6 +298,7 @@ func (client VpnGatewaysClient) List(ctx context.Context) (result ListVpnGateway
}
if result.lvgr.hasNextLink() && result.lvgr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -362,7 +358,6 @@ func (client VpnGatewaysClient) listNextResults(ctx context.Context, lastResults
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VpnGatewaysClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -418,6 +413,7 @@ func (client VpnGatewaysClient) ListByResourceGroup(ctx context.Context, resourc
}
if result.lvgr.hasNextLink() && result.lvgr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -478,7 +474,6 @@ func (client VpnGatewaysClient) listByResourceGroupNextResults(ctx context.Conte
result, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VpnGatewaysClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -509,8 +504,8 @@ func (client VpnGatewaysClient) UpdateTags(ctx context.Context, resourceGroupNam
ctx = tracing.StartSpan(ctx, fqdn+"/VpnGatewaysClient.UpdateTags")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -523,7 +518,7 @@ func (client VpnGatewaysClient) UpdateTags(ctx context.Context, resourceGroupNam
result, err = client.UpdateTagsSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VpnGatewaysClient", "UpdateTags", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VpnGatewaysClient", "UpdateTags", nil, "Failure sending request")
return
}
@@ -561,7 +556,10 @@ func (client VpnGatewaysClient) UpdateTagsSender(req *http.Request) (future VpnG
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/vpnsites.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/vpnsites.go
index e258d0804..9a1144999 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/vpnsites.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/vpnsites.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -51,8 +40,8 @@ func (client VpnSitesClient) CreateOrUpdate(ctx context.Context, resourceGroupNa
ctx = tracing.StartSpan(ctx, fqdn+"/VpnSitesClient.CreateOrUpdate")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -65,7 +54,7 @@ func (client VpnSitesClient) CreateOrUpdate(ctx context.Context, resourceGroupNa
result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
@@ -104,7 +93,10 @@ func (client VpnSitesClient) CreateOrUpdateSender(req *http.Request) (future Vpn
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -129,8 +121,8 @@ func (client VpnSitesClient) Delete(ctx context.Context, resourceGroupName strin
ctx = tracing.StartSpan(ctx, fqdn+"/VpnSitesClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -143,7 +135,7 @@ func (client VpnSitesClient) Delete(ctx context.Context, resourceGroupName strin
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "Delete", nil, "Failure sending request")
return
}
@@ -179,7 +171,10 @@ func (client VpnSitesClient) DeleteSender(req *http.Request) (future VpnSitesDel
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -303,6 +298,7 @@ func (client VpnSitesClient) List(ctx context.Context) (result ListVpnSitesResul
}
if result.lvsr.hasNextLink() && result.lvsr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -362,7 +358,6 @@ func (client VpnSitesClient) listNextResults(ctx context.Context, lastResults Li
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "listNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -418,6 +413,7 @@ func (client VpnSitesClient) ListByResourceGroup(ctx context.Context, resourceGr
}
if result.lvsr.hasNextLink() && result.lvsr.IsEmpty() {
err = result.NextWithContext(ctx)
+ return
}
return
@@ -478,7 +474,6 @@ func (client VpnSitesClient) listByResourceGroupNextResults(ctx context.Context,
result, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
- return
}
return
}
@@ -509,8 +504,8 @@ func (client VpnSitesClient) UpdateTags(ctx context.Context, resourceGroupName s
ctx = tracing.StartSpan(ctx, fqdn+"/VpnSitesClient.UpdateTags")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -523,7 +518,7 @@ func (client VpnSitesClient) UpdateTags(ctx context.Context, resourceGroupName s
result, err = client.UpdateTagsSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "UpdateTags", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "UpdateTags", nil, "Failure sending request")
return
}
@@ -561,7 +556,10 @@ func (client VpnSitesClient) UpdateTagsSender(req *http.Request) (future VpnSite
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/vpnsitesconfiguration.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/vpnsitesconfiguration.go
index fd66abe32..f005ec637 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/vpnsitesconfiguration.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/vpnsitesconfiguration.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -52,8 +41,8 @@ func (client VpnSitesConfigurationClient) Download(ctx context.Context, resource
ctx = tracing.StartSpan(ctx, fqdn+"/VpnSitesConfigurationClient.Download")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -66,7 +55,7 @@ func (client VpnSitesConfigurationClient) Download(ctx context.Context, resource
result, err = client.DownloadSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.VpnSitesConfigurationClient", "Download", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.VpnSitesConfigurationClient", "Download", nil, "Failure sending request")
return
}
@@ -104,7 +93,10 @@ func (client VpnSitesConfigurationClient) DownloadSender(req *http.Request) (fut
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/watchers.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/watchers.go
index b9fa2e517..be446936f 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/watchers.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network/watchers.go
@@ -1,18 +1,7 @@
package network
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -53,8 +42,8 @@ func (client WatchersClient) CheckConnectivity(ctx context.Context, resourceGrou
ctx = tracing.StartSpan(ctx, fqdn+"/WatchersClient.CheckConnectivity")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -75,7 +64,7 @@ func (client WatchersClient) CheckConnectivity(ctx context.Context, resourceGrou
result, err = client.CheckConnectivitySender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.WatchersClient", "CheckConnectivity", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.WatchersClient", "CheckConnectivity", nil, "Failure sending request")
return
}
@@ -113,7 +102,10 @@ func (client WatchersClient) CheckConnectivitySender(req *http.Request) (future
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -217,8 +209,8 @@ func (client WatchersClient) Delete(ctx context.Context, resourceGroupName strin
ctx = tracing.StartSpan(ctx, fqdn+"/WatchersClient.Delete")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -231,7 +223,7 @@ func (client WatchersClient) Delete(ctx context.Context, resourceGroupName strin
result, err = client.DeleteSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.WatchersClient", "Delete", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.WatchersClient", "Delete", nil, "Failure sending request")
return
}
@@ -267,7 +259,10 @@ func (client WatchersClient) DeleteSender(req *http.Request) (future WatchersDel
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -369,8 +364,8 @@ func (client WatchersClient) GetAzureReachabilityReport(ctx context.Context, res
ctx = tracing.StartSpan(ctx, fqdn+"/WatchersClient.GetAzureReachabilityReport")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -392,7 +387,7 @@ func (client WatchersClient) GetAzureReachabilityReport(ctx context.Context, res
result, err = client.GetAzureReachabilityReportSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.WatchersClient", "GetAzureReachabilityReport", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.WatchersClient", "GetAzureReachabilityReport", nil, "Failure sending request")
return
}
@@ -430,7 +425,10 @@ func (client WatchersClient) GetAzureReachabilityReportSender(req *http.Request)
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -456,8 +454,8 @@ func (client WatchersClient) GetFlowLogStatus(ctx context.Context, resourceGroup
ctx = tracing.StartSpan(ctx, fqdn+"/WatchersClient.GetFlowLogStatus")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -476,7 +474,7 @@ func (client WatchersClient) GetFlowLogStatus(ctx context.Context, resourceGroup
result, err = client.GetFlowLogStatusSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.WatchersClient", "GetFlowLogStatus", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.WatchersClient", "GetFlowLogStatus", nil, "Failure sending request")
return
}
@@ -514,7 +512,10 @@ func (client WatchersClient) GetFlowLogStatusSender(req *http.Request) (future W
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -540,8 +541,8 @@ func (client WatchersClient) GetNetworkConfigurationDiagnostic(ctx context.Conte
ctx = tracing.StartSpan(ctx, fqdn+"/WatchersClient.GetNetworkConfigurationDiagnostic")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -561,7 +562,7 @@ func (client WatchersClient) GetNetworkConfigurationDiagnostic(ctx context.Conte
result, err = client.GetNetworkConfigurationDiagnosticSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.WatchersClient", "GetNetworkConfigurationDiagnostic", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.WatchersClient", "GetNetworkConfigurationDiagnostic", nil, "Failure sending request")
return
}
@@ -599,7 +600,10 @@ func (client WatchersClient) GetNetworkConfigurationDiagnosticSender(req *http.R
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -625,8 +629,8 @@ func (client WatchersClient) GetNextHop(ctx context.Context, resourceGroupName s
ctx = tracing.StartSpan(ctx, fqdn+"/WatchersClient.GetNextHop")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -647,7 +651,7 @@ func (client WatchersClient) GetNextHop(ctx context.Context, resourceGroupName s
result, err = client.GetNextHopSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.WatchersClient", "GetNextHop", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.WatchersClient", "GetNextHop", nil, "Failure sending request")
return
}
@@ -685,7 +689,10 @@ func (client WatchersClient) GetNextHopSender(req *http.Request) (future Watcher
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -790,8 +797,8 @@ func (client WatchersClient) GetTroubleshooting(ctx context.Context, resourceGro
ctx = tracing.StartSpan(ctx, fqdn+"/WatchersClient.GetTroubleshooting")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -814,7 +821,7 @@ func (client WatchersClient) GetTroubleshooting(ctx context.Context, resourceGro
result, err = client.GetTroubleshootingSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.WatchersClient", "GetTroubleshooting", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.WatchersClient", "GetTroubleshooting", nil, "Failure sending request")
return
}
@@ -852,7 +859,10 @@ func (client WatchersClient) GetTroubleshootingSender(req *http.Request) (future
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -878,8 +888,8 @@ func (client WatchersClient) GetTroubleshootingResult(ctx context.Context, resou
ctx = tracing.StartSpan(ctx, fqdn+"/WatchersClient.GetTroubleshootingResult")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -898,7 +908,7 @@ func (client WatchersClient) GetTroubleshootingResult(ctx context.Context, resou
result, err = client.GetTroubleshootingResultSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.WatchersClient", "GetTroubleshootingResult", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.WatchersClient", "GetTroubleshootingResult", nil, "Failure sending request")
return
}
@@ -936,7 +946,10 @@ func (client WatchersClient) GetTroubleshootingResultSender(req *http.Request) (
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -962,8 +975,8 @@ func (client WatchersClient) GetVMSecurityRules(ctx context.Context, resourceGro
ctx = tracing.StartSpan(ctx, fqdn+"/WatchersClient.GetVMSecurityRules")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -982,7 +995,7 @@ func (client WatchersClient) GetVMSecurityRules(ctx context.Context, resourceGro
result, err = client.GetVMSecurityRulesSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.WatchersClient", "GetVMSecurityRules", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.WatchersClient", "GetVMSecurityRules", nil, "Failure sending request")
return
}
@@ -1020,7 +1033,10 @@ func (client WatchersClient) GetVMSecurityRulesSender(req *http.Request) (future
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -1191,8 +1207,8 @@ func (client WatchersClient) ListAvailableProviders(ctx context.Context, resourc
ctx = tracing.StartSpan(ctx, fqdn+"/WatchersClient.ListAvailableProviders")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -1205,7 +1221,7 @@ func (client WatchersClient) ListAvailableProviders(ctx context.Context, resourc
result, err = client.ListAvailableProvidersSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.WatchersClient", "ListAvailableProviders", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.WatchersClient", "ListAvailableProviders", nil, "Failure sending request")
return
}
@@ -1243,7 +1259,10 @@ func (client WatchersClient) ListAvailableProvidersSender(req *http.Request) (fu
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -1269,8 +1288,8 @@ func (client WatchersClient) SetFlowLogConfiguration(ctx context.Context, resour
ctx = tracing.StartSpan(ctx, fqdn+"/WatchersClient.SetFlowLogConfiguration")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -1301,7 +1320,7 @@ func (client WatchersClient) SetFlowLogConfiguration(ctx context.Context, resour
result, err = client.SetFlowLogConfigurationSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.WatchersClient", "SetFlowLogConfiguration", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.WatchersClient", "SetFlowLogConfiguration", nil, "Failure sending request")
return
}
@@ -1339,7 +1358,10 @@ func (client WatchersClient) SetFlowLogConfigurationSender(req *http.Request) (f
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
@@ -1444,8 +1466,8 @@ func (client WatchersClient) VerifyIPFlow(ctx context.Context, resourceGroupName
ctx = tracing.StartSpan(ctx, fqdn+"/WatchersClient.VerifyIPFlow")
defer func() {
sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -1468,7 +1490,7 @@ func (client WatchersClient) VerifyIPFlow(ctx context.Context, resourceGroupName
result, err = client.VerifyIPFlowSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "network.WatchersClient", "VerifyIPFlow", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "network.WatchersClient", "VerifyIPFlow", nil, "Failure sending request")
return
}
@@ -1506,7 +1528,10 @@ func (client WatchersClient) VerifyIPFlowSender(req *http.Request) (future Watch
if err != nil {
return
}
- future.Future, err = azure.NewFutureFromResponse(resp)
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go
index c863e6e50..75a813341 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go
@@ -1,21 +1,7 @@
package version
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
// Number contains the semantic version of this SDK.
-const Number = "v49.2.0"
+const Number = "v57.1.0"
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod b/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod
index abcc27d4c..6ade6802f 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod
@@ -1,12 +1,13 @@
module github.com/Azure/go-autorest/autorest/adal
-go 1.12
+go 1.15
require (
github.com/Azure/go-autorest v14.2.0+incompatible
github.com/Azure/go-autorest/autorest/date v0.3.0
github.com/Azure/go-autorest/autorest/mocks v0.4.1
+ github.com/Azure/go-autorest/logger v0.2.1
github.com/Azure/go-autorest/tracing v0.6.0
- github.com/form3tech-oss/jwt-go v3.2.2+incompatible
+ github.com/golang-jwt/jwt/v4 v4.0.0
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0
)
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum b/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum
index 9d55b0f59..2b63c7dc1 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum
@@ -4,16 +4,16 @@ github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8K
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
+github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
-github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=
-github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
+github.com/golang-jwt/jwt/v4 v4.0.0 h1:RAqyYixv1p7uEnocuy8P1nru5wprCh/MH2BIlW5z5/o=
+github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go
index 7551b7923..647a61bb8 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go
@@ -1,3 +1,4 @@
+//go:build modhack
// +build modhack
package adal
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
index d7e4372bb..1826a68dc 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
@@ -28,6 +28,7 @@ const (
mimeTypeFormPost = "application/x-www-form-urlencoded"
)
+// DO NOT ACCESS THIS DIRECTLY. go through sender()
var defaultSender Sender
var defaultSenderInit = &sync.Once{}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
index addc91099..20767178c 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
@@ -36,7 +36,8 @@ import (
"time"
"github.com/Azure/go-autorest/autorest/date"
- "github.com/form3tech-oss/jwt-go"
+ "github.com/Azure/go-autorest/logger"
+ "github.com/golang-jwt/jwt/v4"
)
const (
@@ -70,13 +71,13 @@ const (
defaultMaxMSIRefreshAttempts = 5
// asMSIEndpointEnv is the environment variable used to store the endpoint on App Service and Functions
- asMSIEndpointEnv = "MSI_ENDPOINT"
+ msiEndpointEnv = "MSI_ENDPOINT"
// asMSISecretEnv is the environment variable used to store the request secret on App Service and Functions
- asMSISecretEnv = "MSI_SECRET"
+ msiSecretEnv = "MSI_SECRET"
- // the API version to use for the App Service MSI endpoint
- appServiceAPIVersion = "2017-09-01"
+ // the API version to use for the legacy App Service MSI endpoint
+ appServiceAPIVersion2017 = "2017-09-01"
// secret header used when authenticating against app service MSI endpoint
secretHeader = "Secret"
@@ -292,6 +293,8 @@ func (secret ServicePrincipalCertificateSecret) MarshalJSON() ([]byte, error) {
// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension.
type ServicePrincipalMSISecret struct {
+ msiType msiType
+ clientResourceID string
}
// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
@@ -662,96 +665,173 @@ func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clie
)
}
-// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines.
-func GetMSIVMEndpoint() (string, error) {
- return msiEndpoint, nil
+type msiType int
+
+const (
+ msiTypeUnavailable msiType = iota
+ msiTypeAppServiceV20170901
+ msiTypeCloudShell
+ msiTypeIMDS
+)
+
+func (m msiType) String() string {
+ switch m {
+ case msiTypeUnavailable:
+ return "unavailable"
+ case msiTypeAppServiceV20170901:
+ return "AppServiceV20170901"
+ case msiTypeCloudShell:
+ return "CloudShell"
+ case msiTypeIMDS:
+ return "IMDS"
+ default:
+ return fmt.Sprintf("unhandled MSI type %d", m)
+ }
}
-// NOTE: this only indicates if the ASE environment credentials have been set
-// which does not necessarily mean that the caller is authenticating via ASE!
-func isAppService() bool {
- _, asMSIEndpointEnvExists := os.LookupEnv(asMSIEndpointEnv)
- _, asMSISecretEnvExists := os.LookupEnv(asMSISecretEnv)
+// returns the MSI type and endpoint, or an error
+func getMSIType() (msiType, string, error) {
+ if endpointEnvVar := os.Getenv(msiEndpointEnv); endpointEnvVar != "" {
+ // if the env var MSI_ENDPOINT is set
+ if secretEnvVar := os.Getenv(msiSecretEnv); secretEnvVar != "" {
+ // if BOTH the env vars MSI_ENDPOINT and MSI_SECRET are set the msiType is AppService
+ return msiTypeAppServiceV20170901, endpointEnvVar, nil
+ }
+ // if ONLY the env var MSI_ENDPOINT is set the msiType is CloudShell
+ return msiTypeCloudShell, endpointEnvVar, nil
+ } else if msiAvailableHook(context.Background(), sender()) {
+ // if MSI_ENDPOINT is NOT set AND the IMDS endpoint is available the msiType is IMDS. This will timeout after 500 milliseconds
+ return msiTypeIMDS, msiEndpoint, nil
+ } else {
+ // if MSI_ENDPOINT is NOT set and IMDS endpoint is not available Managed Identity is not available
+ return msiTypeUnavailable, "", errors.New("MSI not available")
+ }
+}
- return asMSIEndpointEnvExists && asMSISecretEnvExists
+// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines.
+// NOTE: this always returns the IMDS endpoint, it does not work for app services or cloud shell.
+// Deprecated: NewServicePrincipalTokenFromMSI() and variants will automatically detect the endpoint.
+func GetMSIVMEndpoint() (string, error) {
+ return msiEndpoint, nil
}
-// GetMSIAppServiceEndpoint get the MSI endpoint for App Service and Functions
+// GetMSIAppServiceEndpoint get the MSI endpoint for App Service and Functions.
+// It will return an error when not running in an app service/functions environment.
+// Deprecated: NewServicePrincipalTokenFromMSI() and variants will automatically detect the endpoint.
func GetMSIAppServiceEndpoint() (string, error) {
- asMSIEndpoint, asMSIEndpointEnvExists := os.LookupEnv(asMSIEndpointEnv)
-
- if asMSIEndpointEnvExists {
- return asMSIEndpoint, nil
+ msiType, endpoint, err := getMSIType()
+ if err != nil {
+ return "", err
+ }
+ switch msiType {
+ case msiTypeAppServiceV20170901:
+ return endpoint, nil
+ default:
+ return "", fmt.Errorf("%s is not app service environment", msiType)
}
- return "", errors.New("MSI endpoint not found")
}
// GetMSIEndpoint get the appropriate MSI endpoint depending on the runtime environment
+// Deprecated: NewServicePrincipalTokenFromMSI() and variants will automatically detect the endpoint.
func GetMSIEndpoint() (string, error) {
- if isAppService() {
- return GetMSIAppServiceEndpoint()
- }
- return GetMSIVMEndpoint()
+ _, endpoint, err := getMSIType()
+ return endpoint, err
}
// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension.
// It will use the system assigned identity when creating the token.
+// msiEndpoint - empty string, or pass a non-empty string to override the default value.
+// Deprecated: use NewServicePrincipalTokenFromManagedIdentity() instead.
func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
- return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, nil, callbacks...)
+ return newServicePrincipalTokenFromMSI(msiEndpoint, resource, "", "", callbacks...)
}
// NewServicePrincipalTokenFromMSIWithUserAssignedID creates a ServicePrincipalToken via the MSI VM Extension.
// It will use the clientID of specified user assigned identity when creating the token.
+// msiEndpoint - empty string, or pass a non-empty string to override the default value.
+// Deprecated: use NewServicePrincipalTokenFromManagedIdentity() instead.
func NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, resource string, userAssignedID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
- return newServicePrincipalTokenFromMSI(msiEndpoint, resource, &userAssignedID, nil, callbacks...)
+ if err := validateStringParam(userAssignedID, "userAssignedID"); err != nil {
+ return nil, err
+ }
+ return newServicePrincipalTokenFromMSI(msiEndpoint, resource, userAssignedID, "", callbacks...)
}
// NewServicePrincipalTokenFromMSIWithIdentityResourceID creates a ServicePrincipalToken via the MSI VM Extension.
// It will use the azure resource id of user assigned identity when creating the token.
+// msiEndpoint - empty string, or pass a non-empty string to override the default value.
+// Deprecated: use NewServicePrincipalTokenFromManagedIdentity() instead.
func NewServicePrincipalTokenFromMSIWithIdentityResourceID(msiEndpoint, resource string, identityResourceID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
- return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, &identityResourceID, callbacks...)
+ if err := validateStringParam(identityResourceID, "identityResourceID"); err != nil {
+ return nil, err
+ }
+ return newServicePrincipalTokenFromMSI(msiEndpoint, resource, "", identityResourceID, callbacks...)
}
-func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedID *string, identityResourceID *string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
- if err := validateStringParam(msiEndpoint, "msiEndpoint"); err != nil {
- return nil, err
+// ManagedIdentityOptions contains optional values for configuring managed identity authentication.
+type ManagedIdentityOptions struct {
+ // ClientID is the user-assigned identity to use during authentication.
+ // It is mutually exclusive with IdentityResourceID.
+ ClientID string
+
+ // IdentityResourceID is the resource ID of the user-assigned identity to use during authentication.
+ // It is mutually exclusive with ClientID.
+ IdentityResourceID string
+}
+
+// NewServicePrincipalTokenFromManagedIdentity creates a ServicePrincipalToken using a managed identity.
+// It supports the following managed identity environments.
+// - App Service Environment (API version 2017-09-01 only)
+// - Cloud shell
+// - IMDS with a system or user assigned identity
+func NewServicePrincipalTokenFromManagedIdentity(resource string, options *ManagedIdentityOptions, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+ if options == nil {
+ options = &ManagedIdentityOptions{}
}
+ return newServicePrincipalTokenFromMSI("", resource, options.ClientID, options.IdentityResourceID, callbacks...)
+}
+
+func newServicePrincipalTokenFromMSI(msiEndpoint, resource, userAssignedID, identityResourceID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
if err := validateStringParam(resource, "resource"); err != nil {
return nil, err
}
- if userAssignedID != nil {
- if err := validateStringParam(*userAssignedID, "userAssignedID"); err != nil {
- return nil, err
- }
- }
- if identityResourceID != nil {
- if err := validateStringParam(*identityResourceID, "identityResourceID"); err != nil {
- return nil, err
- }
+ if userAssignedID != "" && identityResourceID != "" {
+ return nil, errors.New("cannot specify userAssignedID and identityResourceID")
}
- // We set the oauth config token endpoint to be MSI's endpoint
- msiEndpointURL, err := url.Parse(msiEndpoint)
+ msiType, endpoint, err := getMSIType()
if err != nil {
+ logger.Instance.Writef(logger.LogError, "Error determining managed identity environment: %v\n", err)
return nil, err
}
-
- v := url.Values{}
- v.Set("resource", resource)
- // we only support token API version 2017-09-01 for app services
- clientIDParam := "client_id"
- if isASEEndpoint(*msiEndpointURL) {
- v.Set("api-version", appServiceAPIVersion)
- clientIDParam = "clientid"
- } else {
- v.Set("api-version", msiAPIVersion)
+ logger.Instance.Writef(logger.LogInfo, "Managed identity environment is %s, endpoint is %s\n", msiType, endpoint)
+ if msiEndpoint != "" {
+ endpoint = msiEndpoint
+ logger.Instance.Writef(logger.LogInfo, "Managed identity custom endpoint is %s\n", endpoint)
}
- if userAssignedID != nil {
- v.Set(clientIDParam, *userAssignedID)
+ msiEndpointURL, err := url.Parse(endpoint)
+ if err != nil {
+ return nil, err
}
- if identityResourceID != nil {
- v.Set("mi_res_id", *identityResourceID)
+ // cloud shell sends its data in the request body
+ if msiType != msiTypeCloudShell {
+ v := url.Values{}
+ v.Set("resource", resource)
+ clientIDParam := "client_id"
+ switch msiType {
+ case msiTypeAppServiceV20170901:
+ clientIDParam = "clientid"
+ v.Set("api-version", appServiceAPIVersion2017)
+ break
+ case msiTypeIMDS:
+ v.Set("api-version", msiAPIVersion)
+ }
+ if userAssignedID != "" {
+ v.Set(clientIDParam, userAssignedID)
+ } else if identityResourceID != "" {
+ v.Set("mi_res_id", identityResourceID)
+ }
+ msiEndpointURL.RawQuery = v.Encode()
}
- msiEndpointURL.RawQuery = v.Encode()
spt := &ServicePrincipalToken{
inner: servicePrincipalToken{
@@ -759,10 +839,14 @@ func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedI
OauthConfig: OAuthConfig{
TokenEndpoint: *msiEndpointURL,
},
- Secret: &ServicePrincipalMSISecret{},
+ Secret: &ServicePrincipalMSISecret{
+ msiType: msiType,
+ clientResourceID: identityResourceID,
+ },
Resource: resource,
AutoRefresh: true,
RefreshWithin: defaultRefresh,
+ ClientID: userAssignedID,
},
refreshLock: &sync.RWMutex{},
sender: sender(),
@@ -770,10 +854,6 @@ func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedI
MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts,
}
- if userAssignedID != nil {
- spt.inner.ClientID = *userAssignedID
- }
-
return spt, nil
}
@@ -870,31 +950,6 @@ func (spt *ServicePrincipalToken) getGrantType() string {
}
}
-func isIMDS(u url.URL) bool {
- return isMSIEndpoint(u) == true || isASEEndpoint(u) == true
-}
-
-func isMSIEndpoint(endpoint url.URL) bool {
- msi, err := url.Parse(msiEndpoint)
- if err != nil {
- return false
- }
- return endpoint.Host == msi.Host && endpoint.Path == msi.Path
-}
-
-func isASEEndpoint(endpoint url.URL) bool {
- aseEndpoint, err := GetMSIAppServiceEndpoint()
- if err != nil {
- // app service environment isn't enabled
- return false
- }
- ase, err := url.Parse(aseEndpoint)
- if err != nil {
- return false
- }
- return endpoint.Host == ase.Host && endpoint.Path == ase.Path
-}
-
func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource string) error {
if spt.customRefreshFunc != nil {
token, err := spt.customRefreshFunc(ctx, resource)
@@ -909,13 +964,40 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource
return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err)
}
req.Header.Add("User-Agent", UserAgent())
- // Add header when runtime is on App Service or Functions
- if isASEEndpoint(spt.inner.OauthConfig.TokenEndpoint) {
- asMSISecret, _ := os.LookupEnv(asMSISecretEnv)
- req.Header.Add(secretHeader, asMSISecret)
- }
req = req.WithContext(ctx)
- if !isIMDS(spt.inner.OauthConfig.TokenEndpoint) {
+ var resp *http.Response
+ authBodyFilter := func(b []byte) []byte {
+ if logger.Level() != logger.LogAuth {
+ return []byte("**REDACTED** authentication body")
+ }
+ return b
+ }
+ if msiSecret, ok := spt.inner.Secret.(*ServicePrincipalMSISecret); ok {
+ switch msiSecret.msiType {
+ case msiTypeAppServiceV20170901:
+ req.Method = http.MethodGet
+ req.Header.Set("secret", os.Getenv(msiSecretEnv))
+ break
+ case msiTypeCloudShell:
+ req.Header.Set("Metadata", "true")
+ data := url.Values{}
+ data.Set("resource", spt.inner.Resource)
+ if spt.inner.ClientID != "" {
+ data.Set("client_id", spt.inner.ClientID)
+ } else if msiSecret.clientResourceID != "" {
+ data.Set("msi_res_id", msiSecret.clientResourceID)
+ }
+ req.Body = ioutil.NopCloser(strings.NewReader(data.Encode()))
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ break
+ case msiTypeIMDS:
+ req.Method = http.MethodGet
+ req.Header.Set("Metadata", "true")
+ break
+ }
+ logger.Instance.WriteRequest(req, logger.Filter{Body: authBodyFilter})
+ resp, err = retryForIMDS(spt.sender, req, spt.MaxMSIRefreshAttempts)
+ } else {
v := url.Values{}
v.Set("client_id", spt.inner.ClientID)
v.Set("resource", resource)
@@ -944,35 +1026,18 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource
req.ContentLength = int64(len(s))
req.Header.Set(contentType, mimeTypeFormPost)
req.Body = body
- }
-
- if _, ok := spt.inner.Secret.(*ServicePrincipalMSISecret); ok {
- req.Method = http.MethodGet
- // the metadata header isn't applicable for ASE
- if !isASEEndpoint(spt.inner.OauthConfig.TokenEndpoint) {
- req.Header.Set(metadataHeader, "true")
- }
- }
-
- var resp *http.Response
- if isMSIEndpoint(spt.inner.OauthConfig.TokenEndpoint) {
- resp, err = getMSIEndpoint(ctx, spt.sender)
- if err != nil {
- // return a TokenRefreshError here so that we don't keep retrying
- return newTokenRefreshError(fmt.Sprintf("the MSI endpoint is not available. Failed HTTP request to MSI endpoint: %v", err), nil)
- }
- resp.Body.Close()
- }
- if isIMDS(spt.inner.OauthConfig.TokenEndpoint) {
- resp, err = retryForIMDS(spt.sender, req, spt.MaxMSIRefreshAttempts)
- } else {
+ logger.Instance.WriteRequest(req, logger.Filter{Body: authBodyFilter})
resp, err = spt.sender.Do(req)
}
+
+ // don't return a TokenRefreshError here; this will allow retry logic to apply
if err != nil {
- // don't return a TokenRefreshError here; this will allow retry logic to apply
return fmt.Errorf("adal: Failed to execute the refresh request. Error = '%v'", err)
+ } else if resp == nil {
+ return fmt.Errorf("adal: received nil response and error")
}
+ logger.Instance.WriteResponse(resp, logger.Filter{Body: authBodyFilter})
defer resp.Body.Close()
rb, err := ioutil.ReadAll(resp.Body)
@@ -1264,3 +1329,8 @@ func MSIAvailable(ctx context.Context, sender Sender) bool {
}
return err == nil
}
+
+// used for testing purposes
+var msiAvailableHook = func(ctx context.Context, sender Sender) bool {
+ return MSIAvailable(ctx, sender)
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go
index 54b4defef..aa5ea47d9 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go
@@ -1,3 +1,4 @@
+//go:build go1.13
// +build go1.13
// Copyright 2017 Microsoft Corporation
@@ -24,8 +25,6 @@ import (
)
func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error) {
- // this cannot fail, the return sig is due to legacy reasons
- msiEndpoint, _ := GetMSIVMEndpoint()
tempCtx, cancel := context.WithTimeout(ctx, 500*time.Millisecond)
defer cancel()
// http.NewRequestWithContext() was added in Go 1.13
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go
index 6d73bae15..6a4690a25 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go
@@ -1,3 +1,4 @@
+//go:build !go1.13
// +build !go1.13
// Copyright 2017 Microsoft Corporation
@@ -23,8 +24,6 @@ import (
)
func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error) {
- // this cannot fail, the return sig is due to legacy reasons
- msiEndpoint, _ := GetMSIVMEndpoint()
tempCtx, cancel := context.WithTimeout(ctx, 500*time.Millisecond)
defer cancel()
req, _ := http.NewRequest(http.MethodGet, msiEndpoint, nil)
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go
index 42e28cf2e..45575eedb 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go
@@ -26,6 +26,7 @@ import (
"time"
"github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/logger"
"github.com/Azure/go-autorest/tracing"
)
@@ -215,6 +216,7 @@ func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Clien
}
// if the initial response has a Retry-After, sleep for the specified amount of time before starting to poll
if delay, ok := f.GetPollingDelay(); ok {
+ logger.Instance.Writeln(logger.LogInfo, "WaitForCompletionRef: initial polling delay")
if delayElapsed := autorest.DelayForBackoff(delay, 0, cancelCtx.Done()); !delayElapsed {
err = cancelCtx.Err()
return
@@ -234,12 +236,14 @@ func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Clien
var ok bool
delay, ok = f.GetPollingDelay()
if !ok {
+ logger.Instance.Writeln(logger.LogInfo, "WaitForCompletionRef: Using client polling delay")
delay = client.PollingDelay
}
} else {
// there was an error polling for status so perform exponential
// back-off based on the number of attempts using the client's retry
// duration. update attempts after delayAttempt to avoid off-by-one.
+ logger.Instance.Writef(logger.LogError, "WaitForCompletionRef: %s\n", err)
delayAttempt = attempts
delay = client.RetryDuration
attempts++
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
index a0b969dff..b6c6314f0 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
@@ -37,6 +37,9 @@ const (
// should be included in the response.
HeaderReturnClientID = "x-ms-return-client-request-id"
+ // HeaderContentType is the type of the content in the HTTP response.
+ HeaderContentType = "Content-Type"
+
// HeaderRequestID is the Azure extension header of the service generated request ID returned
// in the response.
HeaderRequestID = "x-ms-request-id"
@@ -89,54 +92,85 @@ func (se ServiceError) Error() string {
// UnmarshalJSON implements the json.Unmarshaler interface for the ServiceError type.
func (se *ServiceError) UnmarshalJSON(b []byte) error {
- // per the OData v4 spec the details field must be an array of JSON objects.
- // unfortunately not all services adhear to the spec and just return a single
- // object instead of an array with one object. so we have to perform some
- // shenanigans to accommodate both cases.
// http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091
- type serviceError1 struct {
+ type serviceErrorInternal struct {
Code string `json:"code"`
Message string `json:"message"`
- Target *string `json:"target"`
- Details []map[string]interface{} `json:"details"`
- InnerError map[string]interface{} `json:"innererror"`
- AdditionalInfo []map[string]interface{} `json:"additionalInfo"`
+ Target *string `json:"target,omitempty"`
+ AdditionalInfo []map[string]interface{} `json:"additionalInfo,omitempty"`
+ // not all services conform to the OData v4 spec.
+ // the following fields are where we've seen discrepancies
+
+ // spec calls for []map[string]interface{} but have seen map[string]interface{}
+ Details interface{} `json:"details,omitempty"`
+
+ // spec calls for map[string]interface{} but have seen []map[string]interface{} and string
+ InnerError interface{} `json:"innererror,omitempty"`
}
- type serviceError2 struct {
- Code string `json:"code"`
- Message string `json:"message"`
- Target *string `json:"target"`
- Details map[string]interface{} `json:"details"`
- InnerError map[string]interface{} `json:"innererror"`
- AdditionalInfo []map[string]interface{} `json:"additionalInfo"`
+ sei := serviceErrorInternal{}
+ if err := json.Unmarshal(b, &sei); err != nil {
+ return err
}
- se1 := serviceError1{}
- err := json.Unmarshal(b, &se1)
- if err == nil {
- se.populate(se1.Code, se1.Message, se1.Target, se1.Details, se1.InnerError, se1.AdditionalInfo)
- return nil
+ // copy the fields we know to be correct
+ se.AdditionalInfo = sei.AdditionalInfo
+ se.Code = sei.Code
+ se.Message = sei.Message
+ se.Target = sei.Target
+
+ // converts an []interface{} to []map[string]interface{}
+ arrayOfObjs := func(v interface{}) ([]map[string]interface{}, bool) {
+ arrayOf, ok := v.([]interface{})
+ if !ok {
+ return nil, false
+ }
+ final := []map[string]interface{}{}
+ for _, item := range arrayOf {
+ as, ok := item.(map[string]interface{})
+ if !ok {
+ return nil, false
+ }
+ final = append(final, as)
+ }
+ return final, true
}
- se2 := serviceError2{}
- err = json.Unmarshal(b, &se2)
- if err == nil {
- se.populate(se2.Code, se2.Message, se2.Target, nil, se2.InnerError, se2.AdditionalInfo)
- se.Details = append(se.Details, se2.Details)
- return nil
+ // convert the remaining fields, falling back to raw JSON if necessary
+
+ if c, ok := arrayOfObjs(sei.Details); ok {
+ se.Details = c
+ } else if c, ok := sei.Details.(map[string]interface{}); ok {
+ se.Details = []map[string]interface{}{c}
+ } else if sei.Details != nil {
+ // stuff into Details
+ se.Details = []map[string]interface{}{
+ {"raw": sei.Details},
+ }
}
- return err
-}
-func (se *ServiceError) populate(code, message string, target *string, details []map[string]interface{}, inner map[string]interface{}, additional []map[string]interface{}) {
- se.Code = code
- se.Message = message
- se.Target = target
- se.Details = details
- se.InnerError = inner
- se.AdditionalInfo = additional
+ if c, ok := sei.InnerError.(map[string]interface{}); ok {
+ se.InnerError = c
+ } else if c, ok := arrayOfObjs(sei.InnerError); ok {
+ // if there's only one error extract it
+ if len(c) == 1 {
+ se.InnerError = c[0]
+ } else {
+ // multiple errors, stuff them into the value
+ se.InnerError = map[string]interface{}{
+ "multi": c,
+ }
+ }
+ } else if c, ok := sei.InnerError.(string); ok {
+ se.InnerError = map[string]interface{}{"error": c}
+ } else if sei.InnerError != nil {
+ // stuff into InnerError
+ se.InnerError = map[string]interface{}{
+ "raw": sei.InnerError,
+ }
+ }
+ return nil
}
// RequestError describes an error response returned by Azure service.
@@ -177,7 +211,7 @@ func (r Resource) String() string {
}
// ParseResourceID parses a resource ID into a ResourceDetails struct.
-// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-template-functions-resource#return-value-4.
+// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/templates/template-functions-resource?tabs=json#resourceid.
func ParseResourceID(resourceID string) (Resource, error) {
const resourceIDPatternText = `(?i)subscriptions/(.+)/resourceGroups/(.+)/providers/(.+?)/(.+?)/(.+)`
@@ -307,16 +341,30 @@ func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator {
// Check if error is unwrapped ServiceError
decoder := autorest.NewDecoder(encodedAs, bytes.NewReader(b.Bytes()))
if err := decoder.Decode(&e.ServiceError); err != nil {
- return err
+ return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), err)
+ }
+
+ // for example, should the API return the literal value `null` as the response
+ if e.ServiceError == nil {
+ e.ServiceError = &ServiceError{
+ Code: "Unknown",
+ Message: "Unknown service error",
+ Details: []map[string]interface{}{
+ {
+ "HttpResponse.Body": b.String(),
+ },
+ },
+ }
}
}
- if e.ServiceError.Message == "" {
+
+ if e.ServiceError != nil && e.ServiceError.Message == "" {
// if we're here it means the returned error wasn't OData v4 compliant.
// try to unmarshal the body in hopes of getting something.
rawBody := map[string]interface{}{}
decoder := autorest.NewDecoder(encodedAs, bytes.NewReader(b.Bytes()))
if err := decoder.Decode(&rawBody); err != nil {
- return err
+ return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), err)
}
e.ServiceError = &ServiceError{
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go
index 9bbc0899e..3b61a2b6e 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go
@@ -45,6 +45,7 @@ type ResourceIdentifier struct {
Datalake string `json:"datalake"`
Batch string `json:"batch"`
OperationalInsights string `json:"operationalInsights"`
+ OSSRDBMS string `json:"ossRDBMS"`
Storage string `json:"storage"`
Synapse string `json:"synapse"`
ServiceBus string `json:"serviceBus"`
@@ -64,6 +65,10 @@ type Environment struct {
ServiceBusEndpoint string `json:"serviceBusEndpoint"`
BatchManagementEndpoint string `json:"batchManagementEndpoint"`
StorageEndpointSuffix string `json:"storageEndpointSuffix"`
+ CosmosDBDNSSuffix string `json:"cosmosDBDNSSuffix"`
+ MariaDBDNSSuffix string `json:"mariaDBDNSSuffix"`
+ MySQLDatabaseDNSSuffix string `json:"mySqlDatabaseDNSSuffix"`
+ PostgresqlDatabaseDNSSuffix string `json:"postgresqlDatabaseDNSSuffix"`
SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"`
TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"`
KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"`
@@ -71,7 +76,6 @@ type Environment struct {
ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"`
ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"`
ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"`
- CosmosDBDNSSuffix string `json:"cosmosDBDNSSuffix"`
TokenAudience string `json:"tokenAudience"`
APIManagementHostNameSuffix string `json:"apiManagementHostNameSuffix"`
SynapseEndpointSuffix string `json:"synapseEndpointSuffix"`
@@ -93,6 +97,10 @@ var (
ServiceBusEndpoint: "https://servicebus.windows.net/",
BatchManagementEndpoint: "https://batch.core.windows.net/",
StorageEndpointSuffix: "core.windows.net",
+ CosmosDBDNSSuffix: "documents.azure.com",
+ MariaDBDNSSuffix: "mariadb.database.azure.com",
+ MySQLDatabaseDNSSuffix: "mysql.database.azure.com",
+ PostgresqlDatabaseDNSSuffix: "postgres.database.azure.com",
SQLDatabaseDNSSuffix: "database.windows.net",
TrafficManagerDNSSuffix: "trafficmanager.net",
KeyVaultDNSSuffix: "vault.azure.net",
@@ -100,7 +108,6 @@ var (
ServiceManagementVMDNSSuffix: "cloudapp.net",
ResourceManagerVMDNSSuffix: "cloudapp.azure.com",
ContainerRegistryDNSSuffix: "azurecr.io",
- CosmosDBDNSSuffix: "documents.azure.com",
TokenAudience: "https://management.azure.com/",
APIManagementHostNameSuffix: "azure-api.net",
SynapseEndpointSuffix: "dev.azuresynapse.net",
@@ -110,6 +117,7 @@ var (
Datalake: "https://datalake.azure.net/",
Batch: "https://batch.core.windows.net/",
OperationalInsights: "https://api.loganalytics.io",
+ OSSRDBMS: "https://ossrdbms-aad.database.windows.net",
Storage: "https://storage.azure.com/",
Synapse: "https://dev.azuresynapse.net",
ServiceBus: "https://servicebus.azure.net/",
@@ -130,6 +138,10 @@ var (
ServiceBusEndpoint: "https://servicebus.usgovcloudapi.net/",
BatchManagementEndpoint: "https://batch.core.usgovcloudapi.net/",
StorageEndpointSuffix: "core.usgovcloudapi.net",
+ CosmosDBDNSSuffix: "documents.azure.us",
+ MariaDBDNSSuffix: "mariadb.database.usgovcloudapi.net",
+ MySQLDatabaseDNSSuffix: "mysql.database.usgovcloudapi.net",
+ PostgresqlDatabaseDNSSuffix: "postgres.database.usgovcloudapi.net",
SQLDatabaseDNSSuffix: "database.usgovcloudapi.net",
TrafficManagerDNSSuffix: "usgovtrafficmanager.net",
KeyVaultDNSSuffix: "vault.usgovcloudapi.net",
@@ -137,7 +149,6 @@ var (
ServiceManagementVMDNSSuffix: "usgovcloudapp.net",
ResourceManagerVMDNSSuffix: "cloudapp.usgovcloudapi.net",
ContainerRegistryDNSSuffix: "azurecr.us",
- CosmosDBDNSSuffix: "documents.azure.us",
TokenAudience: "https://management.usgovcloudapi.net/",
APIManagementHostNameSuffix: "azure-api.us",
SynapseEndpointSuffix: NotAvailable,
@@ -147,6 +158,7 @@ var (
Datalake: NotAvailable,
Batch: "https://batch.core.usgovcloudapi.net/",
OperationalInsights: "https://api.loganalytics.us",
+ OSSRDBMS: "https://ossrdbms-aad.database.usgovcloudapi.net",
Storage: "https://storage.azure.com/",
Synapse: NotAvailable,
ServiceBus: "https://servicebus.azure.net/",
@@ -167,6 +179,10 @@ var (
ServiceBusEndpoint: "https://servicebus.chinacloudapi.cn/",
BatchManagementEndpoint: "https://batch.chinacloudapi.cn/",
StorageEndpointSuffix: "core.chinacloudapi.cn",
+ CosmosDBDNSSuffix: "documents.azure.cn",
+ MariaDBDNSSuffix: "mariadb.database.chinacloudapi.cn",
+ MySQLDatabaseDNSSuffix: "mysql.database.chinacloudapi.cn",
+ PostgresqlDatabaseDNSSuffix: "postgres.database.chinacloudapi.cn",
SQLDatabaseDNSSuffix: "database.chinacloudapi.cn",
TrafficManagerDNSSuffix: "trafficmanager.cn",
KeyVaultDNSSuffix: "vault.azure.cn",
@@ -174,7 +190,6 @@ var (
ServiceManagementVMDNSSuffix: "chinacloudapp.cn",
ResourceManagerVMDNSSuffix: "cloudapp.chinacloudapi.cn",
ContainerRegistryDNSSuffix: "azurecr.cn",
- CosmosDBDNSSuffix: "documents.azure.cn",
TokenAudience: "https://management.chinacloudapi.cn/",
APIManagementHostNameSuffix: "azure-api.cn",
SynapseEndpointSuffix: "dev.azuresynapse.azure.cn",
@@ -184,6 +199,7 @@ var (
Datalake: NotAvailable,
Batch: "https://batch.chinacloudapi.cn/",
OperationalInsights: NotAvailable,
+ OSSRDBMS: "https://ossrdbms-aad.database.chinacloudapi.cn",
Storage: "https://storage.azure.com/",
Synapse: "https://dev.azuresynapse.net",
ServiceBus: "https://servicebus.azure.net/",
@@ -204,6 +220,10 @@ var (
ServiceBusEndpoint: "https://servicebus.cloudapi.de/",
BatchManagementEndpoint: "https://batch.cloudapi.de/",
StorageEndpointSuffix: "core.cloudapi.de",
+ CosmosDBDNSSuffix: "documents.microsoftazure.de",
+ MariaDBDNSSuffix: "mariadb.database.cloudapi.de",
+ MySQLDatabaseDNSSuffix: "mysql.database.cloudapi.de",
+ PostgresqlDatabaseDNSSuffix: "postgres.database.cloudapi.de",
SQLDatabaseDNSSuffix: "database.cloudapi.de",
TrafficManagerDNSSuffix: "azuretrafficmanager.de",
KeyVaultDNSSuffix: "vault.microsoftazure.de",
@@ -211,7 +231,6 @@ var (
ServiceManagementVMDNSSuffix: "azurecloudapp.de",
ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de",
ContainerRegistryDNSSuffix: NotAvailable,
- CosmosDBDNSSuffix: "documents.microsoftazure.de",
TokenAudience: "https://management.microsoftazure.de/",
APIManagementHostNameSuffix: NotAvailable,
SynapseEndpointSuffix: NotAvailable,
@@ -221,6 +240,7 @@ var (
Datalake: NotAvailable,
Batch: "https://batch.cloudapi.de/",
OperationalInsights: NotAvailable,
+ OSSRDBMS: "https://ossrdbms-aad.database.cloudapi.de",
Storage: "https://storage.azure.com/",
Synapse: NotAvailable,
ServiceBus: "https://servicebus.azure.net/",
diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go
index 898db8b95..bb5f9396e 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/client.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/client.go
@@ -17,6 +17,7 @@ package autorest
import (
"bytes"
"crypto/tls"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -30,7 +31,7 @@ import (
const (
// DefaultPollingDelay is a reasonable delay between polling requests.
- DefaultPollingDelay = 60 * time.Second
+ DefaultPollingDelay = 30 * time.Second
// DefaultPollingDuration is a reasonable total polling duration.
DefaultPollingDuration = 15 * time.Minute
@@ -260,6 +261,9 @@ func (c Client) Do(r *http.Request) (*http.Response, error) {
},
})
resp, err := SendWithSender(c.sender(tls.RenegotiateNever), r)
+ if resp == nil && err == nil {
+ err = errors.New("autorest: received nil response and error")
+ }
logger.Instance.WriteResponse(resp, logger.Filter{})
Respond(resp, c.ByInspecting())
return resp, err
diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.mod b/vendor/github.com/Azure/go-autorest/autorest/go.mod
index 75a534f10..c27bef1b7 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/go.mod
+++ b/vendor/github.com/Azure/go-autorest/autorest/go.mod
@@ -1,12 +1,12 @@
module github.com/Azure/go-autorest/autorest
-go 1.12
+go 1.15
require (
github.com/Azure/go-autorest v14.2.0+incompatible
- github.com/Azure/go-autorest/autorest/adal v0.9.5
+ github.com/Azure/go-autorest/autorest/adal v0.9.13
github.com/Azure/go-autorest/autorest/mocks v0.4.1
- github.com/Azure/go-autorest/logger v0.2.0
+ github.com/Azure/go-autorest/logger v0.2.1
github.com/Azure/go-autorest/tracing v0.6.0
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0
)
diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.sum b/vendor/github.com/Azure/go-autorest/autorest/go.sum
index fa27c68d1..373d9c4e2 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/go.sum
+++ b/vendor/github.com/Azure/go-autorest/autorest/go.sum
@@ -1,13 +1,13 @@
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest/autorest/adal v0.9.5 h1:Y3bBUV4rTuxenJJs41HU3qmqsb+auo+a3Lz+PlJPpL0=
-github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
+github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q=
+github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
-github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE=
-github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
+github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=
diff --git a/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go
index da65e1041..792f82d4b 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go
@@ -1,3 +1,4 @@
+//go:build modhack
// +build modhack
package autorest
diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go
index 7143cc61b..4c87030e8 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go
@@ -1,3 +1,4 @@
+//go:build !go1.8
// +build !go1.8
// Copyright 2017 Microsoft Corporation
diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go
index ae15c6bf9..05847c08b 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go
@@ -1,3 +1,4 @@
+//go:build go1.8
// +build go1.8
// Copyright 2017 Microsoft Corporation
diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go
index 78610ef20..7a495f732 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/sender.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/sender.go
@@ -26,6 +26,7 @@ import (
"sync"
"time"
+ "github.com/Azure/go-autorest/logger"
"github.com/Azure/go-autorest/tracing"
)
@@ -271,6 +272,7 @@ func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator {
if err == nil {
return resp, err
}
+ logger.Instance.Writef(logger.LogError, "DoRetryForAttempts: received error for attempt %d: %v\n", attempt+1, err)
if !DelayForBackoff(backoff, attempt, r.Context().Done()) {
return nil, r.Context().Err()
}
@@ -325,6 +327,9 @@ func doRetryForStatusCodesImpl(s Sender, r *http.Request, count429 bool, attempt
if err == nil && !ResponseHasStatusCode(resp, codes...) || IsTokenRefreshError(err) {
return resp, err
}
+ if err != nil {
+ logger.Instance.Writef(logger.LogError, "DoRetryForStatusCodes: received error for attempt %d: %v\n", attempt+1, err)
+ }
delayed := DelayWithRetryAfter(resp, r.Context().Done())
// if this was a 429 set the delay cap as specified.
// applicable only in the absence of a retry-after header.
@@ -391,6 +396,7 @@ func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator {
if err == nil {
return resp, err
}
+ logger.Instance.Writef(logger.LogError, "DoRetryForDuration: received error for attempt %d: %v\n", attempt+1, err)
if !DelayForBackoff(backoff, attempt, r.Context().Done()) {
return nil, r.Context().Err()
}
@@ -438,6 +444,7 @@ func DelayForBackoffWithCap(backoff, cap time.Duration, attempt int, cancel <-ch
if cap > 0 && d > cap {
d = cap
}
+ logger.Instance.Writef(logger.LogInfo, "DelayForBackoffWithCap: sleeping for %s\n", d)
select {
case <-time.After(d):
return true
diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/go.mod b/vendor/github.com/Azure/go-autorest/autorest/to/go.mod
index 48fd8c6e5..8fd041e2b 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/to/go.mod
+++ b/vendor/github.com/Azure/go-autorest/autorest/to/go.mod
@@ -2,4 +2,4 @@ module github.com/Azure/go-autorest/autorest/to
go 1.12
-require github.com/Azure/go-autorest/autorest v0.9.0
+require github.com/Azure/go-autorest v14.2.0+incompatible
diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/go.sum b/vendor/github.com/Azure/go-autorest/autorest/to/go.sum
index d7ee6b462..1fc56a962 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/to/go.sum
+++ b/vendor/github.com/Azure/go-autorest/autorest/to/go.sum
@@ -1,17 +1,2 @@
-github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs=
-github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
-github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU=
-github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
-github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM=
-github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
-github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0=
-github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
-github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
-github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
-github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go
index 8e8292107..b7310f6b8 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go
@@ -16,9 +16,9 @@ package to
// See the License for the specific language governing permissions and
// limitations under the License.
-// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of
+// This file, and the github.com/Azure/go-autorest import, won't actually become part of
// the resultant binary.
// Necessary for safely adding multi-module repo.
// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
-import _ "github.com/Azure/go-autorest/autorest"
+import _ "github.com/Azure/go-autorest"
diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility_1.13.go b/vendor/github.com/Azure/go-autorest/autorest/utility_1.13.go
index 4cb5e6849..3133fcc08 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/utility_1.13.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/utility_1.13.go
@@ -1,3 +1,4 @@
+//go:build go1.13
// +build go1.13
// Copyright 2017 Microsoft Corporation
diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility_legacy.go b/vendor/github.com/Azure/go-autorest/autorest/utility_legacy.go
index ebb51b4f5..851e152db 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/utility_legacy.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/utility_legacy.go
@@ -1,3 +1,4 @@
+//go:build !go1.13
// +build !go1.13
// Copyright 2017 Microsoft Corporation
diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/go.mod b/vendor/github.com/Azure/go-autorest/autorest/validation/go.mod
index b3f9b6a09..a0a69e9ae 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/validation/go.mod
+++ b/vendor/github.com/Azure/go-autorest/autorest/validation/go.mod
@@ -3,6 +3,6 @@ module github.com/Azure/go-autorest/autorest/validation
go 1.12
require (
- github.com/Azure/go-autorest/autorest v0.9.0
+ github.com/Azure/go-autorest v14.2.0+incompatible
github.com/stretchr/testify v1.3.0
)
diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/go.sum b/vendor/github.com/Azure/go-autorest/autorest/validation/go.sum
index 6b9010a73..6c1119aab 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/validation/go.sum
+++ b/vendor/github.com/Azure/go-autorest/autorest/validation/go.sum
@@ -1,24 +1,9 @@
-github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs=
-github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
-github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU=
-github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
-github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM=
-github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
-github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0=
-github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
-github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
-github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
-github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
+github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go
index 2b2668581..cf1436291 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go
@@ -16,9 +16,9 @@ package validation
// See the License for the specific language governing permissions and
// limitations under the License.
-// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of
+// This file, and the github.com/Azure/go-autorest import, won't actually become part of
// the resultant binary.
// Necessary for safely adding multi-module repo.
// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
-import _ "github.com/Azure/go-autorest/autorest"
+import _ "github.com/Azure/go-autorest"
diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go b/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go
index 65899b69b..ff41cfe07 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go
@@ -24,6 +24,9 @@ import (
"strings"
)
+// Disabled controls if parameter validation should be globally disabled. The default is false.
+var Disabled bool
+
// Constraint stores constraint name, target field name
// Rule and chain validations.
type Constraint struct {
@@ -68,6 +71,9 @@ const (
// Validate method validates constraints on parameter
// passed in validation array.
func Validate(m []Validation) error {
+ if Disabled {
+ return nil
+ }
for _, item := range m {
v := reflect.ValueOf(item.TargetValue)
for _, constraint := range item.Constraints {
diff --git a/vendor/github.com/Azure/go-autorest/logger/logger.go b/vendor/github.com/Azure/go-autorest/logger/logger.go
index da09f394c..2f5d8cc1a 100644
--- a/vendor/github.com/Azure/go-autorest/logger/logger.go
+++ b/vendor/github.com/Azure/go-autorest/logger/logger.go
@@ -55,6 +55,10 @@ const (
// LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it.
LogDebug
+
+ // LogAuth is a special case of LogDebug, it tells a logger to also log the body of an authentication request and response.
+ // NOTE: this can disclose sensitive information, use with care.
+ LogAuth
)
const (
@@ -65,6 +69,7 @@ const (
logWarning = "WARNING"
logInfo = "INFO"
logDebug = "DEBUG"
+ logAuth = "AUTH"
logUnknown = "UNKNOWN"
)
@@ -83,6 +88,8 @@ func ParseLevel(s string) (lt LevelType, err error) {
lt = LogInfo
case logDebug:
lt = LogDebug
+ case logAuth:
+ lt = LogAuth
default:
err = fmt.Errorf("bad log level '%s'", s)
}
@@ -106,6 +113,8 @@ func (lt LevelType) String() string {
return logInfo
case LogDebug:
return logDebug
+ case LogAuth:
+ return logAuth
default:
return logUnknown
}
diff --git a/vendor/github.com/Microsoft/go-winio/CODEOWNERS b/vendor/github.com/Microsoft/go-winio/CODEOWNERS
new file mode 100644
index 000000000..ae1b4942b
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/CODEOWNERS
@@ -0,0 +1 @@
+ * @microsoft/containerplat
diff --git a/vendor/github.com/Microsoft/go-winio/fileinfo.go b/vendor/github.com/Microsoft/go-winio/fileinfo.go
index ada2fbab6..3ab6bff69 100644
--- a/vendor/github.com/Microsoft/go-winio/fileinfo.go
+++ b/vendor/github.com/Microsoft/go-winio/fileinfo.go
@@ -5,21 +5,14 @@ package winio
import (
"os"
"runtime"
- "syscall"
"unsafe"
-)
-
-//sys getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = GetFileInformationByHandleEx
-//sys setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = SetFileInformationByHandle
-const (
- fileBasicInfo = 0
- fileIDInfo = 0x12
+ "golang.org/x/sys/windows"
)
// FileBasicInfo contains file access time and file attributes information.
type FileBasicInfo struct {
- CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime
+ CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime
FileAttributes uint32
pad uint32 // padding
}
@@ -27,7 +20,7 @@ type FileBasicInfo struct {
// GetFileBasicInfo retrieves times and attributes for a file.
func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
bi := &FileBasicInfo{}
- if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
+ if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
}
runtime.KeepAlive(f)
@@ -36,13 +29,32 @@ func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
// SetFileBasicInfo sets times and attributes for a file.
func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
- if err := setFileInformationByHandle(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
+ if err := windows.SetFileInformationByHandle(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err}
}
runtime.KeepAlive(f)
return nil
}
+// FileStandardInfo contains extended information for the file.
+// FILE_STANDARD_INFO in WinBase.h
+// https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_standard_info
+type FileStandardInfo struct {
+ AllocationSize, EndOfFile int64
+ NumberOfLinks uint32
+ DeletePending, Directory bool
+}
+
+// GetFileStandardInfo retrieves ended information for the file.
+func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) {
+ si := &FileStandardInfo{}
+ if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileStandardInfo, (*byte)(unsafe.Pointer(si)), uint32(unsafe.Sizeof(*si))); err != nil {
+ return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
+ }
+ runtime.KeepAlive(f)
+ return si, nil
+}
+
// FileIDInfo contains the volume serial number and file ID for a file. This pair should be
// unique on a system.
type FileIDInfo struct {
@@ -53,7 +65,7 @@ type FileIDInfo struct {
// GetFileID retrieves the unique (volume, file ID) pair for a file.
func GetFileID(f *os.File) (*FileIDInfo, error) {
fileID := &FileIDInfo{}
- if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileIDInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil {
+ if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileIdInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil {
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
}
runtime.KeepAlive(f)
diff --git a/vendor/github.com/Microsoft/go-winio/go.mod b/vendor/github.com/Microsoft/go-winio/go.mod
index b3846826b..98a8dea0e 100644
--- a/vendor/github.com/Microsoft/go-winio/go.mod
+++ b/vendor/github.com/Microsoft/go-winio/go.mod
@@ -3,7 +3,7 @@ module github.com/Microsoft/go-winio
go 1.12
require (
- github.com/pkg/errors v0.8.1
- github.com/sirupsen/logrus v1.4.1
- golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b
+ github.com/pkg/errors v0.9.1
+ github.com/sirupsen/logrus v1.7.0
+ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
)
diff --git a/vendor/github.com/Microsoft/go-winio/go.sum b/vendor/github.com/Microsoft/go-winio/go.sum
index babb4a70d..aa6ad3b57 100644
--- a/vendor/github.com/Microsoft/go-winio/go.sum
+++ b/vendor/github.com/Microsoft/go-winio/go.sum
@@ -1,16 +1,14 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k=
-github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/Microsoft/go-winio/hvsock.go
index dbfe790ee..b632f8f8b 100644
--- a/vendor/github.com/Microsoft/go-winio/hvsock.go
+++ b/vendor/github.com/Microsoft/go-winio/hvsock.go
@@ -1,3 +1,5 @@
+// +build windows
+
package winio
import (
diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go
index d6a46f6a2..96700a73d 100644
--- a/vendor/github.com/Microsoft/go-winio/pipe.go
+++ b/vendor/github.com/Microsoft/go-winio/pipe.go
@@ -182,13 +182,14 @@ func (s pipeAddress) String() string {
}
// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout.
-func tryDialPipe(ctx context.Context, path *string) (syscall.Handle, error) {
+func tryDialPipe(ctx context.Context, path *string, access uint32) (syscall.Handle, error) {
for {
+
select {
case <-ctx.Done():
return syscall.Handle(0), ctx.Err()
default:
- h, err := createFile(*path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
+ h, err := createFile(*path, access, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
if err == nil {
return h, nil
}
@@ -197,7 +198,7 @@ func tryDialPipe(ctx context.Context, path *string) (syscall.Handle, error) {
}
// Wait 10 msec and try again. This is a rather simplistic
// view, as we always try each 10 milliseconds.
- time.Sleep(time.Millisecond * 10)
+ time.Sleep(10 * time.Millisecond)
}
}
}
@@ -210,7 +211,7 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
if timeout != nil {
absTimeout = time.Now().Add(*timeout)
} else {
- absTimeout = time.Now().Add(time.Second * 2)
+ absTimeout = time.Now().Add(2 * time.Second)
}
ctx, _ := context.WithDeadline(context.Background(), absTimeout)
conn, err := DialPipeContext(ctx, path)
@@ -223,9 +224,15 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
// DialPipeContext attempts to connect to a named pipe by `path` until `ctx`
// cancellation or timeout.
func DialPipeContext(ctx context.Context, path string) (net.Conn, error) {
+ return DialPipeAccess(ctx, path, syscall.GENERIC_READ|syscall.GENERIC_WRITE)
+}
+
+// DialPipeAccess attempts to connect to a named pipe by `path` with `access` until `ctx`
+// cancellation or timeout.
+func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) {
var err error
var h syscall.Handle
- h, err = tryDialPipe(ctx, &path)
+ h, err = tryDialPipe(ctx, &path, access)
if err != nil {
return nil, err
}
@@ -422,10 +429,10 @@ type PipeConfig struct {
// when the pipe is in message mode.
MessageMode bool
- // InputBufferSize specifies the size the input buffer, in bytes.
+ // InputBufferSize specifies the size of the input buffer, in bytes.
InputBufferSize int32
- // OutputBufferSize specifies the size the input buffer, in bytes.
+ // OutputBufferSize specifies the size of the output buffer, in bytes.
OutputBufferSize int32
}
diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go
index 586406577..f497c0e39 100644
--- a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go
+++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go
@@ -1,3 +1,5 @@
+// +build windows
+
// Package guid provides a GUID type. The backing structure for a GUID is
// identical to that used by the golang.org/x/sys/windows GUID type.
// There are two main binary encodings used for a GUID, the big-endian encoding,
diff --git a/vendor/github.com/Microsoft/go-winio/syscall.go b/vendor/github.com/Microsoft/go-winio/syscall.go
index 5cb52bc74..5955c99fd 100644
--- a/vendor/github.com/Microsoft/go-winio/syscall.go
+++ b/vendor/github.com/Microsoft/go-winio/syscall.go
@@ -1,3 +1,3 @@
package winio
-//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go
+//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go
diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
index e26b01faf..176ff75e3 100644
--- a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
+++ b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
@@ -19,6 +19,7 @@ const (
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+ errERROR_EINVAL error = syscall.EINVAL
)
// errnoErr returns common boxed Errno values, to prevent
@@ -26,7 +27,7 @@ var (
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
- return nil
+ return errERROR_EINVAL
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
@@ -37,514 +38,382 @@ func errnoErr(e syscall.Errno) error {
}
var (
+ modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
- modws2_32 = windows.NewLazySystemDLL("ws2_32.dll")
modntdll = windows.NewLazySystemDLL("ntdll.dll")
- modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
+ modws2_32 = windows.NewLazySystemDLL("ws2_32.dll")
- procCancelIoEx = modkernel32.NewProc("CancelIoEx")
- procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
- procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
- procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
- procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult")
- procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
- procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
- procCreateFileW = modkernel32.NewProc("CreateFileW")
- procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
- procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
- procLocalAlloc = modkernel32.NewProc("LocalAlloc")
- procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile")
- procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
- procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U")
- procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl")
- procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
+ procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges")
+ procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW")
procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW")
procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW")
- procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW")
- procLocalFree = modkernel32.NewProc("LocalFree")
procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength")
- procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx")
- procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle")
- procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges")
procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf")
- procRevertToSelf = modadvapi32.NewProc("RevertToSelf")
- procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken")
- procGetCurrentThread = modkernel32.NewProc("GetCurrentThread")
- procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW")
- procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW")
+ procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
+ procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW")
+ procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW")
+ procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken")
+ procRevertToSelf = modadvapi32.NewProc("RevertToSelf")
procBackupRead = modkernel32.NewProc("BackupRead")
procBackupWrite = modkernel32.NewProc("BackupWrite")
+ procCancelIoEx = modkernel32.NewProc("CancelIoEx")
+ procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
+ procCreateFileW = modkernel32.NewProc("CreateFileW")
+ procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
+ procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
+ procGetCurrentThread = modkernel32.NewProc("GetCurrentThread")
+ procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
+ procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
+ procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
+ procLocalAlloc = modkernel32.NewProc("LocalAlloc")
+ procLocalFree = modkernel32.NewProc("LocalFree")
+ procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
+ procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile")
+ procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl")
+ procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U")
+ procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
+ procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult")
procbind = modws2_32.NewProc("bind")
)
-func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0)
- if r1 == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
+func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) {
+ var _p0 uint32
+ if releaseAll {
+ _p0 = 1
}
- return
-}
-
-func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) {
- r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0)
- newport = syscall.Handle(r0)
- if newport == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
+ r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize)))
+ success = r0 != 0
+ if true {
+ err = errnoErr(e1)
}
return
}
-func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0)
+func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) {
+ r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0)
if r1 == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
+ err = errnoErr(e1)
}
return
}
-func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) {
- r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0)
+func convertSidToStringSid(sid *byte, str **uint16) (err error) {
+ r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0)
if r1 == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
+ err = errnoErr(e1)
}
return
}
-func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) {
- var _p0 uint32
- if wait {
- _p0 = 1
- } else {
- _p0 = 0
+func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) {
+ var _p0 *uint16
+ _p0, err = syscall.UTF16PtrFromString(str)
+ if err != nil {
+ return
}
- r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0)
+ return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size)
+}
+
+func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) {
+ r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0)
if r1 == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
+ err = errnoErr(e1)
}
return
}
-func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0)
+func getSecurityDescriptorLength(sd uintptr) (len uint32) {
+ r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0)
+ len = uint32(r0)
+ return
+}
+
+func impersonateSelf(level uint32) (err error) {
+ r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0)
if r1 == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
+ err = errnoErr(e1)
}
return
}
-func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
+func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
var _p0 *uint16
- _p0, err = syscall.UTF16PtrFromString(name)
+ _p0, err = syscall.UTF16PtrFromString(accountName)
if err != nil {
return
}
- return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa)
+ return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse)
}
-func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
- r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0)
- handle = syscall.Handle(r0)
- if handle == syscall.InvalidHandle {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
+func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
+ r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0)
+ if r1 == 0 {
+ err = errnoErr(e1)
}
return
}
-func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
+func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
var _p0 *uint16
- _p0, err = syscall.UTF16PtrFromString(name)
+ _p0, err = syscall.UTF16PtrFromString(systemName)
if err != nil {
return
}
- return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile)
+ return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId)
}
-func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
- r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
- handle = syscall.Handle(r0)
- if handle == syscall.InvalidHandle {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
+func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
+ r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0)
+ if r1 == 0 {
+ err = errnoErr(e1)
}
return
}
-func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0)
- if r1 == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
+func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) {
+ var _p0 *uint16
+ _p0, err = syscall.UTF16PtrFromString(systemName)
+ if err != nil {
+ return
}
- return
+ return _lookupPrivilegeName(_p0, luid, buffer, size)
}
-func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0)
+func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) {
+ r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0)
if r1 == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
+ err = errnoErr(e1)
}
return
}
-func localAlloc(uFlags uint32, length uint32) (ptr uintptr) {
- r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0)
- ptr = uintptr(r0)
- return
+func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) {
+ var _p0 *uint16
+ _p0, err = syscall.UTF16PtrFromString(systemName)
+ if err != nil {
+ return
+ }
+ var _p1 *uint16
+ _p1, err = syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return
+ }
+ return _lookupPrivilegeValue(_p0, _p1, luid)
}
-func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) {
- r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0)
- status = ntstatus(r0)
+func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) {
+ r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
return
}
-func rtlNtStatusToDosError(status ntstatus) (winerr error) {
- r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0)
- if r0 != 0 {
- winerr = syscall.Errno(r0)
+func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) {
+ var _p0 uint32
+ if openAsSelf {
+ _p0 = 1
+ }
+ r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0)
+ if r1 == 0 {
+ err = errnoErr(e1)
}
return
}
-func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) {
- r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0)
- status = ntstatus(r0)
+func revertToSelf() (err error) {
+ r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0)
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
return
}
-func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) {
- r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0)
- status = ntstatus(r0)
+func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
+ var _p0 *byte
+ if len(b) > 0 {
+ _p0 = &b[0]
+ }
+ var _p1 uint32
+ if abort {
+ _p1 = 1
+ }
+ var _p2 uint32
+ if processSecurity {
+ _p2 = 1
+ }
+ r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
return
}
-func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
- var _p0 *uint16
- _p0, err = syscall.UTF16PtrFromString(accountName)
- if err != nil {
- return
+func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
+ var _p0 *byte
+ if len(b) > 0 {
+ _p0 = &b[0]
}
- return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse)
+ var _p1 uint32
+ if abort {
+ _p1 = 1
+ }
+ var _p2 uint32
+ if processSecurity {
+ _p2 = 1
+ }
+ r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
}
-func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0)
+func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) {
+ r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0)
if r1 == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
+ err = errnoErr(e1)
}
return
}
-func convertSidToStringSid(sid *byte, str **uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0)
+func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
+ r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0)
if r1 == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
+ err = errnoErr(e1)
}
return
}
-func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) {
+func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
var _p0 *uint16
- _p0, err = syscall.UTF16PtrFromString(str)
+ _p0, err = syscall.UTF16PtrFromString(name)
if err != nil {
return
}
- return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size)
+ return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile)
}
-func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0)
- if r1 == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
+func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
+ r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
+ handle = syscall.Handle(r0)
+ if handle == syscall.InvalidHandle {
+ err = errnoErr(e1)
}
return
}
-func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0)
- if r1 == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
+func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) {
+ r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0)
+ newport = syscall.Handle(r0)
+ if newport == 0 {
+ err = errnoErr(e1)
}
return
}
-func localFree(mem uintptr) {
- syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0)
- return
-}
-
-func getSecurityDescriptorLength(sd uintptr) (len uint32) {
- r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0)
- len = uint32(r0)
- return
-}
-
-func getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0)
- if r1 == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
+func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
+ var _p0 *uint16
+ _p0, err = syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return
}
- return
+ return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa)
}
-func setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0)
- if r1 == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
+func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
+ r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0)
+ handle = syscall.Handle(r0)
+ if handle == syscall.InvalidHandle {
+ err = errnoErr(e1)
}
return
}
-func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) {
- var _p0 uint32
- if releaseAll {
- _p0 = 1
- } else {
- _p0 = 0
- }
- r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize)))
- success = r0 != 0
- if true {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
- }
+func getCurrentThread() (h syscall.Handle) {
+ r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0)
+ h = syscall.Handle(r0)
return
}
-func impersonateSelf(level uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0)
+func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) {
+ r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0)
if r1 == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
+ err = errnoErr(e1)
}
return
}
-func revertToSelf() (err error) {
- r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0)
+func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
+ r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0)
if r1 == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
+ err = errnoErr(e1)
}
return
}
-func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) {
- var _p0 uint32
- if openAsSelf {
- _p0 = 1
- } else {
- _p0 = 0
- }
- r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0)
+func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) {
+ r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0)
if r1 == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
+ err = errnoErr(e1)
}
return
}
-func getCurrentThread() (h syscall.Handle) {
- r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0)
- h = syscall.Handle(r0)
+func localAlloc(uFlags uint32, length uint32) (ptr uintptr) {
+ r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0)
+ ptr = uintptr(r0)
return
}
-func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) {
- var _p0 *uint16
- _p0, err = syscall.UTF16PtrFromString(systemName)
- if err != nil {
- return
- }
- var _p1 *uint16
- _p1, err = syscall.UTF16PtrFromString(name)
- if err != nil {
- return
- }
- return _lookupPrivilegeValue(_p0, _p1, luid)
+func localFree(mem uintptr) {
+ syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0)
+ return
}
-func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) {
- r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
+func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) {
+ r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0)
if r1 == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
+ err = errnoErr(e1)
}
return
}
-func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) {
- var _p0 *uint16
- _p0, err = syscall.UTF16PtrFromString(systemName)
- if err != nil {
- return
- }
- return _lookupPrivilegeName(_p0, luid, buffer, size)
-}
-
-func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0)
- if r1 == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
- }
+func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) {
+ r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0)
+ status = ntstatus(r0)
return
}
-func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
- var _p0 *uint16
- _p0, err = syscall.UTF16PtrFromString(systemName)
- if err != nil {
- return
- }
- return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId)
+func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) {
+ r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0)
+ status = ntstatus(r0)
+ return
}
-func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0)
- if r1 == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
- }
+func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) {
+ r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0)
+ status = ntstatus(r0)
return
}
-func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
- var _p0 *byte
- if len(b) > 0 {
- _p0 = &b[0]
- }
- var _p1 uint32
- if abort {
- _p1 = 1
- } else {
- _p1 = 0
- }
- var _p2 uint32
- if processSecurity {
- _p2 = 1
- } else {
- _p2 = 0
- }
- r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
- if r1 == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
+func rtlNtStatusToDosError(status ntstatus) (winerr error) {
+ r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0)
+ if r0 != 0 {
+ winerr = syscall.Errno(r0)
}
return
}
-func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
- var _p0 *byte
- if len(b) > 0 {
- _p0 = &b[0]
- }
- var _p1 uint32
- if abort {
- _p1 = 1
- } else {
- _p1 = 0
- }
- var _p2 uint32
- if processSecurity {
- _p2 = 1
- } else {
- _p2 = 0
+func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) {
+ var _p0 uint32
+ if wait {
+ _p0 = 1
}
- r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
+ r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0)
if r1 == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
+ err = errnoErr(e1)
}
return
}
@@ -552,11 +421,7 @@ func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, p
func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) {
r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
if r1 == socketError {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
+ err = errnoErr(e1)
}
return
}
diff --git a/vendor/github.com/alecthomas/template/LICENSE b/vendor/github.com/alecthomas/template/LICENSE
deleted file mode 100644
index 744875676..000000000
--- a/vendor/github.com/alecthomas/template/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2012 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/alecthomas/template/README.md b/vendor/github.com/alecthomas/template/README.md
deleted file mode 100644
index ef6a8ee30..000000000
--- a/vendor/github.com/alecthomas/template/README.md
+++ /dev/null
@@ -1,25 +0,0 @@
-# Go's `text/template` package with newline elision
-
-This is a fork of Go 1.4's [text/template](http://golang.org/pkg/text/template/) package with one addition: a backslash immediately after a closing delimiter will delete all subsequent newlines until a non-newline.
-
-eg.
-
-```
-{{if true}}\
-hello
-{{end}}\
-```
-
-Will result in:
-
-```
-hello\n
-```
-
-Rather than:
-
-```
-\n
-hello\n
-\n
-```
diff --git a/vendor/github.com/alecthomas/template/doc.go b/vendor/github.com/alecthomas/template/doc.go
deleted file mode 100644
index 223c595c2..000000000
--- a/vendor/github.com/alecthomas/template/doc.go
+++ /dev/null
@@ -1,406 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package template implements data-driven templates for generating textual output.
-
-To generate HTML output, see package html/template, which has the same interface
-as this package but automatically secures HTML output against certain attacks.
-
-Templates are executed by applying them to a data structure. Annotations in the
-template refer to elements of the data structure (typically a field of a struct
-or a key in a map) to control execution and derive values to be displayed.
-Execution of the template walks the structure and sets the cursor, represented
-by a period '.' and called "dot", to the value at the current location in the
-structure as execution proceeds.
-
-The input text for a template is UTF-8-encoded text in any format.
-"Actions"--data evaluations or control structures--are delimited by
-"{{" and "}}"; all text outside actions is copied to the output unchanged.
-Actions may not span newlines, although comments can.
-
-Once parsed, a template may be executed safely in parallel.
-
-Here is a trivial example that prints "17 items are made of wool".
-
- type Inventory struct {
- Material string
- Count uint
- }
- sweaters := Inventory{"wool", 17}
- tmpl, err := template.New("test").Parse("{{.Count}} items are made of {{.Material}}")
- if err != nil { panic(err) }
- err = tmpl.Execute(os.Stdout, sweaters)
- if err != nil { panic(err) }
-
-More intricate examples appear below.
-
-Actions
-
-Here is the list of actions. "Arguments" and "pipelines" are evaluations of
-data, defined in detail below.
-
-*/
-// {{/* a comment */}}
-// A comment; discarded. May contain newlines.
-// Comments do not nest and must start and end at the
-// delimiters, as shown here.
-/*
-
- {{pipeline}}
- The default textual representation of the value of the pipeline
- is copied to the output.
-
- {{if pipeline}} T1 {{end}}
- If the value of the pipeline is empty, no output is generated;
- otherwise, T1 is executed. The empty values are false, 0, any
- nil pointer or interface value, and any array, slice, map, or
- string of length zero.
- Dot is unaffected.
-
- {{if pipeline}} T1 {{else}} T0 {{end}}
- If the value of the pipeline is empty, T0 is executed;
- otherwise, T1 is executed. Dot is unaffected.
-
- {{if pipeline}} T1 {{else if pipeline}} T0 {{end}}
- To simplify the appearance of if-else chains, the else action
- of an if may include another if directly; the effect is exactly
- the same as writing
- {{if pipeline}} T1 {{else}}{{if pipeline}} T0 {{end}}{{end}}
-
- {{range pipeline}} T1 {{end}}
- The value of the pipeline must be an array, slice, map, or channel.
- If the value of the pipeline has length zero, nothing is output;
- otherwise, dot is set to the successive elements of the array,
- slice, or map and T1 is executed. If the value is a map and the
- keys are of basic type with a defined order ("comparable"), the
- elements will be visited in sorted key order.
-
- {{range pipeline}} T1 {{else}} T0 {{end}}
- The value of the pipeline must be an array, slice, map, or channel.
- If the value of the pipeline has length zero, dot is unaffected and
- T0 is executed; otherwise, dot is set to the successive elements
- of the array, slice, or map and T1 is executed.
-
- {{template "name"}}
- The template with the specified name is executed with nil data.
-
- {{template "name" pipeline}}
- The template with the specified name is executed with dot set
- to the value of the pipeline.
-
- {{with pipeline}} T1 {{end}}
- If the value of the pipeline is empty, no output is generated;
- otherwise, dot is set to the value of the pipeline and T1 is
- executed.
-
- {{with pipeline}} T1 {{else}} T0 {{end}}
- If the value of the pipeline is empty, dot is unaffected and T0
- is executed; otherwise, dot is set to the value of the pipeline
- and T1 is executed.
-
-Arguments
-
-An argument is a simple value, denoted by one of the following.
-
- - A boolean, string, character, integer, floating-point, imaginary
- or complex constant in Go syntax. These behave like Go's untyped
- constants, although raw strings may not span newlines.
- - The keyword nil, representing an untyped Go nil.
- - The character '.' (period):
- .
- The result is the value of dot.
- - A variable name, which is a (possibly empty) alphanumeric string
- preceded by a dollar sign, such as
- $piOver2
- or
- $
- The result is the value of the variable.
- Variables are described below.
- - The name of a field of the data, which must be a struct, preceded
- by a period, such as
- .Field
- The result is the value of the field. Field invocations may be
- chained:
- .Field1.Field2
- Fields can also be evaluated on variables, including chaining:
- $x.Field1.Field2
- - The name of a key of the data, which must be a map, preceded
- by a period, such as
- .Key
- The result is the map element value indexed by the key.
- Key invocations may be chained and combined with fields to any
- depth:
- .Field1.Key1.Field2.Key2
- Although the key must be an alphanumeric identifier, unlike with
- field names they do not need to start with an upper case letter.
- Keys can also be evaluated on variables, including chaining:
- $x.key1.key2
- - The name of a niladic method of the data, preceded by a period,
- such as
- .Method
- The result is the value of invoking the method with dot as the
- receiver, dot.Method(). Such a method must have one return value (of
- any type) or two return values, the second of which is an error.
- If it has two and the returned error is non-nil, execution terminates
- and an error is returned to the caller as the value of Execute.
- Method invocations may be chained and combined with fields and keys
- to any depth:
- .Field1.Key1.Method1.Field2.Key2.Method2
- Methods can also be evaluated on variables, including chaining:
- $x.Method1.Field
- - The name of a niladic function, such as
- fun
- The result is the value of invoking the function, fun(). The return
- types and values behave as in methods. Functions and function
- names are described below.
- - A parenthesized instance of one the above, for grouping. The result
- may be accessed by a field or map key invocation.
- print (.F1 arg1) (.F2 arg2)
- (.StructValuedMethod "arg").Field
-
-Arguments may evaluate to any type; if they are pointers the implementation
-automatically indirects to the base type when required.
-If an evaluation yields a function value, such as a function-valued
-field of a struct, the function is not invoked automatically, but it
-can be used as a truth value for an if action and the like. To invoke
-it, use the call function, defined below.
-
-A pipeline is a possibly chained sequence of "commands". A command is a simple
-value (argument) or a function or method call, possibly with multiple arguments:
-
- Argument
- The result is the value of evaluating the argument.
- .Method [Argument...]
- The method can be alone or the last element of a chain but,
- unlike methods in the middle of a chain, it can take arguments.
- The result is the value of calling the method with the
- arguments:
- dot.Method(Argument1, etc.)
- functionName [Argument...]
- The result is the value of calling the function associated
- with the name:
- function(Argument1, etc.)
- Functions and function names are described below.
-
-Pipelines
-
-A pipeline may be "chained" by separating a sequence of commands with pipeline
-characters '|'. In a chained pipeline, the result of the each command is
-passed as the last argument of the following command. The output of the final
-command in the pipeline is the value of the pipeline.
-
-The output of a command will be either one value or two values, the second of
-which has type error. If that second value is present and evaluates to
-non-nil, execution terminates and the error is returned to the caller of
-Execute.
-
-Variables
-
-A pipeline inside an action may initialize a variable to capture the result.
-The initialization has syntax
-
- $variable := pipeline
-
-where $variable is the name of the variable. An action that declares a
-variable produces no output.
-
-If a "range" action initializes a variable, the variable is set to the
-successive elements of the iteration. Also, a "range" may declare two
-variables, separated by a comma:
-
- range $index, $element := pipeline
-
-in which case $index and $element are set to the successive values of the
-array/slice index or map key and element, respectively. Note that if there is
-only one variable, it is assigned the element; this is opposite to the
-convention in Go range clauses.
-
-A variable's scope extends to the "end" action of the control structure ("if",
-"with", or "range") in which it is declared, or to the end of the template if
-there is no such control structure. A template invocation does not inherit
-variables from the point of its invocation.
-
-When execution begins, $ is set to the data argument passed to Execute, that is,
-to the starting value of dot.
-
-Examples
-
-Here are some example one-line templates demonstrating pipelines and variables.
-All produce the quoted word "output":
-
- {{"\"output\""}}
- A string constant.
- {{`"output"`}}
- A raw string constant.
- {{printf "%q" "output"}}
- A function call.
- {{"output" | printf "%q"}}
- A function call whose final argument comes from the previous
- command.
- {{printf "%q" (print "out" "put")}}
- A parenthesized argument.
- {{"put" | printf "%s%s" "out" | printf "%q"}}
- A more elaborate call.
- {{"output" | printf "%s" | printf "%q"}}
- A longer chain.
- {{with "output"}}{{printf "%q" .}}{{end}}
- A with action using dot.
- {{with $x := "output" | printf "%q"}}{{$x}}{{end}}
- A with action that creates and uses a variable.
- {{with $x := "output"}}{{printf "%q" $x}}{{end}}
- A with action that uses the variable in another action.
- {{with $x := "output"}}{{$x | printf "%q"}}{{end}}
- The same, but pipelined.
-
-Functions
-
-During execution functions are found in two function maps: first in the
-template, then in the global function map. By default, no functions are defined
-in the template but the Funcs method can be used to add them.
-
-Predefined global functions are named as follows.
-
- and
- Returns the boolean AND of its arguments by returning the
- first empty argument or the last argument, that is,
- "and x y" behaves as "if x then y else x". All the
- arguments are evaluated.
- call
- Returns the result of calling the first argument, which
- must be a function, with the remaining arguments as parameters.
- Thus "call .X.Y 1 2" is, in Go notation, dot.X.Y(1, 2) where
- Y is a func-valued field, map entry, or the like.
- The first argument must be the result of an evaluation
- that yields a value of function type (as distinct from
- a predefined function such as print). The function must
- return either one or two result values, the second of which
- is of type error. If the arguments don't match the function
- or the returned error value is non-nil, execution stops.
- html
- Returns the escaped HTML equivalent of the textual
- representation of its arguments.
- index
- Returns the result of indexing its first argument by the
- following arguments. Thus "index x 1 2 3" is, in Go syntax,
- x[1][2][3]. Each indexed item must be a map, slice, or array.
- js
- Returns the escaped JavaScript equivalent of the textual
- representation of its arguments.
- len
- Returns the integer length of its argument.
- not
- Returns the boolean negation of its single argument.
- or
- Returns the boolean OR of its arguments by returning the
- first non-empty argument or the last argument, that is,
- "or x y" behaves as "if x then x else y". All the
- arguments are evaluated.
- print
- An alias for fmt.Sprint
- printf
- An alias for fmt.Sprintf
- println
- An alias for fmt.Sprintln
- urlquery
- Returns the escaped value of the textual representation of
- its arguments in a form suitable for embedding in a URL query.
-
-The boolean functions take any zero value to be false and a non-zero
-value to be true.
-
-There is also a set of binary comparison operators defined as
-functions:
-
- eq
- Returns the boolean truth of arg1 == arg2
- ne
- Returns the boolean truth of arg1 != arg2
- lt
- Returns the boolean truth of arg1 < arg2
- le
- Returns the boolean truth of arg1 <= arg2
- gt
- Returns the boolean truth of arg1 > arg2
- ge
- Returns the boolean truth of arg1 >= arg2
-
-For simpler multi-way equality tests, eq (only) accepts two or more
-arguments and compares the second and subsequent to the first,
-returning in effect
-
- arg1==arg2 || arg1==arg3 || arg1==arg4 ...
-
-(Unlike with || in Go, however, eq is a function call and all the
-arguments will be evaluated.)
-
-The comparison functions work on basic types only (or named basic
-types, such as "type Celsius float32"). They implement the Go rules
-for comparison of values, except that size and exact type are
-ignored, so any integer value, signed or unsigned, may be compared
-with any other integer value. (The arithmetic value is compared,
-not the bit pattern, so all negative integers are less than all
-unsigned integers.) However, as usual, one may not compare an int
-with a float32 and so on.
-
-Associated templates
-
-Each template is named by a string specified when it is created. Also, each
-template is associated with zero or more other templates that it may invoke by
-name; such associations are transitive and form a name space of templates.
-
-A template may use a template invocation to instantiate another associated
-template; see the explanation of the "template" action above. The name must be
-that of a template associated with the template that contains the invocation.
-
-Nested template definitions
-
-When parsing a template, another template may be defined and associated with the
-template being parsed. Template definitions must appear at the top level of the
-template, much like global variables in a Go program.
-
-The syntax of such definitions is to surround each template declaration with a
-"define" and "end" action.
-
-The define action names the template being created by providing a string
-constant. Here is a simple example:
-
- `{{define "T1"}}ONE{{end}}
- {{define "T2"}}TWO{{end}}
- {{define "T3"}}{{template "T1"}} {{template "T2"}}{{end}}
- {{template "T3"}}`
-
-This defines two templates, T1 and T2, and a third T3 that invokes the other two
-when it is executed. Finally it invokes T3. If executed this template will
-produce the text
-
- ONE TWO
-
-By construction, a template may reside in only one association. If it's
-necessary to have a template addressable from multiple associations, the
-template definition must be parsed multiple times to create distinct *Template
-values, or must be copied with the Clone or AddParseTree method.
-
-Parse may be called multiple times to assemble the various associated templates;
-see the ParseFiles and ParseGlob functions and methods for simple ways to parse
-related templates stored in files.
-
-A template may be executed directly or through ExecuteTemplate, which executes
-an associated template identified by name. To invoke our example above, we
-might write,
-
- err := tmpl.Execute(os.Stdout, "no data needed")
- if err != nil {
- log.Fatalf("execution failed: %s", err)
- }
-
-or to invoke a particular template explicitly by name,
-
- err := tmpl.ExecuteTemplate(os.Stdout, "T2", "no data needed")
- if err != nil {
- log.Fatalf("execution failed: %s", err)
- }
-
-*/
-package template
diff --git a/vendor/github.com/alecthomas/template/exec.go b/vendor/github.com/alecthomas/template/exec.go
deleted file mode 100644
index c3078e5d0..000000000
--- a/vendor/github.com/alecthomas/template/exec.go
+++ /dev/null
@@ -1,845 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
- "bytes"
- "fmt"
- "io"
- "reflect"
- "runtime"
- "sort"
- "strings"
-
- "github.com/alecthomas/template/parse"
-)
-
-// state represents the state of an execution. It's not part of the
-// template so that multiple executions of the same template
-// can execute in parallel.
-type state struct {
- tmpl *Template
- wr io.Writer
- node parse.Node // current node, for errors
- vars []variable // push-down stack of variable values.
-}
-
-// variable holds the dynamic value of a variable such as $, $x etc.
-type variable struct {
- name string
- value reflect.Value
-}
-
-// push pushes a new variable on the stack.
-func (s *state) push(name string, value reflect.Value) {
- s.vars = append(s.vars, variable{name, value})
-}
-
-// mark returns the length of the variable stack.
-func (s *state) mark() int {
- return len(s.vars)
-}
-
-// pop pops the variable stack up to the mark.
-func (s *state) pop(mark int) {
- s.vars = s.vars[0:mark]
-}
-
-// setVar overwrites the top-nth variable on the stack. Used by range iterations.
-func (s *state) setVar(n int, value reflect.Value) {
- s.vars[len(s.vars)-n].value = value
-}
-
-// varValue returns the value of the named variable.
-func (s *state) varValue(name string) reflect.Value {
- for i := s.mark() - 1; i >= 0; i-- {
- if s.vars[i].name == name {
- return s.vars[i].value
- }
- }
- s.errorf("undefined variable: %s", name)
- return zero
-}
-
-var zero reflect.Value
-
-// at marks the state to be on node n, for error reporting.
-func (s *state) at(node parse.Node) {
- s.node = node
-}
-
-// doublePercent returns the string with %'s replaced by %%, if necessary,
-// so it can be used safely inside a Printf format string.
-func doublePercent(str string) string {
- if strings.Contains(str, "%") {
- str = strings.Replace(str, "%", "%%", -1)
- }
- return str
-}
-
-// errorf formats the error and terminates processing.
-func (s *state) errorf(format string, args ...interface{}) {
- name := doublePercent(s.tmpl.Name())
- if s.node == nil {
- format = fmt.Sprintf("template: %s: %s", name, format)
- } else {
- location, context := s.tmpl.ErrorContext(s.node)
- format = fmt.Sprintf("template: %s: executing %q at <%s>: %s", location, name, doublePercent(context), format)
- }
- panic(fmt.Errorf(format, args...))
-}
-
-// errRecover is the handler that turns panics into returns from the top
-// level of Parse.
-func errRecover(errp *error) {
- e := recover()
- if e != nil {
- switch err := e.(type) {
- case runtime.Error:
- panic(e)
- case error:
- *errp = err
- default:
- panic(e)
- }
- }
-}
-
-// ExecuteTemplate applies the template associated with t that has the given name
-// to the specified data object and writes the output to wr.
-// If an error occurs executing the template or writing its output,
-// execution stops, but partial results may already have been written to
-// the output writer.
-// A template may be executed safely in parallel.
-func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{}) error {
- tmpl := t.tmpl[name]
- if tmpl == nil {
- return fmt.Errorf("template: no template %q associated with template %q", name, t.name)
- }
- return tmpl.Execute(wr, data)
-}
-
-// Execute applies a parsed template to the specified data object,
-// and writes the output to wr.
-// If an error occurs executing the template or writing its output,
-// execution stops, but partial results may already have been written to
-// the output writer.
-// A template may be executed safely in parallel.
-func (t *Template) Execute(wr io.Writer, data interface{}) (err error) {
- defer errRecover(&err)
- value := reflect.ValueOf(data)
- state := &state{
- tmpl: t,
- wr: wr,
- vars: []variable{{"$", value}},
- }
- t.init()
- if t.Tree == nil || t.Root == nil {
- var b bytes.Buffer
- for name, tmpl := range t.tmpl {
- if tmpl.Tree == nil || tmpl.Root == nil {
- continue
- }
- if b.Len() > 0 {
- b.WriteString(", ")
- }
- fmt.Fprintf(&b, "%q", name)
- }
- var s string
- if b.Len() > 0 {
- s = "; defined templates are: " + b.String()
- }
- state.errorf("%q is an incomplete or empty template%s", t.Name(), s)
- }
- state.walk(value, t.Root)
- return
-}
-
-// Walk functions step through the major pieces of the template structure,
-// generating output as they go.
-func (s *state) walk(dot reflect.Value, node parse.Node) {
- s.at(node)
- switch node := node.(type) {
- case *parse.ActionNode:
- // Do not pop variables so they persist until next end.
- // Also, if the action declares variables, don't print the result.
- val := s.evalPipeline(dot, node.Pipe)
- if len(node.Pipe.Decl) == 0 {
- s.printValue(node, val)
- }
- case *parse.IfNode:
- s.walkIfOrWith(parse.NodeIf, dot, node.Pipe, node.List, node.ElseList)
- case *parse.ListNode:
- for _, node := range node.Nodes {
- s.walk(dot, node)
- }
- case *parse.RangeNode:
- s.walkRange(dot, node)
- case *parse.TemplateNode:
- s.walkTemplate(dot, node)
- case *parse.TextNode:
- if _, err := s.wr.Write(node.Text); err != nil {
- s.errorf("%s", err)
- }
- case *parse.WithNode:
- s.walkIfOrWith(parse.NodeWith, dot, node.Pipe, node.List, node.ElseList)
- default:
- s.errorf("unknown node: %s", node)
- }
-}
-
-// walkIfOrWith walks an 'if' or 'with' node. The two control structures
-// are identical in behavior except that 'with' sets dot.
-func (s *state) walkIfOrWith(typ parse.NodeType, dot reflect.Value, pipe *parse.PipeNode, list, elseList *parse.ListNode) {
- defer s.pop(s.mark())
- val := s.evalPipeline(dot, pipe)
- truth, ok := isTrue(val)
- if !ok {
- s.errorf("if/with can't use %v", val)
- }
- if truth {
- if typ == parse.NodeWith {
- s.walk(val, list)
- } else {
- s.walk(dot, list)
- }
- } else if elseList != nil {
- s.walk(dot, elseList)
- }
-}
-
-// isTrue reports whether the value is 'true', in the sense of not the zero of its type,
-// and whether the value has a meaningful truth value.
-func isTrue(val reflect.Value) (truth, ok bool) {
- if !val.IsValid() {
- // Something like var x interface{}, never set. It's a form of nil.
- return false, true
- }
- switch val.Kind() {
- case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
- truth = val.Len() > 0
- case reflect.Bool:
- truth = val.Bool()
- case reflect.Complex64, reflect.Complex128:
- truth = val.Complex() != 0
- case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface:
- truth = !val.IsNil()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- truth = val.Int() != 0
- case reflect.Float32, reflect.Float64:
- truth = val.Float() != 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- truth = val.Uint() != 0
- case reflect.Struct:
- truth = true // Struct values are always true.
- default:
- return
- }
- return truth, true
-}
-
-func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) {
- s.at(r)
- defer s.pop(s.mark())
- val, _ := indirect(s.evalPipeline(dot, r.Pipe))
- // mark top of stack before any variables in the body are pushed.
- mark := s.mark()
- oneIteration := func(index, elem reflect.Value) {
- // Set top var (lexically the second if there are two) to the element.
- if len(r.Pipe.Decl) > 0 {
- s.setVar(1, elem)
- }
- // Set next var (lexically the first if there are two) to the index.
- if len(r.Pipe.Decl) > 1 {
- s.setVar(2, index)
- }
- s.walk(elem, r.List)
- s.pop(mark)
- }
- switch val.Kind() {
- case reflect.Array, reflect.Slice:
- if val.Len() == 0 {
- break
- }
- for i := 0; i < val.Len(); i++ {
- oneIteration(reflect.ValueOf(i), val.Index(i))
- }
- return
- case reflect.Map:
- if val.Len() == 0 {
- break
- }
- for _, key := range sortKeys(val.MapKeys()) {
- oneIteration(key, val.MapIndex(key))
- }
- return
- case reflect.Chan:
- if val.IsNil() {
- break
- }
- i := 0
- for ; ; i++ {
- elem, ok := val.Recv()
- if !ok {
- break
- }
- oneIteration(reflect.ValueOf(i), elem)
- }
- if i == 0 {
- break
- }
- return
- case reflect.Invalid:
- break // An invalid value is likely a nil map, etc. and acts like an empty map.
- default:
- s.errorf("range can't iterate over %v", val)
- }
- if r.ElseList != nil {
- s.walk(dot, r.ElseList)
- }
-}
-
-func (s *state) walkTemplate(dot reflect.Value, t *parse.TemplateNode) {
- s.at(t)
- tmpl := s.tmpl.tmpl[t.Name]
- if tmpl == nil {
- s.errorf("template %q not defined", t.Name)
- }
- // Variables declared by the pipeline persist.
- dot = s.evalPipeline(dot, t.Pipe)
- newState := *s
- newState.tmpl = tmpl
- // No dynamic scoping: template invocations inherit no variables.
- newState.vars = []variable{{"$", dot}}
- newState.walk(dot, tmpl.Root)
-}
-
-// Eval functions evaluate pipelines, commands, and their elements and extract
-// values from the data structure by examining fields, calling methods, and so on.
-// The printing of those values happens only through walk functions.
-
-// evalPipeline returns the value acquired by evaluating a pipeline. If the
-// pipeline has a variable declaration, the variable will be pushed on the
-// stack. Callers should therefore pop the stack after they are finished
-// executing commands depending on the pipeline value.
-func (s *state) evalPipeline(dot reflect.Value, pipe *parse.PipeNode) (value reflect.Value) {
- if pipe == nil {
- return
- }
- s.at(pipe)
- for _, cmd := range pipe.Cmds {
- value = s.evalCommand(dot, cmd, value) // previous value is this one's final arg.
- // If the object has type interface{}, dig down one level to the thing inside.
- if value.Kind() == reflect.Interface && value.Type().NumMethod() == 0 {
- value = reflect.ValueOf(value.Interface()) // lovely!
- }
- }
- for _, variable := range pipe.Decl {
- s.push(variable.Ident[0], value)
- }
- return value
-}
-
-func (s *state) notAFunction(args []parse.Node, final reflect.Value) {
- if len(args) > 1 || final.IsValid() {
- s.errorf("can't give argument to non-function %s", args[0])
- }
-}
-
-func (s *state) evalCommand(dot reflect.Value, cmd *parse.CommandNode, final reflect.Value) reflect.Value {
- firstWord := cmd.Args[0]
- switch n := firstWord.(type) {
- case *parse.FieldNode:
- return s.evalFieldNode(dot, n, cmd.Args, final)
- case *parse.ChainNode:
- return s.evalChainNode(dot, n, cmd.Args, final)
- case *parse.IdentifierNode:
- // Must be a function.
- return s.evalFunction(dot, n, cmd, cmd.Args, final)
- case *parse.PipeNode:
- // Parenthesized pipeline. The arguments are all inside the pipeline; final is ignored.
- return s.evalPipeline(dot, n)
- case *parse.VariableNode:
- return s.evalVariableNode(dot, n, cmd.Args, final)
- }
- s.at(firstWord)
- s.notAFunction(cmd.Args, final)
- switch word := firstWord.(type) {
- case *parse.BoolNode:
- return reflect.ValueOf(word.True)
- case *parse.DotNode:
- return dot
- case *parse.NilNode:
- s.errorf("nil is not a command")
- case *parse.NumberNode:
- return s.idealConstant(word)
- case *parse.StringNode:
- return reflect.ValueOf(word.Text)
- }
- s.errorf("can't evaluate command %q", firstWord)
- panic("not reached")
-}
-
-// idealConstant is called to return the value of a number in a context where
-// we don't know the type. In that case, the syntax of the number tells us
-// its type, and we use Go rules to resolve. Note there is no such thing as
-// a uint ideal constant in this situation - the value must be of int type.
-func (s *state) idealConstant(constant *parse.NumberNode) reflect.Value {
- // These are ideal constants but we don't know the type
- // and we have no context. (If it was a method argument,
- // we'd know what we need.) The syntax guides us to some extent.
- s.at(constant)
- switch {
- case constant.IsComplex:
- return reflect.ValueOf(constant.Complex128) // incontrovertible.
- case constant.IsFloat && !isHexConstant(constant.Text) && strings.IndexAny(constant.Text, ".eE") >= 0:
- return reflect.ValueOf(constant.Float64)
- case constant.IsInt:
- n := int(constant.Int64)
- if int64(n) != constant.Int64 {
- s.errorf("%s overflows int", constant.Text)
- }
- return reflect.ValueOf(n)
- case constant.IsUint:
- s.errorf("%s overflows int", constant.Text)
- }
- return zero
-}
-
-func isHexConstant(s string) bool {
- return len(s) > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X')
-}
-
-func (s *state) evalFieldNode(dot reflect.Value, field *parse.FieldNode, args []parse.Node, final reflect.Value) reflect.Value {
- s.at(field)
- return s.evalFieldChain(dot, dot, field, field.Ident, args, final)
-}
-
-func (s *state) evalChainNode(dot reflect.Value, chain *parse.ChainNode, args []parse.Node, final reflect.Value) reflect.Value {
- s.at(chain)
- // (pipe).Field1.Field2 has pipe as .Node, fields as .Field. Eval the pipeline, then the fields.
- pipe := s.evalArg(dot, nil, chain.Node)
- if len(chain.Field) == 0 {
- s.errorf("internal error: no fields in evalChainNode")
- }
- return s.evalFieldChain(dot, pipe, chain, chain.Field, args, final)
-}
-
-func (s *state) evalVariableNode(dot reflect.Value, variable *parse.VariableNode, args []parse.Node, final reflect.Value) reflect.Value {
- // $x.Field has $x as the first ident, Field as the second. Eval the var, then the fields.
- s.at(variable)
- value := s.varValue(variable.Ident[0])
- if len(variable.Ident) == 1 {
- s.notAFunction(args, final)
- return value
- }
- return s.evalFieldChain(dot, value, variable, variable.Ident[1:], args, final)
-}
-
-// evalFieldChain evaluates .X.Y.Z possibly followed by arguments.
-// dot is the environment in which to evaluate arguments, while
-// receiver is the value being walked along the chain.
-func (s *state) evalFieldChain(dot, receiver reflect.Value, node parse.Node, ident []string, args []parse.Node, final reflect.Value) reflect.Value {
- n := len(ident)
- for i := 0; i < n-1; i++ {
- receiver = s.evalField(dot, ident[i], node, nil, zero, receiver)
- }
- // Now if it's a method, it gets the arguments.
- return s.evalField(dot, ident[n-1], node, args, final, receiver)
-}
-
-func (s *state) evalFunction(dot reflect.Value, node *parse.IdentifierNode, cmd parse.Node, args []parse.Node, final reflect.Value) reflect.Value {
- s.at(node)
- name := node.Ident
- function, ok := findFunction(name, s.tmpl)
- if !ok {
- s.errorf("%q is not a defined function", name)
- }
- return s.evalCall(dot, function, cmd, name, args, final)
-}
-
-// evalField evaluates an expression like (.Field) or (.Field arg1 arg2).
-// The 'final' argument represents the return value from the preceding
-// value of the pipeline, if any.
-func (s *state) evalField(dot reflect.Value, fieldName string, node parse.Node, args []parse.Node, final, receiver reflect.Value) reflect.Value {
- if !receiver.IsValid() {
- return zero
- }
- typ := receiver.Type()
- receiver, _ = indirect(receiver)
- // Unless it's an interface, need to get to a value of type *T to guarantee
- // we see all methods of T and *T.
- ptr := receiver
- if ptr.Kind() != reflect.Interface && ptr.CanAddr() {
- ptr = ptr.Addr()
- }
- if method := ptr.MethodByName(fieldName); method.IsValid() {
- return s.evalCall(dot, method, node, fieldName, args, final)
- }
- hasArgs := len(args) > 1 || final.IsValid()
- // It's not a method; must be a field of a struct or an element of a map. The receiver must not be nil.
- receiver, isNil := indirect(receiver)
- if isNil {
- s.errorf("nil pointer evaluating %s.%s", typ, fieldName)
- }
- switch receiver.Kind() {
- case reflect.Struct:
- tField, ok := receiver.Type().FieldByName(fieldName)
- if ok {
- field := receiver.FieldByIndex(tField.Index)
- if tField.PkgPath != "" { // field is unexported
- s.errorf("%s is an unexported field of struct type %s", fieldName, typ)
- }
- // If it's a function, we must call it.
- if hasArgs {
- s.errorf("%s has arguments but cannot be invoked as function", fieldName)
- }
- return field
- }
- s.errorf("%s is not a field of struct type %s", fieldName, typ)
- case reflect.Map:
- // If it's a map, attempt to use the field name as a key.
- nameVal := reflect.ValueOf(fieldName)
- if nameVal.Type().AssignableTo(receiver.Type().Key()) {
- if hasArgs {
- s.errorf("%s is not a method but has arguments", fieldName)
- }
- return receiver.MapIndex(nameVal)
- }
- }
- s.errorf("can't evaluate field %s in type %s", fieldName, typ)
- panic("not reached")
-}
-
-var (
- errorType = reflect.TypeOf((*error)(nil)).Elem()
- fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
-)
-
-// evalCall executes a function or method call. If it's a method, fun already has the receiver bound, so
-// it looks just like a function call. The arg list, if non-nil, includes (in the manner of the shell), arg[0]
-// as the function itself.
-func (s *state) evalCall(dot, fun reflect.Value, node parse.Node, name string, args []parse.Node, final reflect.Value) reflect.Value {
- if args != nil {
- args = args[1:] // Zeroth arg is function name/node; not passed to function.
- }
- typ := fun.Type()
- numIn := len(args)
- if final.IsValid() {
- numIn++
- }
- numFixed := len(args)
- if typ.IsVariadic() {
- numFixed = typ.NumIn() - 1 // last arg is the variadic one.
- if numIn < numFixed {
- s.errorf("wrong number of args for %s: want at least %d got %d", name, typ.NumIn()-1, len(args))
- }
- } else if numIn < typ.NumIn()-1 || !typ.IsVariadic() && numIn != typ.NumIn() {
- s.errorf("wrong number of args for %s: want %d got %d", name, typ.NumIn(), len(args))
- }
- if !goodFunc(typ) {
- // TODO: This could still be a confusing error; maybe goodFunc should provide info.
- s.errorf("can't call method/function %q with %d results", name, typ.NumOut())
- }
- // Build the arg list.
- argv := make([]reflect.Value, numIn)
- // Args must be evaluated. Fixed args first.
- i := 0
- for ; i < numFixed && i < len(args); i++ {
- argv[i] = s.evalArg(dot, typ.In(i), args[i])
- }
- // Now the ... args.
- if typ.IsVariadic() {
- argType := typ.In(typ.NumIn() - 1).Elem() // Argument is a slice.
- for ; i < len(args); i++ {
- argv[i] = s.evalArg(dot, argType, args[i])
- }
- }
- // Add final value if necessary.
- if final.IsValid() {
- t := typ.In(typ.NumIn() - 1)
- if typ.IsVariadic() {
- t = t.Elem()
- }
- argv[i] = s.validateType(final, t)
- }
- result := fun.Call(argv)
- // If we have an error that is not nil, stop execution and return that error to the caller.
- if len(result) == 2 && !result[1].IsNil() {
- s.at(node)
- s.errorf("error calling %s: %s", name, result[1].Interface().(error))
- }
- return result[0]
-}
-
-// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero.
-func canBeNil(typ reflect.Type) bool {
- switch typ.Kind() {
- case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
- return true
- }
- return false
-}
-
-// validateType guarantees that the value is valid and assignable to the type.
-func (s *state) validateType(value reflect.Value, typ reflect.Type) reflect.Value {
- if !value.IsValid() {
- if typ == nil || canBeNil(typ) {
- // An untyped nil interface{}. Accept as a proper nil value.
- return reflect.Zero(typ)
- }
- s.errorf("invalid value; expected %s", typ)
- }
- if typ != nil && !value.Type().AssignableTo(typ) {
- if value.Kind() == reflect.Interface && !value.IsNil() {
- value = value.Elem()
- if value.Type().AssignableTo(typ) {
- return value
- }
- // fallthrough
- }
- // Does one dereference or indirection work? We could do more, as we
- // do with method receivers, but that gets messy and method receivers
- // are much more constrained, so it makes more sense there than here.
- // Besides, one is almost always all you need.
- switch {
- case value.Kind() == reflect.Ptr && value.Type().Elem().AssignableTo(typ):
- value = value.Elem()
- if !value.IsValid() {
- s.errorf("dereference of nil pointer of type %s", typ)
- }
- case reflect.PtrTo(value.Type()).AssignableTo(typ) && value.CanAddr():
- value = value.Addr()
- default:
- s.errorf("wrong type for value; expected %s; got %s", typ, value.Type())
- }
- }
- return value
-}
-
-func (s *state) evalArg(dot reflect.Value, typ reflect.Type, n parse.Node) reflect.Value {
- s.at(n)
- switch arg := n.(type) {
- case *parse.DotNode:
- return s.validateType(dot, typ)
- case *parse.NilNode:
- if canBeNil(typ) {
- return reflect.Zero(typ)
- }
- s.errorf("cannot assign nil to %s", typ)
- case *parse.FieldNode:
- return s.validateType(s.evalFieldNode(dot, arg, []parse.Node{n}, zero), typ)
- case *parse.VariableNode:
- return s.validateType(s.evalVariableNode(dot, arg, nil, zero), typ)
- case *parse.PipeNode:
- return s.validateType(s.evalPipeline(dot, arg), typ)
- case *parse.IdentifierNode:
- return s.evalFunction(dot, arg, arg, nil, zero)
- case *parse.ChainNode:
- return s.validateType(s.evalChainNode(dot, arg, nil, zero), typ)
- }
- switch typ.Kind() {
- case reflect.Bool:
- return s.evalBool(typ, n)
- case reflect.Complex64, reflect.Complex128:
- return s.evalComplex(typ, n)
- case reflect.Float32, reflect.Float64:
- return s.evalFloat(typ, n)
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return s.evalInteger(typ, n)
- case reflect.Interface:
- if typ.NumMethod() == 0 {
- return s.evalEmptyInterface(dot, n)
- }
- case reflect.String:
- return s.evalString(typ, n)
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return s.evalUnsignedInteger(typ, n)
- }
- s.errorf("can't handle %s for arg of type %s", n, typ)
- panic("not reached")
-}
-
-func (s *state) evalBool(typ reflect.Type, n parse.Node) reflect.Value {
- s.at(n)
- if n, ok := n.(*parse.BoolNode); ok {
- value := reflect.New(typ).Elem()
- value.SetBool(n.True)
- return value
- }
- s.errorf("expected bool; found %s", n)
- panic("not reached")
-}
-
-func (s *state) evalString(typ reflect.Type, n parse.Node) reflect.Value {
- s.at(n)
- if n, ok := n.(*parse.StringNode); ok {
- value := reflect.New(typ).Elem()
- value.SetString(n.Text)
- return value
- }
- s.errorf("expected string; found %s", n)
- panic("not reached")
-}
-
-func (s *state) evalInteger(typ reflect.Type, n parse.Node) reflect.Value {
- s.at(n)
- if n, ok := n.(*parse.NumberNode); ok && n.IsInt {
- value := reflect.New(typ).Elem()
- value.SetInt(n.Int64)
- return value
- }
- s.errorf("expected integer; found %s", n)
- panic("not reached")
-}
-
-func (s *state) evalUnsignedInteger(typ reflect.Type, n parse.Node) reflect.Value {
- s.at(n)
- if n, ok := n.(*parse.NumberNode); ok && n.IsUint {
- value := reflect.New(typ).Elem()
- value.SetUint(n.Uint64)
- return value
- }
- s.errorf("expected unsigned integer; found %s", n)
- panic("not reached")
-}
-
-func (s *state) evalFloat(typ reflect.Type, n parse.Node) reflect.Value {
- s.at(n)
- if n, ok := n.(*parse.NumberNode); ok && n.IsFloat {
- value := reflect.New(typ).Elem()
- value.SetFloat(n.Float64)
- return value
- }
- s.errorf("expected float; found %s", n)
- panic("not reached")
-}
-
-func (s *state) evalComplex(typ reflect.Type, n parse.Node) reflect.Value {
- if n, ok := n.(*parse.NumberNode); ok && n.IsComplex {
- value := reflect.New(typ).Elem()
- value.SetComplex(n.Complex128)
- return value
- }
- s.errorf("expected complex; found %s", n)
- panic("not reached")
-}
-
-func (s *state) evalEmptyInterface(dot reflect.Value, n parse.Node) reflect.Value {
- s.at(n)
- switch n := n.(type) {
- case *parse.BoolNode:
- return reflect.ValueOf(n.True)
- case *parse.DotNode:
- return dot
- case *parse.FieldNode:
- return s.evalFieldNode(dot, n, nil, zero)
- case *parse.IdentifierNode:
- return s.evalFunction(dot, n, n, nil, zero)
- case *parse.NilNode:
- // NilNode is handled in evalArg, the only place that calls here.
- s.errorf("evalEmptyInterface: nil (can't happen)")
- case *parse.NumberNode:
- return s.idealConstant(n)
- case *parse.StringNode:
- return reflect.ValueOf(n.Text)
- case *parse.VariableNode:
- return s.evalVariableNode(dot, n, nil, zero)
- case *parse.PipeNode:
- return s.evalPipeline(dot, n)
- }
- s.errorf("can't handle assignment of %s to empty interface argument", n)
- panic("not reached")
-}
-
-// indirect returns the item at the end of indirection, and a bool to indicate if it's nil.
-// We indirect through pointers and empty interfaces (only) because
-// non-empty interfaces have methods we might need.
-func indirect(v reflect.Value) (rv reflect.Value, isNil bool) {
- for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() {
- if v.IsNil() {
- return v, true
- }
- if v.Kind() == reflect.Interface && v.NumMethod() > 0 {
- break
- }
- }
- return v, false
-}
-
-// printValue writes the textual representation of the value to the output of
-// the template.
-func (s *state) printValue(n parse.Node, v reflect.Value) {
- s.at(n)
- iface, ok := printableValue(v)
- if !ok {
- s.errorf("can't print %s of type %s", n, v.Type())
- }
- fmt.Fprint(s.wr, iface)
-}
-
-// printableValue returns the, possibly indirected, interface value inside v that
-// is best for a call to formatted printer.
-func printableValue(v reflect.Value) (interface{}, bool) {
- if v.Kind() == reflect.Ptr {
- v, _ = indirect(v) // fmt.Fprint handles nil.
- }
- if !v.IsValid() {
- return "", true
- }
-
- if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) {
- if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) {
- v = v.Addr()
- } else {
- switch v.Kind() {
- case reflect.Chan, reflect.Func:
- return nil, false
- }
- }
- }
- return v.Interface(), true
-}
-
-// Types to help sort the keys in a map for reproducible output.
-
-type rvs []reflect.Value
-
-func (x rvs) Len() int { return len(x) }
-func (x rvs) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
-type rvInts struct{ rvs }
-
-func (x rvInts) Less(i, j int) bool { return x.rvs[i].Int() < x.rvs[j].Int() }
-
-type rvUints struct{ rvs }
-
-func (x rvUints) Less(i, j int) bool { return x.rvs[i].Uint() < x.rvs[j].Uint() }
-
-type rvFloats struct{ rvs }
-
-func (x rvFloats) Less(i, j int) bool { return x.rvs[i].Float() < x.rvs[j].Float() }
-
-type rvStrings struct{ rvs }
-
-func (x rvStrings) Less(i, j int) bool { return x.rvs[i].String() < x.rvs[j].String() }
-
-// sortKeys sorts (if it can) the slice of reflect.Values, which is a slice of map keys.
-func sortKeys(v []reflect.Value) []reflect.Value {
- if len(v) <= 1 {
- return v
- }
- switch v[0].Kind() {
- case reflect.Float32, reflect.Float64:
- sort.Sort(rvFloats{v})
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- sort.Sort(rvInts{v})
- case reflect.String:
- sort.Sort(rvStrings{v})
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- sort.Sort(rvUints{v})
- }
- return v
-}
diff --git a/vendor/github.com/alecthomas/template/funcs.go b/vendor/github.com/alecthomas/template/funcs.go
deleted file mode 100644
index 39ee5ed68..000000000
--- a/vendor/github.com/alecthomas/template/funcs.go
+++ /dev/null
@@ -1,598 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "net/url"
- "reflect"
- "strings"
- "unicode"
- "unicode/utf8"
-)
-
-// FuncMap is the type of the map defining the mapping from names to functions.
-// Each function must have either a single return value, or two return values of
-// which the second has type error. In that case, if the second (error)
-// return value evaluates to non-nil during execution, execution terminates and
-// Execute returns that error.
-type FuncMap map[string]interface{}
-
-var builtins = FuncMap{
- "and": and,
- "call": call,
- "html": HTMLEscaper,
- "index": index,
- "js": JSEscaper,
- "len": length,
- "not": not,
- "or": or,
- "print": fmt.Sprint,
- "printf": fmt.Sprintf,
- "println": fmt.Sprintln,
- "urlquery": URLQueryEscaper,
-
- // Comparisons
- "eq": eq, // ==
- "ge": ge, // >=
- "gt": gt, // >
- "le": le, // <=
- "lt": lt, // <
- "ne": ne, // !=
-}
-
-var builtinFuncs = createValueFuncs(builtins)
-
-// createValueFuncs turns a FuncMap into a map[string]reflect.Value
-func createValueFuncs(funcMap FuncMap) map[string]reflect.Value {
- m := make(map[string]reflect.Value)
- addValueFuncs(m, funcMap)
- return m
-}
-
-// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values.
-func addValueFuncs(out map[string]reflect.Value, in FuncMap) {
- for name, fn := range in {
- v := reflect.ValueOf(fn)
- if v.Kind() != reflect.Func {
- panic("value for " + name + " not a function")
- }
- if !goodFunc(v.Type()) {
- panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut()))
- }
- out[name] = v
- }
-}
-
-// addFuncs adds to values the functions in funcs. It does no checking of the input -
-// call addValueFuncs first.
-func addFuncs(out, in FuncMap) {
- for name, fn := range in {
- out[name] = fn
- }
-}
-
-// goodFunc checks that the function or method has the right result signature.
-func goodFunc(typ reflect.Type) bool {
- // We allow functions with 1 result or 2 results where the second is an error.
- switch {
- case typ.NumOut() == 1:
- return true
- case typ.NumOut() == 2 && typ.Out(1) == errorType:
- return true
- }
- return false
-}
-
-// findFunction looks for a function in the template, and global map.
-func findFunction(name string, tmpl *Template) (reflect.Value, bool) {
- if tmpl != nil && tmpl.common != nil {
- if fn := tmpl.execFuncs[name]; fn.IsValid() {
- return fn, true
- }
- }
- if fn := builtinFuncs[name]; fn.IsValid() {
- return fn, true
- }
- return reflect.Value{}, false
-}
-
-// Indexing.
-
-// index returns the result of indexing its first argument by the following
-// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each
-// indexed item must be a map, slice, or array.
-func index(item interface{}, indices ...interface{}) (interface{}, error) {
- v := reflect.ValueOf(item)
- for _, i := range indices {
- index := reflect.ValueOf(i)
- var isNil bool
- if v, isNil = indirect(v); isNil {
- return nil, fmt.Errorf("index of nil pointer")
- }
- switch v.Kind() {
- case reflect.Array, reflect.Slice, reflect.String:
- var x int64
- switch index.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- x = index.Int()
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- x = int64(index.Uint())
- default:
- return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type())
- }
- if x < 0 || x >= int64(v.Len()) {
- return nil, fmt.Errorf("index out of range: %d", x)
- }
- v = v.Index(int(x))
- case reflect.Map:
- if !index.IsValid() {
- index = reflect.Zero(v.Type().Key())
- }
- if !index.Type().AssignableTo(v.Type().Key()) {
- return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type())
- }
- if x := v.MapIndex(index); x.IsValid() {
- v = x
- } else {
- v = reflect.Zero(v.Type().Elem())
- }
- default:
- return nil, fmt.Errorf("can't index item of type %s", v.Type())
- }
- }
- return v.Interface(), nil
-}
-
-// Length
-
-// length returns the length of the item, with an error if it has no defined length.
-func length(item interface{}) (int, error) {
- v, isNil := indirect(reflect.ValueOf(item))
- if isNil {
- return 0, fmt.Errorf("len of nil pointer")
- }
- switch v.Kind() {
- case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
- return v.Len(), nil
- }
- return 0, fmt.Errorf("len of type %s", v.Type())
-}
-
-// Function invocation
-
-// call returns the result of evaluating the first argument as a function.
-// The function must return 1 result, or 2 results, the second of which is an error.
-func call(fn interface{}, args ...interface{}) (interface{}, error) {
- v := reflect.ValueOf(fn)
- typ := v.Type()
- if typ.Kind() != reflect.Func {
- return nil, fmt.Errorf("non-function of type %s", typ)
- }
- if !goodFunc(typ) {
- return nil, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut())
- }
- numIn := typ.NumIn()
- var dddType reflect.Type
- if typ.IsVariadic() {
- if len(args) < numIn-1 {
- return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1)
- }
- dddType = typ.In(numIn - 1).Elem()
- } else {
- if len(args) != numIn {
- return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn)
- }
- }
- argv := make([]reflect.Value, len(args))
- for i, arg := range args {
- value := reflect.ValueOf(arg)
- // Compute the expected type. Clumsy because of variadics.
- var argType reflect.Type
- if !typ.IsVariadic() || i < numIn-1 {
- argType = typ.In(i)
- } else {
- argType = dddType
- }
- if !value.IsValid() && canBeNil(argType) {
- value = reflect.Zero(argType)
- }
- if !value.Type().AssignableTo(argType) {
- return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType)
- }
- argv[i] = value
- }
- result := v.Call(argv)
- if len(result) == 2 && !result[1].IsNil() {
- return result[0].Interface(), result[1].Interface().(error)
- }
- return result[0].Interface(), nil
-}
-
-// Boolean logic.
-
-func truth(a interface{}) bool {
- t, _ := isTrue(reflect.ValueOf(a))
- return t
-}
-
-// and computes the Boolean AND of its arguments, returning
-// the first false argument it encounters, or the last argument.
-func and(arg0 interface{}, args ...interface{}) interface{} {
- if !truth(arg0) {
- return arg0
- }
- for i := range args {
- arg0 = args[i]
- if !truth(arg0) {
- break
- }
- }
- return arg0
-}
-
-// or computes the Boolean OR of its arguments, returning
-// the first true argument it encounters, or the last argument.
-func or(arg0 interface{}, args ...interface{}) interface{} {
- if truth(arg0) {
- return arg0
- }
- for i := range args {
- arg0 = args[i]
- if truth(arg0) {
- break
- }
- }
- return arg0
-}
-
-// not returns the Boolean negation of its argument.
-func not(arg interface{}) (truth bool) {
- truth, _ = isTrue(reflect.ValueOf(arg))
- return !truth
-}
-
-// Comparison.
-
-// TODO: Perhaps allow comparison between signed and unsigned integers.
-
-var (
- errBadComparisonType = errors.New("invalid type for comparison")
- errBadComparison = errors.New("incompatible types for comparison")
- errNoComparison = errors.New("missing argument for comparison")
-)
-
-type kind int
-
-const (
- invalidKind kind = iota
- boolKind
- complexKind
- intKind
- floatKind
- integerKind
- stringKind
- uintKind
-)
-
-func basicKind(v reflect.Value) (kind, error) {
- switch v.Kind() {
- case reflect.Bool:
- return boolKind, nil
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return intKind, nil
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return uintKind, nil
- case reflect.Float32, reflect.Float64:
- return floatKind, nil
- case reflect.Complex64, reflect.Complex128:
- return complexKind, nil
- case reflect.String:
- return stringKind, nil
- }
- return invalidKind, errBadComparisonType
-}
-
-// eq evaluates the comparison a == b || a == c || ...
-func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) {
- v1 := reflect.ValueOf(arg1)
- k1, err := basicKind(v1)
- if err != nil {
- return false, err
- }
- if len(arg2) == 0 {
- return false, errNoComparison
- }
- for _, arg := range arg2 {
- v2 := reflect.ValueOf(arg)
- k2, err := basicKind(v2)
- if err != nil {
- return false, err
- }
- truth := false
- if k1 != k2 {
- // Special case: Can compare integer values regardless of type's sign.
- switch {
- case k1 == intKind && k2 == uintKind:
- truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint()
- case k1 == uintKind && k2 == intKind:
- truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int())
- default:
- return false, errBadComparison
- }
- } else {
- switch k1 {
- case boolKind:
- truth = v1.Bool() == v2.Bool()
- case complexKind:
- truth = v1.Complex() == v2.Complex()
- case floatKind:
- truth = v1.Float() == v2.Float()
- case intKind:
- truth = v1.Int() == v2.Int()
- case stringKind:
- truth = v1.String() == v2.String()
- case uintKind:
- truth = v1.Uint() == v2.Uint()
- default:
- panic("invalid kind")
- }
- }
- if truth {
- return true, nil
- }
- }
- return false, nil
-}
-
-// ne evaluates the comparison a != b.
-func ne(arg1, arg2 interface{}) (bool, error) {
- // != is the inverse of ==.
- equal, err := eq(arg1, arg2)
- return !equal, err
-}
-
-// lt evaluates the comparison a < b.
-func lt(arg1, arg2 interface{}) (bool, error) {
- v1 := reflect.ValueOf(arg1)
- k1, err := basicKind(v1)
- if err != nil {
- return false, err
- }
- v2 := reflect.ValueOf(arg2)
- k2, err := basicKind(v2)
- if err != nil {
- return false, err
- }
- truth := false
- if k1 != k2 {
- // Special case: Can compare integer values regardless of type's sign.
- switch {
- case k1 == intKind && k2 == uintKind:
- truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint()
- case k1 == uintKind && k2 == intKind:
- truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int())
- default:
- return false, errBadComparison
- }
- } else {
- switch k1 {
- case boolKind, complexKind:
- return false, errBadComparisonType
- case floatKind:
- truth = v1.Float() < v2.Float()
- case intKind:
- truth = v1.Int() < v2.Int()
- case stringKind:
- truth = v1.String() < v2.String()
- case uintKind:
- truth = v1.Uint() < v2.Uint()
- default:
- panic("invalid kind")
- }
- }
- return truth, nil
-}
-
-// le evaluates the comparison <= b.
-func le(arg1, arg2 interface{}) (bool, error) {
- // <= is < or ==.
- lessThan, err := lt(arg1, arg2)
- if lessThan || err != nil {
- return lessThan, err
- }
- return eq(arg1, arg2)
-}
-
-// gt evaluates the comparison a > b.
-func gt(arg1, arg2 interface{}) (bool, error) {
- // > is the inverse of <=.
- lessOrEqual, err := le(arg1, arg2)
- if err != nil {
- return false, err
- }
- return !lessOrEqual, nil
-}
-
-// ge evaluates the comparison a >= b.
-func ge(arg1, arg2 interface{}) (bool, error) {
- // >= is the inverse of <.
- lessThan, err := lt(arg1, arg2)
- if err != nil {
- return false, err
- }
- return !lessThan, nil
-}
-
-// HTML escaping.
-
-var (
- htmlQuot = []byte(""") // shorter than """
- htmlApos = []byte("'") // shorter than "'" and apos was not in HTML until HTML5
- htmlAmp = []byte("&")
- htmlLt = []byte("<")
- htmlGt = []byte(">")
-)
-
-// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
-func HTMLEscape(w io.Writer, b []byte) {
- last := 0
- for i, c := range b {
- var html []byte
- switch c {
- case '"':
- html = htmlQuot
- case '\'':
- html = htmlApos
- case '&':
- html = htmlAmp
- case '<':
- html = htmlLt
- case '>':
- html = htmlGt
- default:
- continue
- }
- w.Write(b[last:i])
- w.Write(html)
- last = i + 1
- }
- w.Write(b[last:])
-}
-
-// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
-func HTMLEscapeString(s string) string {
- // Avoid allocation if we can.
- if strings.IndexAny(s, `'"&<>`) < 0 {
- return s
- }
- var b bytes.Buffer
- HTMLEscape(&b, []byte(s))
- return b.String()
-}
-
-// HTMLEscaper returns the escaped HTML equivalent of the textual
-// representation of its arguments.
-func HTMLEscaper(args ...interface{}) string {
- return HTMLEscapeString(evalArgs(args))
-}
-
-// JavaScript escaping.
-
-var (
- jsLowUni = []byte(`\u00`)
- hex = []byte("0123456789ABCDEF")
-
- jsBackslash = []byte(`\\`)
- jsApos = []byte(`\'`)
- jsQuot = []byte(`\"`)
- jsLt = []byte(`\x3C`)
- jsGt = []byte(`\x3E`)
-)
-
-// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
-func JSEscape(w io.Writer, b []byte) {
- last := 0
- for i := 0; i < len(b); i++ {
- c := b[i]
-
- if !jsIsSpecial(rune(c)) {
- // fast path: nothing to do
- continue
- }
- w.Write(b[last:i])
-
- if c < utf8.RuneSelf {
- // Quotes, slashes and angle brackets get quoted.
- // Control characters get written as \u00XX.
- switch c {
- case '\\':
- w.Write(jsBackslash)
- case '\'':
- w.Write(jsApos)
- case '"':
- w.Write(jsQuot)
- case '<':
- w.Write(jsLt)
- case '>':
- w.Write(jsGt)
- default:
- w.Write(jsLowUni)
- t, b := c>>4, c&0x0f
- w.Write(hex[t : t+1])
- w.Write(hex[b : b+1])
- }
- } else {
- // Unicode rune.
- r, size := utf8.DecodeRune(b[i:])
- if unicode.IsPrint(r) {
- w.Write(b[i : i+size])
- } else {
- fmt.Fprintf(w, "\\u%04X", r)
- }
- i += size - 1
- }
- last = i + 1
- }
- w.Write(b[last:])
-}
-
-// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
-func JSEscapeString(s string) string {
- // Avoid allocation if we can.
- if strings.IndexFunc(s, jsIsSpecial) < 0 {
- return s
- }
- var b bytes.Buffer
- JSEscape(&b, []byte(s))
- return b.String()
-}
-
-func jsIsSpecial(r rune) bool {
- switch r {
- case '\\', '\'', '"', '<', '>':
- return true
- }
- return r < ' ' || utf8.RuneSelf <= r
-}
-
-// JSEscaper returns the escaped JavaScript equivalent of the textual
-// representation of its arguments.
-func JSEscaper(args ...interface{}) string {
- return JSEscapeString(evalArgs(args))
-}
-
-// URLQueryEscaper returns the escaped value of the textual representation of
-// its arguments in a form suitable for embedding in a URL query.
-func URLQueryEscaper(args ...interface{}) string {
- return url.QueryEscape(evalArgs(args))
-}
-
-// evalArgs formats the list of arguments into a string. It is therefore equivalent to
-// fmt.Sprint(args...)
-// except that each argument is indirected (if a pointer), as required,
-// using the same rules as the default string evaluation during template
-// execution.
-func evalArgs(args []interface{}) string {
- ok := false
- var s string
- // Fast path for simple common case.
- if len(args) == 1 {
- s, ok = args[0].(string)
- }
- if !ok {
- for i, arg := range args {
- a, ok := printableValue(reflect.ValueOf(arg))
- if ok {
- args[i] = a
- } // else left fmt do its thing
- }
- s = fmt.Sprint(args...)
- }
- return s
-}
diff --git a/vendor/github.com/alecthomas/template/go.mod b/vendor/github.com/alecthomas/template/go.mod
deleted file mode 100644
index a70670ae2..000000000
--- a/vendor/github.com/alecthomas/template/go.mod
+++ /dev/null
@@ -1 +0,0 @@
-module github.com/alecthomas/template
diff --git a/vendor/github.com/alecthomas/template/helper.go b/vendor/github.com/alecthomas/template/helper.go
deleted file mode 100644
index 3636fb54d..000000000
--- a/vendor/github.com/alecthomas/template/helper.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Helper functions to make constructing templates easier.
-
-package template
-
-import (
- "fmt"
- "io/ioutil"
- "path/filepath"
-)
-
-// Functions and methods to parse templates.
-
-// Must is a helper that wraps a call to a function returning (*Template, error)
-// and panics if the error is non-nil. It is intended for use in variable
-// initializations such as
-// var t = template.Must(template.New("name").Parse("text"))
-func Must(t *Template, err error) *Template {
- if err != nil {
- panic(err)
- }
- return t
-}
-
-// ParseFiles creates a new Template and parses the template definitions from
-// the named files. The returned template's name will have the (base) name and
-// (parsed) contents of the first file. There must be at least one file.
-// If an error occurs, parsing stops and the returned *Template is nil.
-func ParseFiles(filenames ...string) (*Template, error) {
- return parseFiles(nil, filenames...)
-}
-
-// ParseFiles parses the named files and associates the resulting templates with
-// t. If an error occurs, parsing stops and the returned template is nil;
-// otherwise it is t. There must be at least one file.
-func (t *Template) ParseFiles(filenames ...string) (*Template, error) {
- return parseFiles(t, filenames...)
-}
-
-// parseFiles is the helper for the method and function. If the argument
-// template is nil, it is created from the first file.
-func parseFiles(t *Template, filenames ...string) (*Template, error) {
- if len(filenames) == 0 {
- // Not really a problem, but be consistent.
- return nil, fmt.Errorf("template: no files named in call to ParseFiles")
- }
- for _, filename := range filenames {
- b, err := ioutil.ReadFile(filename)
- if err != nil {
- return nil, err
- }
- s := string(b)
- name := filepath.Base(filename)
- // First template becomes return value if not already defined,
- // and we use that one for subsequent New calls to associate
- // all the templates together. Also, if this file has the same name
- // as t, this file becomes the contents of t, so
- // t, err := New(name).Funcs(xxx).ParseFiles(name)
- // works. Otherwise we create a new template associated with t.
- var tmpl *Template
- if t == nil {
- t = New(name)
- }
- if name == t.Name() {
- tmpl = t
- } else {
- tmpl = t.New(name)
- }
- _, err = tmpl.Parse(s)
- if err != nil {
- return nil, err
- }
- }
- return t, nil
-}
-
-// ParseGlob creates a new Template and parses the template definitions from the
-// files identified by the pattern, which must match at least one file. The
-// returned template will have the (base) name and (parsed) contents of the
-// first file matched by the pattern. ParseGlob is equivalent to calling
-// ParseFiles with the list of files matched by the pattern.
-func ParseGlob(pattern string) (*Template, error) {
- return parseGlob(nil, pattern)
-}
-
-// ParseGlob parses the template definitions in the files identified by the
-// pattern and associates the resulting templates with t. The pattern is
-// processed by filepath.Glob and must match at least one file. ParseGlob is
-// equivalent to calling t.ParseFiles with the list of files matched by the
-// pattern.
-func (t *Template) ParseGlob(pattern string) (*Template, error) {
- return parseGlob(t, pattern)
-}
-
-// parseGlob is the implementation of the function and method ParseGlob.
-func parseGlob(t *Template, pattern string) (*Template, error) {
- filenames, err := filepath.Glob(pattern)
- if err != nil {
- return nil, err
- }
- if len(filenames) == 0 {
- return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern)
- }
- return parseFiles(t, filenames...)
-}
diff --git a/vendor/github.com/alecthomas/template/parse/lex.go b/vendor/github.com/alecthomas/template/parse/lex.go
deleted file mode 100644
index 55f1c051e..000000000
--- a/vendor/github.com/alecthomas/template/parse/lex.go
+++ /dev/null
@@ -1,556 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package parse
-
-import (
- "fmt"
- "strings"
- "unicode"
- "unicode/utf8"
-)
-
-// item represents a token or text string returned from the scanner.
-type item struct {
- typ itemType // The type of this item.
- pos Pos // The starting position, in bytes, of this item in the input string.
- val string // The value of this item.
-}
-
-func (i item) String() string {
- switch {
- case i.typ == itemEOF:
- return "EOF"
- case i.typ == itemError:
- return i.val
- case i.typ > itemKeyword:
- return fmt.Sprintf("<%s>", i.val)
- case len(i.val) > 10:
- return fmt.Sprintf("%.10q...", i.val)
- }
- return fmt.Sprintf("%q", i.val)
-}
-
-// itemType identifies the type of lex items.
-type itemType int
-
-const (
- itemError itemType = iota // error occurred; value is text of error
- itemBool // boolean constant
- itemChar // printable ASCII character; grab bag for comma etc.
- itemCharConstant // character constant
- itemComplex // complex constant (1+2i); imaginary is just a number
- itemColonEquals // colon-equals (':=') introducing a declaration
- itemEOF
- itemField // alphanumeric identifier starting with '.'
- itemIdentifier // alphanumeric identifier not starting with '.'
- itemLeftDelim // left action delimiter
- itemLeftParen // '(' inside action
- itemNumber // simple number, including imaginary
- itemPipe // pipe symbol
- itemRawString // raw quoted string (includes quotes)
- itemRightDelim // right action delimiter
- itemElideNewline // elide newline after right delim
- itemRightParen // ')' inside action
- itemSpace // run of spaces separating arguments
- itemString // quoted string (includes quotes)
- itemText // plain text
- itemVariable // variable starting with '$', such as '$' or '$1' or '$hello'
- // Keywords appear after all the rest.
- itemKeyword // used only to delimit the keywords
- itemDot // the cursor, spelled '.'
- itemDefine // define keyword
- itemElse // else keyword
- itemEnd // end keyword
- itemIf // if keyword
- itemNil // the untyped nil constant, easiest to treat as a keyword
- itemRange // range keyword
- itemTemplate // template keyword
- itemWith // with keyword
-)
-
-var key = map[string]itemType{
- ".": itemDot,
- "define": itemDefine,
- "else": itemElse,
- "end": itemEnd,
- "if": itemIf,
- "range": itemRange,
- "nil": itemNil,
- "template": itemTemplate,
- "with": itemWith,
-}
-
-const eof = -1
-
-// stateFn represents the state of the scanner as a function that returns the next state.
-type stateFn func(*lexer) stateFn
-
-// lexer holds the state of the scanner.
-type lexer struct {
- name string // the name of the input; used only for error reports
- input string // the string being scanned
- leftDelim string // start of action
- rightDelim string // end of action
- state stateFn // the next lexing function to enter
- pos Pos // current position in the input
- start Pos // start position of this item
- width Pos // width of last rune read from input
- lastPos Pos // position of most recent item returned by nextItem
- items chan item // channel of scanned items
- parenDepth int // nesting depth of ( ) exprs
-}
-
-// next returns the next rune in the input.
-func (l *lexer) next() rune {
- if int(l.pos) >= len(l.input) {
- l.width = 0
- return eof
- }
- r, w := utf8.DecodeRuneInString(l.input[l.pos:])
- l.width = Pos(w)
- l.pos += l.width
- return r
-}
-
-// peek returns but does not consume the next rune in the input.
-func (l *lexer) peek() rune {
- r := l.next()
- l.backup()
- return r
-}
-
-// backup steps back one rune. Can only be called once per call of next.
-func (l *lexer) backup() {
- l.pos -= l.width
-}
-
-// emit passes an item back to the client.
-func (l *lexer) emit(t itemType) {
- l.items <- item{t, l.start, l.input[l.start:l.pos]}
- l.start = l.pos
-}
-
-// ignore skips over the pending input before this point.
-func (l *lexer) ignore() {
- l.start = l.pos
-}
-
-// accept consumes the next rune if it's from the valid set.
-func (l *lexer) accept(valid string) bool {
- if strings.IndexRune(valid, l.next()) >= 0 {
- return true
- }
- l.backup()
- return false
-}
-
-// acceptRun consumes a run of runes from the valid set.
-func (l *lexer) acceptRun(valid string) {
- for strings.IndexRune(valid, l.next()) >= 0 {
- }
- l.backup()
-}
-
-// lineNumber reports which line we're on, based on the position of
-// the previous item returned by nextItem. Doing it this way
-// means we don't have to worry about peek double counting.
-func (l *lexer) lineNumber() int {
- return 1 + strings.Count(l.input[:l.lastPos], "\n")
-}
-
-// errorf returns an error token and terminates the scan by passing
-// back a nil pointer that will be the next state, terminating l.nextItem.
-func (l *lexer) errorf(format string, args ...interface{}) stateFn {
- l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)}
- return nil
-}
-
-// nextItem returns the next item from the input.
-func (l *lexer) nextItem() item {
- item := <-l.items
- l.lastPos = item.pos
- return item
-}
-
-// lex creates a new scanner for the input string.
-func lex(name, input, left, right string) *lexer {
- if left == "" {
- left = leftDelim
- }
- if right == "" {
- right = rightDelim
- }
- l := &lexer{
- name: name,
- input: input,
- leftDelim: left,
- rightDelim: right,
- items: make(chan item),
- }
- go l.run()
- return l
-}
-
-// run runs the state machine for the lexer.
-func (l *lexer) run() {
- for l.state = lexText; l.state != nil; {
- l.state = l.state(l)
- }
-}
-
-// state functions
-
-const (
- leftDelim = "{{"
- rightDelim = "}}"
- leftComment = "/*"
- rightComment = "*/"
-)
-
-// lexText scans until an opening action delimiter, "{{".
-func lexText(l *lexer) stateFn {
- for {
- if strings.HasPrefix(l.input[l.pos:], l.leftDelim) {
- if l.pos > l.start {
- l.emit(itemText)
- }
- return lexLeftDelim
- }
- if l.next() == eof {
- break
- }
- }
- // Correctly reached EOF.
- if l.pos > l.start {
- l.emit(itemText)
- }
- l.emit(itemEOF)
- return nil
-}
-
-// lexLeftDelim scans the left delimiter, which is known to be present.
-func lexLeftDelim(l *lexer) stateFn {
- l.pos += Pos(len(l.leftDelim))
- if strings.HasPrefix(l.input[l.pos:], leftComment) {
- return lexComment
- }
- l.emit(itemLeftDelim)
- l.parenDepth = 0
- return lexInsideAction
-}
-
-// lexComment scans a comment. The left comment marker is known to be present.
-func lexComment(l *lexer) stateFn {
- l.pos += Pos(len(leftComment))
- i := strings.Index(l.input[l.pos:], rightComment)
- if i < 0 {
- return l.errorf("unclosed comment")
- }
- l.pos += Pos(i + len(rightComment))
- if !strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
- return l.errorf("comment ends before closing delimiter")
-
- }
- l.pos += Pos(len(l.rightDelim))
- l.ignore()
- return lexText
-}
-
-// lexRightDelim scans the right delimiter, which is known to be present.
-func lexRightDelim(l *lexer) stateFn {
- l.pos += Pos(len(l.rightDelim))
- l.emit(itemRightDelim)
- if l.peek() == '\\' {
- l.pos++
- l.emit(itemElideNewline)
- }
- return lexText
-}
-
-// lexInsideAction scans the elements inside action delimiters.
-func lexInsideAction(l *lexer) stateFn {
- // Either number, quoted string, or identifier.
- // Spaces separate arguments; runs of spaces turn into itemSpace.
- // Pipe symbols separate and are emitted.
- if strings.HasPrefix(l.input[l.pos:], l.rightDelim+"\\") || strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
- if l.parenDepth == 0 {
- return lexRightDelim
- }
- return l.errorf("unclosed left paren")
- }
- switch r := l.next(); {
- case r == eof || isEndOfLine(r):
- return l.errorf("unclosed action")
- case isSpace(r):
- return lexSpace
- case r == ':':
- if l.next() != '=' {
- return l.errorf("expected :=")
- }
- l.emit(itemColonEquals)
- case r == '|':
- l.emit(itemPipe)
- case r == '"':
- return lexQuote
- case r == '`':
- return lexRawQuote
- case r == '$':
- return lexVariable
- case r == '\'':
- return lexChar
- case r == '.':
- // special look-ahead for ".field" so we don't break l.backup().
- if l.pos < Pos(len(l.input)) {
- r := l.input[l.pos]
- if r < '0' || '9' < r {
- return lexField
- }
- }
- fallthrough // '.' can start a number.
- case r == '+' || r == '-' || ('0' <= r && r <= '9'):
- l.backup()
- return lexNumber
- case isAlphaNumeric(r):
- l.backup()
- return lexIdentifier
- case r == '(':
- l.emit(itemLeftParen)
- l.parenDepth++
- return lexInsideAction
- case r == ')':
- l.emit(itemRightParen)
- l.parenDepth--
- if l.parenDepth < 0 {
- return l.errorf("unexpected right paren %#U", r)
- }
- return lexInsideAction
- case r <= unicode.MaxASCII && unicode.IsPrint(r):
- l.emit(itemChar)
- return lexInsideAction
- default:
- return l.errorf("unrecognized character in action: %#U", r)
- }
- return lexInsideAction
-}
-
-// lexSpace scans a run of space characters.
-// One space has already been seen.
-func lexSpace(l *lexer) stateFn {
- for isSpace(l.peek()) {
- l.next()
- }
- l.emit(itemSpace)
- return lexInsideAction
-}
-
-// lexIdentifier scans an alphanumeric.
-func lexIdentifier(l *lexer) stateFn {
-Loop:
- for {
- switch r := l.next(); {
- case isAlphaNumeric(r):
- // absorb.
- default:
- l.backup()
- word := l.input[l.start:l.pos]
- if !l.atTerminator() {
- return l.errorf("bad character %#U", r)
- }
- switch {
- case key[word] > itemKeyword:
- l.emit(key[word])
- case word[0] == '.':
- l.emit(itemField)
- case word == "true", word == "false":
- l.emit(itemBool)
- default:
- l.emit(itemIdentifier)
- }
- break Loop
- }
- }
- return lexInsideAction
-}
-
-// lexField scans a field: .Alphanumeric.
-// The . has been scanned.
-func lexField(l *lexer) stateFn {
- return lexFieldOrVariable(l, itemField)
-}
-
-// lexVariable scans a Variable: $Alphanumeric.
-// The $ has been scanned.
-func lexVariable(l *lexer) stateFn {
- if l.atTerminator() { // Nothing interesting follows -> "$".
- l.emit(itemVariable)
- return lexInsideAction
- }
- return lexFieldOrVariable(l, itemVariable)
-}
-
-// lexVariable scans a field or variable: [.$]Alphanumeric.
-// The . or $ has been scanned.
-func lexFieldOrVariable(l *lexer, typ itemType) stateFn {
- if l.atTerminator() { // Nothing interesting follows -> "." or "$".
- if typ == itemVariable {
- l.emit(itemVariable)
- } else {
- l.emit(itemDot)
- }
- return lexInsideAction
- }
- var r rune
- for {
- r = l.next()
- if !isAlphaNumeric(r) {
- l.backup()
- break
- }
- }
- if !l.atTerminator() {
- return l.errorf("bad character %#U", r)
- }
- l.emit(typ)
- return lexInsideAction
-}
-
-// atTerminator reports whether the input is at valid termination character to
-// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases
-// like "$x+2" not being acceptable without a space, in case we decide one
-// day to implement arithmetic.
-func (l *lexer) atTerminator() bool {
- r := l.peek()
- if isSpace(r) || isEndOfLine(r) {
- return true
- }
- switch r {
- case eof, '.', ',', '|', ':', ')', '(':
- return true
- }
- // Does r start the delimiter? This can be ambiguous (with delim=="//", $x/2 will
- // succeed but should fail) but only in extremely rare cases caused by willfully
- // bad choice of delimiter.
- if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r {
- return true
- }
- return false
-}
-
-// lexChar scans a character constant. The initial quote is already
-// scanned. Syntax checking is done by the parser.
-func lexChar(l *lexer) stateFn {
-Loop:
- for {
- switch l.next() {
- case '\\':
- if r := l.next(); r != eof && r != '\n' {
- break
- }
- fallthrough
- case eof, '\n':
- return l.errorf("unterminated character constant")
- case '\'':
- break Loop
- }
- }
- l.emit(itemCharConstant)
- return lexInsideAction
-}
-
-// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This
-// isn't a perfect number scanner - for instance it accepts "." and "0x0.2"
-// and "089" - but when it's wrong the input is invalid and the parser (via
-// strconv) will notice.
-func lexNumber(l *lexer) stateFn {
- if !l.scanNumber() {
- return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
- }
- if sign := l.peek(); sign == '+' || sign == '-' {
- // Complex: 1+2i. No spaces, must end in 'i'.
- if !l.scanNumber() || l.input[l.pos-1] != 'i' {
- return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
- }
- l.emit(itemComplex)
- } else {
- l.emit(itemNumber)
- }
- return lexInsideAction
-}
-
-func (l *lexer) scanNumber() bool {
- // Optional leading sign.
- l.accept("+-")
- // Is it hex?
- digits := "0123456789"
- if l.accept("0") && l.accept("xX") {
- digits = "0123456789abcdefABCDEF"
- }
- l.acceptRun(digits)
- if l.accept(".") {
- l.acceptRun(digits)
- }
- if l.accept("eE") {
- l.accept("+-")
- l.acceptRun("0123456789")
- }
- // Is it imaginary?
- l.accept("i")
- // Next thing mustn't be alphanumeric.
- if isAlphaNumeric(l.peek()) {
- l.next()
- return false
- }
- return true
-}
-
-// lexQuote scans a quoted string.
-func lexQuote(l *lexer) stateFn {
-Loop:
- for {
- switch l.next() {
- case '\\':
- if r := l.next(); r != eof && r != '\n' {
- break
- }
- fallthrough
- case eof, '\n':
- return l.errorf("unterminated quoted string")
- case '"':
- break Loop
- }
- }
- l.emit(itemString)
- return lexInsideAction
-}
-
-// lexRawQuote scans a raw quoted string.
-func lexRawQuote(l *lexer) stateFn {
-Loop:
- for {
- switch l.next() {
- case eof, '\n':
- return l.errorf("unterminated raw quoted string")
- case '`':
- break Loop
- }
- }
- l.emit(itemRawString)
- return lexInsideAction
-}
-
-// isSpace reports whether r is a space character.
-func isSpace(r rune) bool {
- return r == ' ' || r == '\t'
-}
-
-// isEndOfLine reports whether r is an end-of-line character.
-func isEndOfLine(r rune) bool {
- return r == '\r' || r == '\n'
-}
-
-// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
-func isAlphaNumeric(r rune) bool {
- return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
-}
diff --git a/vendor/github.com/alecthomas/template/parse/node.go b/vendor/github.com/alecthomas/template/parse/node.go
deleted file mode 100644
index 55c37f6db..000000000
--- a/vendor/github.com/alecthomas/template/parse/node.go
+++ /dev/null
@@ -1,834 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Parse nodes.
-
-package parse
-
-import (
- "bytes"
- "fmt"
- "strconv"
- "strings"
-)
-
-var textFormat = "%s" // Changed to "%q" in tests for better error messages.
-
-// A Node is an element in the parse tree. The interface is trivial.
-// The interface contains an unexported method so that only
-// types local to this package can satisfy it.
-type Node interface {
- Type() NodeType
- String() string
- // Copy does a deep copy of the Node and all its components.
- // To avoid type assertions, some XxxNodes also have specialized
- // CopyXxx methods that return *XxxNode.
- Copy() Node
- Position() Pos // byte position of start of node in full original input string
- // tree returns the containing *Tree.
- // It is unexported so all implementations of Node are in this package.
- tree() *Tree
-}
-
-// NodeType identifies the type of a parse tree node.
-type NodeType int
-
-// Pos represents a byte position in the original input text from which
-// this template was parsed.
-type Pos int
-
-func (p Pos) Position() Pos {
- return p
-}
-
-// Type returns itself and provides an easy default implementation
-// for embedding in a Node. Embedded in all non-trivial Nodes.
-func (t NodeType) Type() NodeType {
- return t
-}
-
-const (
- NodeText NodeType = iota // Plain text.
- NodeAction // A non-control action such as a field evaluation.
- NodeBool // A boolean constant.
- NodeChain // A sequence of field accesses.
- NodeCommand // An element of a pipeline.
- NodeDot // The cursor, dot.
- nodeElse // An else action. Not added to tree.
- nodeEnd // An end action. Not added to tree.
- NodeField // A field or method name.
- NodeIdentifier // An identifier; always a function name.
- NodeIf // An if action.
- NodeList // A list of Nodes.
- NodeNil // An untyped nil constant.
- NodeNumber // A numerical constant.
- NodePipe // A pipeline of commands.
- NodeRange // A range action.
- NodeString // A string constant.
- NodeTemplate // A template invocation action.
- NodeVariable // A $ variable.
- NodeWith // A with action.
-)
-
-// Nodes.
-
-// ListNode holds a sequence of nodes.
-type ListNode struct {
- NodeType
- Pos
- tr *Tree
- Nodes []Node // The element nodes in lexical order.
-}
-
-func (t *Tree) newList(pos Pos) *ListNode {
- return &ListNode{tr: t, NodeType: NodeList, Pos: pos}
-}
-
-func (l *ListNode) append(n Node) {
- l.Nodes = append(l.Nodes, n)
-}
-
-func (l *ListNode) tree() *Tree {
- return l.tr
-}
-
-func (l *ListNode) String() string {
- b := new(bytes.Buffer)
- for _, n := range l.Nodes {
- fmt.Fprint(b, n)
- }
- return b.String()
-}
-
-func (l *ListNode) CopyList() *ListNode {
- if l == nil {
- return l
- }
- n := l.tr.newList(l.Pos)
- for _, elem := range l.Nodes {
- n.append(elem.Copy())
- }
- return n
-}
-
-func (l *ListNode) Copy() Node {
- return l.CopyList()
-}
-
-// TextNode holds plain text.
-type TextNode struct {
- NodeType
- Pos
- tr *Tree
- Text []byte // The text; may span newlines.
-}
-
-func (t *Tree) newText(pos Pos, text string) *TextNode {
- return &TextNode{tr: t, NodeType: NodeText, Pos: pos, Text: []byte(text)}
-}
-
-func (t *TextNode) String() string {
- return fmt.Sprintf(textFormat, t.Text)
-}
-
-func (t *TextNode) tree() *Tree {
- return t.tr
-}
-
-func (t *TextNode) Copy() Node {
- return &TextNode{tr: t.tr, NodeType: NodeText, Pos: t.Pos, Text: append([]byte{}, t.Text...)}
-}
-
-// PipeNode holds a pipeline with optional declaration
-type PipeNode struct {
- NodeType
- Pos
- tr *Tree
- Line int // The line number in the input (deprecated; kept for compatibility)
- Decl []*VariableNode // Variable declarations in lexical order.
- Cmds []*CommandNode // The commands in lexical order.
-}
-
-func (t *Tree) newPipeline(pos Pos, line int, decl []*VariableNode) *PipeNode {
- return &PipeNode{tr: t, NodeType: NodePipe, Pos: pos, Line: line, Decl: decl}
-}
-
-func (p *PipeNode) append(command *CommandNode) {
- p.Cmds = append(p.Cmds, command)
-}
-
-func (p *PipeNode) String() string {
- s := ""
- if len(p.Decl) > 0 {
- for i, v := range p.Decl {
- if i > 0 {
- s += ", "
- }
- s += v.String()
- }
- s += " := "
- }
- for i, c := range p.Cmds {
- if i > 0 {
- s += " | "
- }
- s += c.String()
- }
- return s
-}
-
-func (p *PipeNode) tree() *Tree {
- return p.tr
-}
-
-func (p *PipeNode) CopyPipe() *PipeNode {
- if p == nil {
- return p
- }
- var decl []*VariableNode
- for _, d := range p.Decl {
- decl = append(decl, d.Copy().(*VariableNode))
- }
- n := p.tr.newPipeline(p.Pos, p.Line, decl)
- for _, c := range p.Cmds {
- n.append(c.Copy().(*CommandNode))
- }
- return n
-}
-
-func (p *PipeNode) Copy() Node {
- return p.CopyPipe()
-}
-
-// ActionNode holds an action (something bounded by delimiters).
-// Control actions have their own nodes; ActionNode represents simple
-// ones such as field evaluations and parenthesized pipelines.
-type ActionNode struct {
- NodeType
- Pos
- tr *Tree
- Line int // The line number in the input (deprecated; kept for compatibility)
- Pipe *PipeNode // The pipeline in the action.
-}
-
-func (t *Tree) newAction(pos Pos, line int, pipe *PipeNode) *ActionNode {
- return &ActionNode{tr: t, NodeType: NodeAction, Pos: pos, Line: line, Pipe: pipe}
-}
-
-func (a *ActionNode) String() string {
- return fmt.Sprintf("{{%s}}", a.Pipe)
-
-}
-
-func (a *ActionNode) tree() *Tree {
- return a.tr
-}
-
-func (a *ActionNode) Copy() Node {
- return a.tr.newAction(a.Pos, a.Line, a.Pipe.CopyPipe())
-
-}
-
-// CommandNode holds a command (a pipeline inside an evaluating action).
-type CommandNode struct {
- NodeType
- Pos
- tr *Tree
- Args []Node // Arguments in lexical order: Identifier, field, or constant.
-}
-
-func (t *Tree) newCommand(pos Pos) *CommandNode {
- return &CommandNode{tr: t, NodeType: NodeCommand, Pos: pos}
-}
-
-func (c *CommandNode) append(arg Node) {
- c.Args = append(c.Args, arg)
-}
-
-func (c *CommandNode) String() string {
- s := ""
- for i, arg := range c.Args {
- if i > 0 {
- s += " "
- }
- if arg, ok := arg.(*PipeNode); ok {
- s += "(" + arg.String() + ")"
- continue
- }
- s += arg.String()
- }
- return s
-}
-
-func (c *CommandNode) tree() *Tree {
- return c.tr
-}
-
-func (c *CommandNode) Copy() Node {
- if c == nil {
- return c
- }
- n := c.tr.newCommand(c.Pos)
- for _, c := range c.Args {
- n.append(c.Copy())
- }
- return n
-}
-
-// IdentifierNode holds an identifier.
-type IdentifierNode struct {
- NodeType
- Pos
- tr *Tree
- Ident string // The identifier's name.
-}
-
-// NewIdentifier returns a new IdentifierNode with the given identifier name.
-func NewIdentifier(ident string) *IdentifierNode {
- return &IdentifierNode{NodeType: NodeIdentifier, Ident: ident}
-}
-
-// SetPos sets the position. NewIdentifier is a public method so we can't modify its signature.
-// Chained for convenience.
-// TODO: fix one day?
-func (i *IdentifierNode) SetPos(pos Pos) *IdentifierNode {
- i.Pos = pos
- return i
-}
-
-// SetTree sets the parent tree for the node. NewIdentifier is a public method so we can't modify its signature.
-// Chained for convenience.
-// TODO: fix one day?
-func (i *IdentifierNode) SetTree(t *Tree) *IdentifierNode {
- i.tr = t
- return i
-}
-
-func (i *IdentifierNode) String() string {
- return i.Ident
-}
-
-func (i *IdentifierNode) tree() *Tree {
- return i.tr
-}
-
-func (i *IdentifierNode) Copy() Node {
- return NewIdentifier(i.Ident).SetTree(i.tr).SetPos(i.Pos)
-}
-
-// VariableNode holds a list of variable names, possibly with chained field
-// accesses. The dollar sign is part of the (first) name.
-type VariableNode struct {
- NodeType
- Pos
- tr *Tree
- Ident []string // Variable name and fields in lexical order.
-}
-
-func (t *Tree) newVariable(pos Pos, ident string) *VariableNode {
- return &VariableNode{tr: t, NodeType: NodeVariable, Pos: pos, Ident: strings.Split(ident, ".")}
-}
-
-func (v *VariableNode) String() string {
- s := ""
- for i, id := range v.Ident {
- if i > 0 {
- s += "."
- }
- s += id
- }
- return s
-}
-
-func (v *VariableNode) tree() *Tree {
- return v.tr
-}
-
-func (v *VariableNode) Copy() Node {
- return &VariableNode{tr: v.tr, NodeType: NodeVariable, Pos: v.Pos, Ident: append([]string{}, v.Ident...)}
-}
-
-// DotNode holds the special identifier '.'.
-type DotNode struct {
- NodeType
- Pos
- tr *Tree
-}
-
-func (t *Tree) newDot(pos Pos) *DotNode {
- return &DotNode{tr: t, NodeType: NodeDot, Pos: pos}
-}
-
-func (d *DotNode) Type() NodeType {
- // Override method on embedded NodeType for API compatibility.
- // TODO: Not really a problem; could change API without effect but
- // api tool complains.
- return NodeDot
-}
-
-func (d *DotNode) String() string {
- return "."
-}
-
-func (d *DotNode) tree() *Tree {
- return d.tr
-}
-
-func (d *DotNode) Copy() Node {
- return d.tr.newDot(d.Pos)
-}
-
-// NilNode holds the special identifier 'nil' representing an untyped nil constant.
-type NilNode struct {
- NodeType
- Pos
- tr *Tree
-}
-
-func (t *Tree) newNil(pos Pos) *NilNode {
- return &NilNode{tr: t, NodeType: NodeNil, Pos: pos}
-}
-
-func (n *NilNode) Type() NodeType {
- // Override method on embedded NodeType for API compatibility.
- // TODO: Not really a problem; could change API without effect but
- // api tool complains.
- return NodeNil
-}
-
-func (n *NilNode) String() string {
- return "nil"
-}
-
-func (n *NilNode) tree() *Tree {
- return n.tr
-}
-
-func (n *NilNode) Copy() Node {
- return n.tr.newNil(n.Pos)
-}
-
-// FieldNode holds a field (identifier starting with '.').
-// The names may be chained ('.x.y').
-// The period is dropped from each ident.
-type FieldNode struct {
- NodeType
- Pos
- tr *Tree
- Ident []string // The identifiers in lexical order.
-}
-
-func (t *Tree) newField(pos Pos, ident string) *FieldNode {
- return &FieldNode{tr: t, NodeType: NodeField, Pos: pos, Ident: strings.Split(ident[1:], ".")} // [1:] to drop leading period
-}
-
-func (f *FieldNode) String() string {
- s := ""
- for _, id := range f.Ident {
- s += "." + id
- }
- return s
-}
-
-func (f *FieldNode) tree() *Tree {
- return f.tr
-}
-
-func (f *FieldNode) Copy() Node {
- return &FieldNode{tr: f.tr, NodeType: NodeField, Pos: f.Pos, Ident: append([]string{}, f.Ident...)}
-}
-
-// ChainNode holds a term followed by a chain of field accesses (identifier starting with '.').
-// The names may be chained ('.x.y').
-// The periods are dropped from each ident.
-type ChainNode struct {
- NodeType
- Pos
- tr *Tree
- Node Node
- Field []string // The identifiers in lexical order.
-}
-
-func (t *Tree) newChain(pos Pos, node Node) *ChainNode {
- return &ChainNode{tr: t, NodeType: NodeChain, Pos: pos, Node: node}
-}
-
-// Add adds the named field (which should start with a period) to the end of the chain.
-func (c *ChainNode) Add(field string) {
- if len(field) == 0 || field[0] != '.' {
- panic("no dot in field")
- }
- field = field[1:] // Remove leading dot.
- if field == "" {
- panic("empty field")
- }
- c.Field = append(c.Field, field)
-}
-
-func (c *ChainNode) String() string {
- s := c.Node.String()
- if _, ok := c.Node.(*PipeNode); ok {
- s = "(" + s + ")"
- }
- for _, field := range c.Field {
- s += "." + field
- }
- return s
-}
-
-func (c *ChainNode) tree() *Tree {
- return c.tr
-}
-
-func (c *ChainNode) Copy() Node {
- return &ChainNode{tr: c.tr, NodeType: NodeChain, Pos: c.Pos, Node: c.Node, Field: append([]string{}, c.Field...)}
-}
-
-// BoolNode holds a boolean constant.
-type BoolNode struct {
- NodeType
- Pos
- tr *Tree
- True bool // The value of the boolean constant.
-}
-
-func (t *Tree) newBool(pos Pos, true bool) *BoolNode {
- return &BoolNode{tr: t, NodeType: NodeBool, Pos: pos, True: true}
-}
-
-func (b *BoolNode) String() string {
- if b.True {
- return "true"
- }
- return "false"
-}
-
-func (b *BoolNode) tree() *Tree {
- return b.tr
-}
-
-func (b *BoolNode) Copy() Node {
- return b.tr.newBool(b.Pos, b.True)
-}
-
-// NumberNode holds a number: signed or unsigned integer, float, or complex.
-// The value is parsed and stored under all the types that can represent the value.
-// This simulates in a small amount of code the behavior of Go's ideal constants.
-type NumberNode struct {
- NodeType
- Pos
- tr *Tree
- IsInt bool // Number has an integral value.
- IsUint bool // Number has an unsigned integral value.
- IsFloat bool // Number has a floating-point value.
- IsComplex bool // Number is complex.
- Int64 int64 // The signed integer value.
- Uint64 uint64 // The unsigned integer value.
- Float64 float64 // The floating-point value.
- Complex128 complex128 // The complex value.
- Text string // The original textual representation from the input.
-}
-
-func (t *Tree) newNumber(pos Pos, text string, typ itemType) (*NumberNode, error) {
- n := &NumberNode{tr: t, NodeType: NodeNumber, Pos: pos, Text: text}
- switch typ {
- case itemCharConstant:
- rune, _, tail, err := strconv.UnquoteChar(text[1:], text[0])
- if err != nil {
- return nil, err
- }
- if tail != "'" {
- return nil, fmt.Errorf("malformed character constant: %s", text)
- }
- n.Int64 = int64(rune)
- n.IsInt = true
- n.Uint64 = uint64(rune)
- n.IsUint = true
- n.Float64 = float64(rune) // odd but those are the rules.
- n.IsFloat = true
- return n, nil
- case itemComplex:
- // fmt.Sscan can parse the pair, so let it do the work.
- if _, err := fmt.Sscan(text, &n.Complex128); err != nil {
- return nil, err
- }
- n.IsComplex = true
- n.simplifyComplex()
- return n, nil
- }
- // Imaginary constants can only be complex unless they are zero.
- if len(text) > 0 && text[len(text)-1] == 'i' {
- f, err := strconv.ParseFloat(text[:len(text)-1], 64)
- if err == nil {
- n.IsComplex = true
- n.Complex128 = complex(0, f)
- n.simplifyComplex()
- return n, nil
- }
- }
- // Do integer test first so we get 0x123 etc.
- u, err := strconv.ParseUint(text, 0, 64) // will fail for -0; fixed below.
- if err == nil {
- n.IsUint = true
- n.Uint64 = u
- }
- i, err := strconv.ParseInt(text, 0, 64)
- if err == nil {
- n.IsInt = true
- n.Int64 = i
- if i == 0 {
- n.IsUint = true // in case of -0.
- n.Uint64 = u
- }
- }
- // If an integer extraction succeeded, promote the float.
- if n.IsInt {
- n.IsFloat = true
- n.Float64 = float64(n.Int64)
- } else if n.IsUint {
- n.IsFloat = true
- n.Float64 = float64(n.Uint64)
- } else {
- f, err := strconv.ParseFloat(text, 64)
- if err == nil {
- n.IsFloat = true
- n.Float64 = f
- // If a floating-point extraction succeeded, extract the int if needed.
- if !n.IsInt && float64(int64(f)) == f {
- n.IsInt = true
- n.Int64 = int64(f)
- }
- if !n.IsUint && float64(uint64(f)) == f {
- n.IsUint = true
- n.Uint64 = uint64(f)
- }
- }
- }
- if !n.IsInt && !n.IsUint && !n.IsFloat {
- return nil, fmt.Errorf("illegal number syntax: %q", text)
- }
- return n, nil
-}
-
-// simplifyComplex pulls out any other types that are represented by the complex number.
-// These all require that the imaginary part be zero.
-func (n *NumberNode) simplifyComplex() {
- n.IsFloat = imag(n.Complex128) == 0
- if n.IsFloat {
- n.Float64 = real(n.Complex128)
- n.IsInt = float64(int64(n.Float64)) == n.Float64
- if n.IsInt {
- n.Int64 = int64(n.Float64)
- }
- n.IsUint = float64(uint64(n.Float64)) == n.Float64
- if n.IsUint {
- n.Uint64 = uint64(n.Float64)
- }
- }
-}
-
-func (n *NumberNode) String() string {
- return n.Text
-}
-
-func (n *NumberNode) tree() *Tree {
- return n.tr
-}
-
-func (n *NumberNode) Copy() Node {
- nn := new(NumberNode)
- *nn = *n // Easy, fast, correct.
- return nn
-}
-
-// StringNode holds a string constant. The value has been "unquoted".
-type StringNode struct {
- NodeType
- Pos
- tr *Tree
- Quoted string // The original text of the string, with quotes.
- Text string // The string, after quote processing.
-}
-
-func (t *Tree) newString(pos Pos, orig, text string) *StringNode {
- return &StringNode{tr: t, NodeType: NodeString, Pos: pos, Quoted: orig, Text: text}
-}
-
-func (s *StringNode) String() string {
- return s.Quoted
-}
-
-func (s *StringNode) tree() *Tree {
- return s.tr
-}
-
-func (s *StringNode) Copy() Node {
- return s.tr.newString(s.Pos, s.Quoted, s.Text)
-}
-
-// endNode represents an {{end}} action.
-// It does not appear in the final parse tree.
-type endNode struct {
- NodeType
- Pos
- tr *Tree
-}
-
-func (t *Tree) newEnd(pos Pos) *endNode {
- return &endNode{tr: t, NodeType: nodeEnd, Pos: pos}
-}
-
-func (e *endNode) String() string {
- return "{{end}}"
-}
-
-func (e *endNode) tree() *Tree {
- return e.tr
-}
-
-func (e *endNode) Copy() Node {
- return e.tr.newEnd(e.Pos)
-}
-
-// elseNode represents an {{else}} action. Does not appear in the final tree.
-type elseNode struct {
- NodeType
- Pos
- tr *Tree
- Line int // The line number in the input (deprecated; kept for compatibility)
-}
-
-func (t *Tree) newElse(pos Pos, line int) *elseNode {
- return &elseNode{tr: t, NodeType: nodeElse, Pos: pos, Line: line}
-}
-
-func (e *elseNode) Type() NodeType {
- return nodeElse
-}
-
-func (e *elseNode) String() string {
- return "{{else}}"
-}
-
-func (e *elseNode) tree() *Tree {
- return e.tr
-}
-
-func (e *elseNode) Copy() Node {
- return e.tr.newElse(e.Pos, e.Line)
-}
-
-// BranchNode is the common representation of if, range, and with.
-type BranchNode struct {
- NodeType
- Pos
- tr *Tree
- Line int // The line number in the input (deprecated; kept for compatibility)
- Pipe *PipeNode // The pipeline to be evaluated.
- List *ListNode // What to execute if the value is non-empty.
- ElseList *ListNode // What to execute if the value is empty (nil if absent).
-}
-
-func (b *BranchNode) String() string {
- name := ""
- switch b.NodeType {
- case NodeIf:
- name = "if"
- case NodeRange:
- name = "range"
- case NodeWith:
- name = "with"
- default:
- panic("unknown branch type")
- }
- if b.ElseList != nil {
- return fmt.Sprintf("{{%s %s}}%s{{else}}%s{{end}}", name, b.Pipe, b.List, b.ElseList)
- }
- return fmt.Sprintf("{{%s %s}}%s{{end}}", name, b.Pipe, b.List)
-}
-
-func (b *BranchNode) tree() *Tree {
- return b.tr
-}
-
-func (b *BranchNode) Copy() Node {
- switch b.NodeType {
- case NodeIf:
- return b.tr.newIf(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
- case NodeRange:
- return b.tr.newRange(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
- case NodeWith:
- return b.tr.newWith(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
- default:
- panic("unknown branch type")
- }
-}
-
-// IfNode represents an {{if}} action and its commands.
-type IfNode struct {
- BranchNode
-}
-
-func (t *Tree) newIf(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *IfNode {
- return &IfNode{BranchNode{tr: t, NodeType: NodeIf, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
-}
-
-func (i *IfNode) Copy() Node {
- return i.tr.newIf(i.Pos, i.Line, i.Pipe.CopyPipe(), i.List.CopyList(), i.ElseList.CopyList())
-}
-
-// RangeNode represents a {{range}} action and its commands.
-type RangeNode struct {
- BranchNode
-}
-
-func (t *Tree) newRange(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *RangeNode {
- return &RangeNode{BranchNode{tr: t, NodeType: NodeRange, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
-}
-
-func (r *RangeNode) Copy() Node {
- return r.tr.newRange(r.Pos, r.Line, r.Pipe.CopyPipe(), r.List.CopyList(), r.ElseList.CopyList())
-}
-
-// WithNode represents a {{with}} action and its commands.
-type WithNode struct {
- BranchNode
-}
-
-func (t *Tree) newWith(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *WithNode {
- return &WithNode{BranchNode{tr: t, NodeType: NodeWith, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
-}
-
-func (w *WithNode) Copy() Node {
- return w.tr.newWith(w.Pos, w.Line, w.Pipe.CopyPipe(), w.List.CopyList(), w.ElseList.CopyList())
-}
-
-// TemplateNode represents a {{template}} action.
-type TemplateNode struct {
- NodeType
- Pos
- tr *Tree
- Line int // The line number in the input (deprecated; kept for compatibility)
- Name string // The name of the template (unquoted).
- Pipe *PipeNode // The command to evaluate as dot for the template.
-}
-
-func (t *Tree) newTemplate(pos Pos, line int, name string, pipe *PipeNode) *TemplateNode {
- return &TemplateNode{tr: t, NodeType: NodeTemplate, Pos: pos, Line: line, Name: name, Pipe: pipe}
-}
-
-func (t *TemplateNode) String() string {
- if t.Pipe == nil {
- return fmt.Sprintf("{{template %q}}", t.Name)
- }
- return fmt.Sprintf("{{template %q %s}}", t.Name, t.Pipe)
-}
-
-func (t *TemplateNode) tree() *Tree {
- return t.tr
-}
-
-func (t *TemplateNode) Copy() Node {
- return t.tr.newTemplate(t.Pos, t.Line, t.Name, t.Pipe.CopyPipe())
-}
diff --git a/vendor/github.com/alecthomas/template/parse/parse.go b/vendor/github.com/alecthomas/template/parse/parse.go
deleted file mode 100644
index 0d77ade87..000000000
--- a/vendor/github.com/alecthomas/template/parse/parse.go
+++ /dev/null
@@ -1,700 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package parse builds parse trees for templates as defined by text/template
-// and html/template. Clients should use those packages to construct templates
-// rather than this one, which provides shared internal data structures not
-// intended for general use.
-package parse
-
-import (
- "bytes"
- "fmt"
- "runtime"
- "strconv"
- "strings"
-)
-
-// Tree is the representation of a single parsed template.
-type Tree struct {
- Name string // name of the template represented by the tree.
- ParseName string // name of the top-level template during parsing, for error messages.
- Root *ListNode // top-level root of the tree.
- text string // text parsed to create the template (or its parent)
- // Parsing only; cleared after parse.
- funcs []map[string]interface{}
- lex *lexer
- token [3]item // three-token lookahead for parser.
- peekCount int
- vars []string // variables defined at the moment.
-}
-
-// Copy returns a copy of the Tree. Any parsing state is discarded.
-func (t *Tree) Copy() *Tree {
- if t == nil {
- return nil
- }
- return &Tree{
- Name: t.Name,
- ParseName: t.ParseName,
- Root: t.Root.CopyList(),
- text: t.text,
- }
-}
-
-// Parse returns a map from template name to parse.Tree, created by parsing the
-// templates described in the argument string. The top-level template will be
-// given the specified name. If an error is encountered, parsing stops and an
-// empty map is returned with the error.
-func Parse(name, text, leftDelim, rightDelim string, funcs ...map[string]interface{}) (treeSet map[string]*Tree, err error) {
- treeSet = make(map[string]*Tree)
- t := New(name)
- t.text = text
- _, err = t.Parse(text, leftDelim, rightDelim, treeSet, funcs...)
- return
-}
-
-// next returns the next token.
-func (t *Tree) next() item {
- if t.peekCount > 0 {
- t.peekCount--
- } else {
- t.token[0] = t.lex.nextItem()
- }
- return t.token[t.peekCount]
-}
-
-// backup backs the input stream up one token.
-func (t *Tree) backup() {
- t.peekCount++
-}
-
-// backup2 backs the input stream up two tokens.
-// The zeroth token is already there.
-func (t *Tree) backup2(t1 item) {
- t.token[1] = t1
- t.peekCount = 2
-}
-
-// backup3 backs the input stream up three tokens
-// The zeroth token is already there.
-func (t *Tree) backup3(t2, t1 item) { // Reverse order: we're pushing back.
- t.token[1] = t1
- t.token[2] = t2
- t.peekCount = 3
-}
-
-// peek returns but does not consume the next token.
-func (t *Tree) peek() item {
- if t.peekCount > 0 {
- return t.token[t.peekCount-1]
- }
- t.peekCount = 1
- t.token[0] = t.lex.nextItem()
- return t.token[0]
-}
-
-// nextNonSpace returns the next non-space token.
-func (t *Tree) nextNonSpace() (token item) {
- for {
- token = t.next()
- if token.typ != itemSpace {
- break
- }
- }
- return token
-}
-
-// peekNonSpace returns but does not consume the next non-space token.
-func (t *Tree) peekNonSpace() (token item) {
- for {
- token = t.next()
- if token.typ != itemSpace {
- break
- }
- }
- t.backup()
- return token
-}
-
-// Parsing.
-
-// New allocates a new parse tree with the given name.
-func New(name string, funcs ...map[string]interface{}) *Tree {
- return &Tree{
- Name: name,
- funcs: funcs,
- }
-}
-
-// ErrorContext returns a textual representation of the location of the node in the input text.
-// The receiver is only used when the node does not have a pointer to the tree inside,
-// which can occur in old code.
-func (t *Tree) ErrorContext(n Node) (location, context string) {
- pos := int(n.Position())
- tree := n.tree()
- if tree == nil {
- tree = t
- }
- text := tree.text[:pos]
- byteNum := strings.LastIndex(text, "\n")
- if byteNum == -1 {
- byteNum = pos // On first line.
- } else {
- byteNum++ // After the newline.
- byteNum = pos - byteNum
- }
- lineNum := 1 + strings.Count(text, "\n")
- context = n.String()
- if len(context) > 20 {
- context = fmt.Sprintf("%.20s...", context)
- }
- return fmt.Sprintf("%s:%d:%d", tree.ParseName, lineNum, byteNum), context
-}
-
-// errorf formats the error and terminates processing.
-func (t *Tree) errorf(format string, args ...interface{}) {
- t.Root = nil
- format = fmt.Sprintf("template: %s:%d: %s", t.ParseName, t.lex.lineNumber(), format)
- panic(fmt.Errorf(format, args...))
-}
-
-// error terminates processing.
-func (t *Tree) error(err error) {
- t.errorf("%s", err)
-}
-
-// expect consumes the next token and guarantees it has the required type.
-func (t *Tree) expect(expected itemType, context string) item {
- token := t.nextNonSpace()
- if token.typ != expected {
- t.unexpected(token, context)
- }
- return token
-}
-
-// expectOneOf consumes the next token and guarantees it has one of the required types.
-func (t *Tree) expectOneOf(expected1, expected2 itemType, context string) item {
- token := t.nextNonSpace()
- if token.typ != expected1 && token.typ != expected2 {
- t.unexpected(token, context)
- }
- return token
-}
-
-// unexpected complains about the token and terminates processing.
-func (t *Tree) unexpected(token item, context string) {
- t.errorf("unexpected %s in %s", token, context)
-}
-
-// recover is the handler that turns panics into returns from the top level of Parse.
-func (t *Tree) recover(errp *error) {
- e := recover()
- if e != nil {
- if _, ok := e.(runtime.Error); ok {
- panic(e)
- }
- if t != nil {
- t.stopParse()
- }
- *errp = e.(error)
- }
- return
-}
-
-// startParse initializes the parser, using the lexer.
-func (t *Tree) startParse(funcs []map[string]interface{}, lex *lexer) {
- t.Root = nil
- t.lex = lex
- t.vars = []string{"$"}
- t.funcs = funcs
-}
-
-// stopParse terminates parsing.
-func (t *Tree) stopParse() {
- t.lex = nil
- t.vars = nil
- t.funcs = nil
-}
-
-// Parse parses the template definition string to construct a representation of
-// the template for execution. If either action delimiter string is empty, the
-// default ("{{" or "}}") is used. Embedded template definitions are added to
-// the treeSet map.
-func (t *Tree) Parse(text, leftDelim, rightDelim string, treeSet map[string]*Tree, funcs ...map[string]interface{}) (tree *Tree, err error) {
- defer t.recover(&err)
- t.ParseName = t.Name
- t.startParse(funcs, lex(t.Name, text, leftDelim, rightDelim))
- t.text = text
- t.parse(treeSet)
- t.add(treeSet)
- t.stopParse()
- return t, nil
-}
-
-// add adds tree to the treeSet.
-func (t *Tree) add(treeSet map[string]*Tree) {
- tree := treeSet[t.Name]
- if tree == nil || IsEmptyTree(tree.Root) {
- treeSet[t.Name] = t
- return
- }
- if !IsEmptyTree(t.Root) {
- t.errorf("template: multiple definition of template %q", t.Name)
- }
-}
-
-// IsEmptyTree reports whether this tree (node) is empty of everything but space.
-func IsEmptyTree(n Node) bool {
- switch n := n.(type) {
- case nil:
- return true
- case *ActionNode:
- case *IfNode:
- case *ListNode:
- for _, node := range n.Nodes {
- if !IsEmptyTree(node) {
- return false
- }
- }
- return true
- case *RangeNode:
- case *TemplateNode:
- case *TextNode:
- return len(bytes.TrimSpace(n.Text)) == 0
- case *WithNode:
- default:
- panic("unknown node: " + n.String())
- }
- return false
-}
-
-// parse is the top-level parser for a template, essentially the same
-// as itemList except it also parses {{define}} actions.
-// It runs to EOF.
-func (t *Tree) parse(treeSet map[string]*Tree) (next Node) {
- t.Root = t.newList(t.peek().pos)
- for t.peek().typ != itemEOF {
- if t.peek().typ == itemLeftDelim {
- delim := t.next()
- if t.nextNonSpace().typ == itemDefine {
- newT := New("definition") // name will be updated once we know it.
- newT.text = t.text
- newT.ParseName = t.ParseName
- newT.startParse(t.funcs, t.lex)
- newT.parseDefinition(treeSet)
- continue
- }
- t.backup2(delim)
- }
- n := t.textOrAction()
- if n.Type() == nodeEnd {
- t.errorf("unexpected %s", n)
- }
- t.Root.append(n)
- }
- return nil
-}
-
-// parseDefinition parses a {{define}} ... {{end}} template definition and
-// installs the definition in the treeSet map. The "define" keyword has already
-// been scanned.
-func (t *Tree) parseDefinition(treeSet map[string]*Tree) {
- const context = "define clause"
- name := t.expectOneOf(itemString, itemRawString, context)
- var err error
- t.Name, err = strconv.Unquote(name.val)
- if err != nil {
- t.error(err)
- }
- t.expect(itemRightDelim, context)
- var end Node
- t.Root, end = t.itemList()
- if end.Type() != nodeEnd {
- t.errorf("unexpected %s in %s", end, context)
- }
- t.add(treeSet)
- t.stopParse()
-}
-
-// itemList:
-// textOrAction*
-// Terminates at {{end}} or {{else}}, returned separately.
-func (t *Tree) itemList() (list *ListNode, next Node) {
- list = t.newList(t.peekNonSpace().pos)
- for t.peekNonSpace().typ != itemEOF {
- n := t.textOrAction()
- switch n.Type() {
- case nodeEnd, nodeElse:
- return list, n
- }
- list.append(n)
- }
- t.errorf("unexpected EOF")
- return
-}
-
-// textOrAction:
-// text | action
-func (t *Tree) textOrAction() Node {
- switch token := t.nextNonSpace(); token.typ {
- case itemElideNewline:
- return t.elideNewline()
- case itemText:
- return t.newText(token.pos, token.val)
- case itemLeftDelim:
- return t.action()
- default:
- t.unexpected(token, "input")
- }
- return nil
-}
-
-// elideNewline:
-// Remove newlines trailing rightDelim if \\ is present.
-func (t *Tree) elideNewline() Node {
- token := t.peek()
- if token.typ != itemText {
- t.unexpected(token, "input")
- return nil
- }
-
- t.next()
- stripped := strings.TrimLeft(token.val, "\n\r")
- diff := len(token.val) - len(stripped)
- if diff > 0 {
- // This is a bit nasty. We mutate the token in-place to remove
- // preceding newlines.
- token.pos += Pos(diff)
- token.val = stripped
- }
- return t.newText(token.pos, token.val)
-}
-
-// Action:
-// control
-// command ("|" command)*
-// Left delim is past. Now get actions.
-// First word could be a keyword such as range.
-func (t *Tree) action() (n Node) {
- switch token := t.nextNonSpace(); token.typ {
- case itemElse:
- return t.elseControl()
- case itemEnd:
- return t.endControl()
- case itemIf:
- return t.ifControl()
- case itemRange:
- return t.rangeControl()
- case itemTemplate:
- return t.templateControl()
- case itemWith:
- return t.withControl()
- }
- t.backup()
- // Do not pop variables; they persist until "end".
- return t.newAction(t.peek().pos, t.lex.lineNumber(), t.pipeline("command"))
-}
-
-// Pipeline:
-// declarations? command ('|' command)*
-func (t *Tree) pipeline(context string) (pipe *PipeNode) {
- var decl []*VariableNode
- pos := t.peekNonSpace().pos
- // Are there declarations?
- for {
- if v := t.peekNonSpace(); v.typ == itemVariable {
- t.next()
- // Since space is a token, we need 3-token look-ahead here in the worst case:
- // in "$x foo" we need to read "foo" (as opposed to ":=") to know that $x is an
- // argument variable rather than a declaration. So remember the token
- // adjacent to the variable so we can push it back if necessary.
- tokenAfterVariable := t.peek()
- if next := t.peekNonSpace(); next.typ == itemColonEquals || (next.typ == itemChar && next.val == ",") {
- t.nextNonSpace()
- variable := t.newVariable(v.pos, v.val)
- decl = append(decl, variable)
- t.vars = append(t.vars, v.val)
- if next.typ == itemChar && next.val == "," {
- if context == "range" && len(decl) < 2 {
- continue
- }
- t.errorf("too many declarations in %s", context)
- }
- } else if tokenAfterVariable.typ == itemSpace {
- t.backup3(v, tokenAfterVariable)
- } else {
- t.backup2(v)
- }
- }
- break
- }
- pipe = t.newPipeline(pos, t.lex.lineNumber(), decl)
- for {
- switch token := t.nextNonSpace(); token.typ {
- case itemRightDelim, itemRightParen:
- if len(pipe.Cmds) == 0 {
- t.errorf("missing value for %s", context)
- }
- if token.typ == itemRightParen {
- t.backup()
- }
- return
- case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier,
- itemNumber, itemNil, itemRawString, itemString, itemVariable, itemLeftParen:
- t.backup()
- pipe.append(t.command())
- default:
- t.unexpected(token, context)
- }
- }
-}
-
-func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) {
- defer t.popVars(len(t.vars))
- line = t.lex.lineNumber()
- pipe = t.pipeline(context)
- var next Node
- list, next = t.itemList()
- switch next.Type() {
- case nodeEnd: //done
- case nodeElse:
- if allowElseIf {
- // Special case for "else if". If the "else" is followed immediately by an "if",
- // the elseControl will have left the "if" token pending. Treat
- // {{if a}}_{{else if b}}_{{end}}
- // as
- // {{if a}}_{{else}}{{if b}}_{{end}}{{end}}.
- // To do this, parse the if as usual and stop at it {{end}}; the subsequent{{end}}
- // is assumed. This technique works even for long if-else-if chains.
- // TODO: Should we allow else-if in with and range?
- if t.peek().typ == itemIf {
- t.next() // Consume the "if" token.
- elseList = t.newList(next.Position())
- elseList.append(t.ifControl())
- // Do not consume the next item - only one {{end}} required.
- break
- }
- }
- elseList, next = t.itemList()
- if next.Type() != nodeEnd {
- t.errorf("expected end; found %s", next)
- }
- }
- return pipe.Position(), line, pipe, list, elseList
-}
-
-// If:
-// {{if pipeline}} itemList {{end}}
-// {{if pipeline}} itemList {{else}} itemList {{end}}
-// If keyword is past.
-func (t *Tree) ifControl() Node {
- return t.newIf(t.parseControl(true, "if"))
-}
-
-// Range:
-// {{range pipeline}} itemList {{end}}
-// {{range pipeline}} itemList {{else}} itemList {{end}}
-// Range keyword is past.
-func (t *Tree) rangeControl() Node {
- return t.newRange(t.parseControl(false, "range"))
-}
-
-// With:
-// {{with pipeline}} itemList {{end}}
-// {{with pipeline}} itemList {{else}} itemList {{end}}
-// If keyword is past.
-func (t *Tree) withControl() Node {
- return t.newWith(t.parseControl(false, "with"))
-}
-
-// End:
-// {{end}}
-// End keyword is past.
-func (t *Tree) endControl() Node {
- return t.newEnd(t.expect(itemRightDelim, "end").pos)
-}
-
-// Else:
-// {{else}}
-// Else keyword is past.
-func (t *Tree) elseControl() Node {
- // Special case for "else if".
- peek := t.peekNonSpace()
- if peek.typ == itemIf {
- // We see "{{else if ... " but in effect rewrite it to {{else}}{{if ... ".
- return t.newElse(peek.pos, t.lex.lineNumber())
- }
- return t.newElse(t.expect(itemRightDelim, "else").pos, t.lex.lineNumber())
-}
-
-// Template:
-// {{template stringValue pipeline}}
-// Template keyword is past. The name must be something that can evaluate
-// to a string.
-func (t *Tree) templateControl() Node {
- var name string
- token := t.nextNonSpace()
- switch token.typ {
- case itemString, itemRawString:
- s, err := strconv.Unquote(token.val)
- if err != nil {
- t.error(err)
- }
- name = s
- default:
- t.unexpected(token, "template invocation")
- }
- var pipe *PipeNode
- if t.nextNonSpace().typ != itemRightDelim {
- t.backup()
- // Do not pop variables; they persist until "end".
- pipe = t.pipeline("template")
- }
- return t.newTemplate(token.pos, t.lex.lineNumber(), name, pipe)
-}
-
-// command:
-// operand (space operand)*
-// space-separated arguments up to a pipeline character or right delimiter.
-// we consume the pipe character but leave the right delim to terminate the action.
-func (t *Tree) command() *CommandNode {
- cmd := t.newCommand(t.peekNonSpace().pos)
- for {
- t.peekNonSpace() // skip leading spaces.
- operand := t.operand()
- if operand != nil {
- cmd.append(operand)
- }
- switch token := t.next(); token.typ {
- case itemSpace:
- continue
- case itemError:
- t.errorf("%s", token.val)
- case itemRightDelim, itemRightParen:
- t.backup()
- case itemPipe:
- default:
- t.errorf("unexpected %s in operand; missing space?", token)
- }
- break
- }
- if len(cmd.Args) == 0 {
- t.errorf("empty command")
- }
- return cmd
-}
-
-// operand:
-// term .Field*
-// An operand is a space-separated component of a command,
-// a term possibly followed by field accesses.
-// A nil return means the next item is not an operand.
-func (t *Tree) operand() Node {
- node := t.term()
- if node == nil {
- return nil
- }
- if t.peek().typ == itemField {
- chain := t.newChain(t.peek().pos, node)
- for t.peek().typ == itemField {
- chain.Add(t.next().val)
- }
- // Compatibility with original API: If the term is of type NodeField
- // or NodeVariable, just put more fields on the original.
- // Otherwise, keep the Chain node.
- // TODO: Switch to Chains always when we can.
- switch node.Type() {
- case NodeField:
- node = t.newField(chain.Position(), chain.String())
- case NodeVariable:
- node = t.newVariable(chain.Position(), chain.String())
- default:
- node = chain
- }
- }
- return node
-}
-
-// term:
-// literal (number, string, nil, boolean)
-// function (identifier)
-// .
-// .Field
-// $
-// '(' pipeline ')'
-// A term is a simple "expression".
-// A nil return means the next item is not a term.
-func (t *Tree) term() Node {
- switch token := t.nextNonSpace(); token.typ {
- case itemError:
- t.errorf("%s", token.val)
- case itemIdentifier:
- if !t.hasFunction(token.val) {
- t.errorf("function %q not defined", token.val)
- }
- return NewIdentifier(token.val).SetTree(t).SetPos(token.pos)
- case itemDot:
- return t.newDot(token.pos)
- case itemNil:
- return t.newNil(token.pos)
- case itemVariable:
- return t.useVar(token.pos, token.val)
- case itemField:
- return t.newField(token.pos, token.val)
- case itemBool:
- return t.newBool(token.pos, token.val == "true")
- case itemCharConstant, itemComplex, itemNumber:
- number, err := t.newNumber(token.pos, token.val, token.typ)
- if err != nil {
- t.error(err)
- }
- return number
- case itemLeftParen:
- pipe := t.pipeline("parenthesized pipeline")
- if token := t.next(); token.typ != itemRightParen {
- t.errorf("unclosed right paren: unexpected %s", token)
- }
- return pipe
- case itemString, itemRawString:
- s, err := strconv.Unquote(token.val)
- if err != nil {
- t.error(err)
- }
- return t.newString(token.pos, token.val, s)
- }
- t.backup()
- return nil
-}
-
-// hasFunction reports if a function name exists in the Tree's maps.
-func (t *Tree) hasFunction(name string) bool {
- for _, funcMap := range t.funcs {
- if funcMap == nil {
- continue
- }
- if funcMap[name] != nil {
- return true
- }
- }
- return false
-}
-
-// popVars trims the variable list to the specified length
-func (t *Tree) popVars(n int) {
- t.vars = t.vars[:n]
-}
-
-// useVar returns a node for a variable reference. It errors if the
-// variable is not defined.
-func (t *Tree) useVar(pos Pos, name string) Node {
- v := t.newVariable(pos, name)
- for _, varName := range t.vars {
- if varName == v.Ident[0] {
- return v
- }
- }
- t.errorf("undefined variable %q", v.Ident[0])
- return nil
-}
diff --git a/vendor/github.com/alecthomas/template/template.go b/vendor/github.com/alecthomas/template/template.go
deleted file mode 100644
index 447ed2aba..000000000
--- a/vendor/github.com/alecthomas/template/template.go
+++ /dev/null
@@ -1,218 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
- "fmt"
- "reflect"
-
- "github.com/alecthomas/template/parse"
-)
-
-// common holds the information shared by related templates.
-type common struct {
- tmpl map[string]*Template
- // We use two maps, one for parsing and one for execution.
- // This separation makes the API cleaner since it doesn't
- // expose reflection to the client.
- parseFuncs FuncMap
- execFuncs map[string]reflect.Value
-}
-
-// Template is the representation of a parsed template. The *parse.Tree
-// field is exported only for use by html/template and should be treated
-// as unexported by all other clients.
-type Template struct {
- name string
- *parse.Tree
- *common
- leftDelim string
- rightDelim string
-}
-
-// New allocates a new template with the given name.
-func New(name string) *Template {
- return &Template{
- name: name,
- }
-}
-
-// Name returns the name of the template.
-func (t *Template) Name() string {
- return t.name
-}
-
-// New allocates a new template associated with the given one and with the same
-// delimiters. The association, which is transitive, allows one template to
-// invoke another with a {{template}} action.
-func (t *Template) New(name string) *Template {
- t.init()
- return &Template{
- name: name,
- common: t.common,
- leftDelim: t.leftDelim,
- rightDelim: t.rightDelim,
- }
-}
-
-func (t *Template) init() {
- if t.common == nil {
- t.common = new(common)
- t.tmpl = make(map[string]*Template)
- t.parseFuncs = make(FuncMap)
- t.execFuncs = make(map[string]reflect.Value)
- }
-}
-
-// Clone returns a duplicate of the template, including all associated
-// templates. The actual representation is not copied, but the name space of
-// associated templates is, so further calls to Parse in the copy will add
-// templates to the copy but not to the original. Clone can be used to prepare
-// common templates and use them with variant definitions for other templates
-// by adding the variants after the clone is made.
-func (t *Template) Clone() (*Template, error) {
- nt := t.copy(nil)
- nt.init()
- nt.tmpl[t.name] = nt
- for k, v := range t.tmpl {
- if k == t.name { // Already installed.
- continue
- }
- // The associated templates share nt's common structure.
- tmpl := v.copy(nt.common)
- nt.tmpl[k] = tmpl
- }
- for k, v := range t.parseFuncs {
- nt.parseFuncs[k] = v
- }
- for k, v := range t.execFuncs {
- nt.execFuncs[k] = v
- }
- return nt, nil
-}
-
-// copy returns a shallow copy of t, with common set to the argument.
-func (t *Template) copy(c *common) *Template {
- nt := New(t.name)
- nt.Tree = t.Tree
- nt.common = c
- nt.leftDelim = t.leftDelim
- nt.rightDelim = t.rightDelim
- return nt
-}
-
-// AddParseTree creates a new template with the name and parse tree
-// and associates it with t.
-func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) {
- if t.common != nil && t.tmpl[name] != nil {
- return nil, fmt.Errorf("template: redefinition of template %q", name)
- }
- nt := t.New(name)
- nt.Tree = tree
- t.tmpl[name] = nt
- return nt, nil
-}
-
-// Templates returns a slice of the templates associated with t, including t
-// itself.
-func (t *Template) Templates() []*Template {
- if t.common == nil {
- return nil
- }
- // Return a slice so we don't expose the map.
- m := make([]*Template, 0, len(t.tmpl))
- for _, v := range t.tmpl {
- m = append(m, v)
- }
- return m
-}
-
-// Delims sets the action delimiters to the specified strings, to be used in
-// subsequent calls to Parse, ParseFiles, or ParseGlob. Nested template
-// definitions will inherit the settings. An empty delimiter stands for the
-// corresponding default: {{ or }}.
-// The return value is the template, so calls can be chained.
-func (t *Template) Delims(left, right string) *Template {
- t.leftDelim = left
- t.rightDelim = right
- return t
-}
-
-// Funcs adds the elements of the argument map to the template's function map.
-// It panics if a value in the map is not a function with appropriate return
-// type. However, it is legal to overwrite elements of the map. The return
-// value is the template, so calls can be chained.
-func (t *Template) Funcs(funcMap FuncMap) *Template {
- t.init()
- addValueFuncs(t.execFuncs, funcMap)
- addFuncs(t.parseFuncs, funcMap)
- return t
-}
-
-// Lookup returns the template with the given name that is associated with t,
-// or nil if there is no such template.
-func (t *Template) Lookup(name string) *Template {
- if t.common == nil {
- return nil
- }
- return t.tmpl[name]
-}
-
-// Parse parses a string into a template. Nested template definitions will be
-// associated with the top-level template t. Parse may be called multiple times
-// to parse definitions of templates to associate with t. It is an error if a
-// resulting template is non-empty (contains content other than template
-// definitions) and would replace a non-empty template with the same name.
-// (In multiple calls to Parse with the same receiver template, only one call
-// can contain text other than space, comments, and template definitions.)
-func (t *Template) Parse(text string) (*Template, error) {
- t.init()
- trees, err := parse.Parse(t.name, text, t.leftDelim, t.rightDelim, t.parseFuncs, builtins)
- if err != nil {
- return nil, err
- }
- // Add the newly parsed trees, including the one for t, into our common structure.
- for name, tree := range trees {
- // If the name we parsed is the name of this template, overwrite this template.
- // The associate method checks it's not a redefinition.
- tmpl := t
- if name != t.name {
- tmpl = t.New(name)
- }
- // Even if t == tmpl, we need to install it in the common.tmpl map.
- if replace, err := t.associate(tmpl, tree); err != nil {
- return nil, err
- } else if replace {
- tmpl.Tree = tree
- }
- tmpl.leftDelim = t.leftDelim
- tmpl.rightDelim = t.rightDelim
- }
- return t, nil
-}
-
-// associate installs the new template into the group of templates associated
-// with t. It is an error to reuse a name except to overwrite an empty
-// template. The two are already known to share the common structure.
-// The boolean return value reports wither to store this tree as t.Tree.
-func (t *Template) associate(new *Template, tree *parse.Tree) (bool, error) {
- if new.common != t.common {
- panic("internal error: associate not common")
- }
- name := new.name
- if old := t.tmpl[name]; old != nil {
- oldIsEmpty := parse.IsEmptyTree(old.Root)
- newIsEmpty := parse.IsEmptyTree(tree.Root)
- if newIsEmpty {
- // Whether old is empty or not, new is empty; no reason to replace old.
- return false, nil
- }
- if !oldIsEmpty {
- return false, fmt.Errorf("template: redefinition of template %q", name)
- }
- }
- t.tmpl[name] = new
- return true, nil
-}
diff --git a/vendor/github.com/alecthomas/units/bytes.go b/vendor/github.com/alecthomas/units/bytes.go
index 61d0ca479..cd439f51c 100644
--- a/vendor/github.com/alecthomas/units/bytes.go
+++ b/vendor/github.com/alecthomas/units/bytes.go
@@ -40,6 +40,12 @@ func (b Base2Bytes) String() string {
return ToString(int64(b), 1024, "iB", "B")
}
+func (b *Base2Bytes) UnmarshalText(text []byte) error {
+ n, err := ParseBase2Bytes(string(text))
+ *b = n
+ return err
+}
+
var (
metricBytesUnitMap = MakeUnitMap("B", "B", 1000)
)
diff --git a/vendor/github.com/armon/go-metrics/.travis.yml b/vendor/github.com/armon/go-metrics/.travis.yml
deleted file mode 100644
index 87d230c8d..000000000
--- a/vendor/github.com/armon/go-metrics/.travis.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-language: go
-
-go:
- - "1.x"
-
-env:
- - GO111MODULE=on
-
-install:
- - go get ./...
-
-script:
- - go test ./...
diff --git a/vendor/github.com/armon/go-metrics/go.mod b/vendor/github.com/armon/go-metrics/go.mod
deleted file mode 100644
index e3a656ed7..000000000
--- a/vendor/github.com/armon/go-metrics/go.mod
+++ /dev/null
@@ -1,17 +0,0 @@
-module github.com/armon/go-metrics
-
-go 1.12
-
-require (
- github.com/DataDog/datadog-go v3.2.0+incompatible
- github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible
- github.com/circonus-labs/circonusllhist v0.1.3 // indirect
- github.com/golang/protobuf v1.3.2
- github.com/hashicorp/go-immutable-radix v1.0.0
- github.com/hashicorp/go-retryablehttp v0.5.3 // indirect
- github.com/pascaldekloe/goe v0.1.0
- github.com/prometheus/client_golang v1.4.0
- github.com/prometheus/client_model v0.2.0
- github.com/prometheus/common v0.9.1
- github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 // indirect
-)
diff --git a/vendor/github.com/armon/go-metrics/go.sum b/vendor/github.com/armon/go-metrics/go.sum
deleted file mode 100644
index 519481e6b..000000000
--- a/vendor/github.com/armon/go-metrics/go.sum
+++ /dev/null
@@ -1,125 +0,0 @@
-github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4=
-github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
-github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
-github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY=
-github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
-github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA=
-github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
-github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig=
-github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
-github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-retryablehttp v0.5.3 h1:QlWt0KvWT0lq8MFppF9tsJGF+ynG7ztc2KIPhzRGk7s=
-github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
-github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM=
-github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
-github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.4.0 h1:YVIb/fVcOTMSqtqZWSKnHpSLBxu8DKgxq8z6RuBZwqI=
-github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U=
-github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
-github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8=
-github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c=
-gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/armon/go-metrics/inmem.go b/vendor/github.com/armon/go-metrics/inmem.go
index e8206daab..4e2d6a709 100644
--- a/vendor/github.com/armon/go-metrics/inmem.go
+++ b/vendor/github.com/armon/go-metrics/inmem.go
@@ -10,8 +10,6 @@ import (
"time"
)
-var spaceReplacer = strings.NewReplacer(" ", "_")
-
// InmemSink provides a MetricSink that does in-memory aggregation
// without sending metrics over a network. It can be embedded within
// an application to provide profiling information.
@@ -257,11 +255,11 @@ func (i *InmemSink) Data() []*IntervalMetrics {
}
copyCurrent.Counters = make(map[string]SampledValue, len(current.Counters))
for k, v := range current.Counters {
- copyCurrent.Counters[k] = v.deepCopy()
+ copyCurrent.Counters[k] = v
}
copyCurrent.Samples = make(map[string]SampledValue, len(current.Samples))
for k, v := range current.Samples {
- copyCurrent.Samples[k] = v.deepCopy()
+ copyCurrent.Samples[k] = v
}
current.RUnlock()
@@ -314,21 +312,36 @@ func (i *InmemSink) getInterval() *IntervalMetrics {
// Flattens the key for formatting, removes spaces
func (i *InmemSink) flattenKey(parts []string) string {
buf := &bytes.Buffer{}
+ replacer := strings.NewReplacer(" ", "_")
- joined := strings.Join(parts, ".")
-
- spaceReplacer.WriteString(buf, joined)
+ if len(parts) > 0 {
+ replacer.WriteString(buf, parts[0])
+ }
+ for _, part := range parts[1:] {
+ replacer.WriteString(buf, ".")
+ replacer.WriteString(buf, part)
+ }
return buf.String()
}
// Flattens the key for formatting along with its labels, removes spaces
func (i *InmemSink) flattenKeyLabels(parts []string, labels []Label) (string, string) {
- key := i.flattenKey(parts)
- buf := bytes.NewBufferString(key)
+ buf := &bytes.Buffer{}
+ replacer := strings.NewReplacer(" ", "_")
+
+ if len(parts) > 0 {
+ replacer.WriteString(buf, parts[0])
+ }
+ for _, part := range parts[1:] {
+ replacer.WriteString(buf, ".")
+ replacer.WriteString(buf, part)
+ }
+
+ key := buf.String()
for _, label := range labels {
- spaceReplacer.WriteString(buf, fmt.Sprintf(";%s=%s", label.Name, label.Value))
+ replacer.WriteString(buf, fmt.Sprintf(";%s=%s", label.Name, label.Value))
}
return buf.String(), key
diff --git a/vendor/github.com/armon/go-metrics/inmem_endpoint.go b/vendor/github.com/armon/go-metrics/inmem_endpoint.go
index 5fac958d9..504f1b374 100644
--- a/vendor/github.com/armon/go-metrics/inmem_endpoint.go
+++ b/vendor/github.com/armon/go-metrics/inmem_endpoint.go
@@ -41,16 +41,6 @@ type SampledValue struct {
DisplayLabels map[string]string `json:"Labels"`
}
-// deepCopy allocates a new instance of AggregateSample
-func (source *SampledValue) deepCopy() SampledValue {
- dest := *source
- if source.AggregateSample != nil {
- dest.AggregateSample = &AggregateSample{}
- *dest.AggregateSample = *source.AggregateSample
- }
- return dest
-}
-
// DisplayMetrics returns a summary of the metrics from the most recent finished interval.
func (i *InmemSink) DisplayMetrics(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
data := i.Data()
@@ -62,15 +52,12 @@ func (i *InmemSink) DisplayMetrics(resp http.ResponseWriter, req *http.Request)
return nil, fmt.Errorf("no metric intervals have been initialized yet")
case n == 1:
// Show the current interval if it's all we have
- interval = data[0]
+ interval = i.intervals[0]
default:
// Show the most recent finished interval if we have one
- interval = data[n-2]
+ interval = i.intervals[n-2]
}
- interval.RLock()
- defer interval.RUnlock()
-
summary := MetricsSummary{
Timestamp: interval.Interval.Round(time.Second).UTC().String(),
Gauges: make([]GaugeValue, 0, len(interval.Gauges)),
diff --git a/vendor/github.com/armon/go-metrics/metrics.go b/vendor/github.com/armon/go-metrics/metrics.go
index 457b74bb5..cf9def748 100644
--- a/vendor/github.com/armon/go-metrics/metrics.go
+++ b/vendor/github.com/armon/go-metrics/metrics.go
@@ -197,7 +197,7 @@ func (m *Metrics) filterLabels(labels []Label) []Label {
if labels == nil {
return nil
}
- toReturn := []Label{}
+ toReturn := labels[:0]
for _, label := range labels {
if m.labelIsAllowed(&label) {
toReturn = append(toReturn, label)
@@ -269,25 +269,10 @@ func (m *Metrics) emitRuntimeStats() {
m.lastNumGC = num
}
-// Creates a new slice with the provided string value as the first element
-// and the provided slice values as the remaining values.
-// Ordering of the values in the provided input slice is kept in tact in the output slice.
+// Inserts a string value at an index into the slice
func insert(i int, v string, s []string) []string {
- // Allocate new slice to avoid modifying the input slice
- newS := make([]string, len(s)+1)
-
- // Copy s[0, i-1] into newS
- for j := 0; j < i; j++ {
- newS[j] = s[j]
- }
-
- // Insert provided element at index i
- newS[i] = v
-
- // Copy s[i, len(s)-1] into newS starting at newS[i+1]
- for j := i; j < len(s); j++ {
- newS[j+1] = s[j]
- }
-
- return newS
+ s = append(s, "")
+ copy(s[i+1:], s[i:])
+ s[i] = v
+ return s
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
index 710eb432f..11d4240d6 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
@@ -50,9 +50,19 @@ func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
for i, n := range names {
val := v.FieldByName(n)
+ ft, ok := v.Type().FieldByName(n)
+ if !ok {
+ panic(fmt.Sprintf("expected to find field %v on type %v, but was not found", n, v.Type()))
+ }
+
buf.WriteString(strings.Repeat(" ", indent+2))
buf.WriteString(n + ": ")
- prettify(val, indent+2, buf)
+
+ if tag := ft.Tag.Get("sensitive"); tag == "true" {
+ buf.WriteString("<sensitive>")
+ } else {
+ prettify(val, indent+2, buf)
+ }
if i < len(names)-1 {
buf.WriteString(",\n")
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
index 645df2450..3f7cffd95 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
@@ -8,6 +8,8 @@ import (
)
// StringValue returns the string representation of a value.
+//
+// Deprecated: Use Prettify instead.
func StringValue(i interface{}) string {
var buf bytes.Buffer
stringValue(reflect.ValueOf(i), 0, &buf)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
index 03334d692..74f35ccf0 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
@@ -88,10 +88,6 @@ func (c *Client) NewRequest(operation *request.Operation, params interface{}, da
// AddDebugHandlers injects debug logging handlers into the service to log request
// debug information.
func (c *Client) AddDebugHandlers() {
- if !c.Config.LogLevel.AtLeast(aws.LogDebug) {
- return
- }
-
c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler)
c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler)
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
index 8958c32d4..1d774cfa2 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
@@ -53,6 +53,10 @@ var LogHTTPRequestHandler = request.NamedHandler{
}
func logRequest(r *request.Request) {
+ if !r.Config.LogLevel.AtLeast(aws.LogDebug) {
+ return
+ }
+
logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
bodySeekable := aws.IsReaderSeekable(r.Body)
@@ -120,6 +124,10 @@ var LogHTTPResponseHandler = request.NamedHandler{
}
func logResponse(r *request.Request) {
+ if !r.Config.LogLevel.AtLeast(aws.LogDebug) {
+ return
+ }
+
lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)}
if r.HTTPResponse == nil {
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go
index 3b809e847..39fa6d5fe 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go
@@ -438,13 +438,6 @@ func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config {
return c
}
-// MergeIn merges the passed in configs into the existing config object.
-func (c *Config) MergeIn(cfgs ...*Config) {
- for _, other := range cfgs {
- mergeInConfig(c, other)
- }
-}
-
// WithSTSRegionalEndpoint will set whether or not to use regional endpoint flag
// when resolving the endpoint for a service
func (c *Config) WithSTSRegionalEndpoint(sre endpoints.STSRegionalEndpoint) *Config {
@@ -459,6 +452,27 @@ func (c *Config) WithS3UsEast1RegionalEndpoint(sre endpoints.S3UsEast1RegionalEn
return c
}
+// WithLowerCaseHeaderMaps sets a config LowerCaseHeaderMaps value
+// returning a Config pointer for chaining.
+func (c *Config) WithLowerCaseHeaderMaps(t bool) *Config {
+ c.LowerCaseHeaderMaps = &t
+ return c
+}
+
+// WithDisableRestProtocolURICleaning sets a config DisableRestProtocolURICleaning value
+// returning a Config pointer for chaining.
+func (c *Config) WithDisableRestProtocolURICleaning(t bool) *Config {
+ c.DisableRestProtocolURICleaning = &t
+ return c
+}
+
+// MergeIn merges the passed in configs into the existing config object.
+func (c *Config) MergeIn(cfgs ...*Config) {
+ for _, other := range cfgs {
+ mergeInConfig(c, other)
+ }
+}
+
func mergeInConfig(dst *Config, other *Config) {
if other == nil {
return
@@ -571,6 +585,10 @@ func mergeInConfig(dst *Config, other *Config) {
if other.S3UsEast1RegionalEndpoint != endpoints.UnsetS3UsEast1Endpoint {
dst.S3UsEast1RegionalEndpoint = other.S3UsEast1RegionalEndpoint
}
+
+ if other.LowerCaseHeaderMaps != nil {
+ dst.LowerCaseHeaderMaps = other.LowerCaseHeaderMaps
+ }
}
// Copy will return a shallow copy of the Config object. If any additional
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go
index 2866f9a7f..89aad2c67 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go
@@ -1,3 +1,4 @@
+//go:build !go1.9
// +build !go1.9
package aws
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go
index 3718b26e1..6ee9ddd18 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go
@@ -1,3 +1,4 @@
+//go:build go1.9
// +build go1.9
package aws
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go
index 2f9446333..313218190 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go
@@ -1,3 +1,4 @@
+//go:build !go1.7
// +build !go1.7
package aws
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go
index 9c29f29af..9975d561b 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go
@@ -1,3 +1,4 @@
+//go:build go1.7
// +build go1.7
package aws
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
index d95a5eb54..36a915efe 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
@@ -178,7 +178,7 @@ func handleSendError(r *request.Request, err error) {
var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) {
if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 {
// this may be replaced by an UnmarshalError handler
- r.Error = awserr.New("UnknownError", "unknown error", nil)
+ r.Error = awserr.New("UnknownError", "unknown error", r.Error)
}
}}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go
index 5852b2648..6e3406b1f 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go
@@ -1,3 +1,4 @@
+//go:build !go1.7
// +build !go1.7
package credentials
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go
index 388b21541..a68df0ee7 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go
@@ -1,3 +1,4 @@
+//go:build go1.7
// +build go1.7
package credentials
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go
index 8152a864a..0345fab2d 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go
@@ -1,3 +1,4 @@
+//go:build !go1.9
// +build !go1.9
package credentials
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go
index 4356edb3d..79018aba7 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go
@@ -1,3 +1,4 @@
+//go:build go1.9
// +build go1.9
package credentials
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go
new file mode 100644
index 000000000..18c940ab3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go
@@ -0,0 +1,60 @@
+// Package ssocreds provides a credential provider for retrieving temporary AWS credentials using an SSO access token.
+//
+// IMPORTANT: The provider in this package does not initiate or perform the AWS SSO login flow. The SDK provider
+// expects that you have already performed the SSO login flow using AWS CLI using the "aws sso login" command, or by
+// some other mechanism. The provider must find a valid non-expired access token for the AWS SSO user portal URL in
+// ~/.aws/sso/cache. If a cached token is not found, is expired, or the file is malformed, an error will be returned.
+//
+// Loading AWS SSO credentials with the AWS shared configuration file
+//
+// You can configure AWS SSO credentials from the AWS shared configuration file by
+// specifying the required keys in the profile:
+//
+// sso_account_id
+// sso_region
+// sso_role_name
+// sso_start_url
+//
+// For example, the following defines a profile "devsso" and specifies the AWS SSO parameters that define the target
+// account, role, sign-on portal, and the region where the user portal is located. Note: all SSO arguments must be
+// provided, or an error will be returned.
+//
+// [profile devsso]
+// sso_start_url = https://my-sso-portal.awsapps.com/start
+// sso_role_name = SSOReadOnlyRole
+// sso_region = us-east-1
+// sso_account_id = 123456789012
+//
+// Using the session package, you can load the AWS SDK shared configuration, and specify that this profile be used to
+// retrieve credentials. For example:
+//
+// sess, err := session.NewSessionWithOptions(session.Options{
+// SharedConfigState: session.SharedConfigEnable,
+// Profile: "devsso",
+// })
+// if err != nil {
+// return err
+// }
+//
+// Programmatically loading AWS SSO credentials directly
+//
+// You can programmatically construct the AWS SSO Provider in your application, and provide the necessary information
+// to load and retrieve temporary credentials using an access token from ~/.aws/sso/cache.
+//
+// svc := sso.New(sess, &aws.Config{
+// Region: aws.String("us-west-2"), // Client Region must correspond to the AWS SSO user portal region
+// })
+//
+// provider := ssocreds.NewCredentialsWithClient(svc, "123456789012", "SSOReadOnlyRole", "https://my-sso-portal.awsapps.com/start")
+//
+// credentials, err := provider.Get()
+// if err != nil {
+// return err
+// }
+//
+// Additional Resources
+//
+// Configuring the AWS CLI to use AWS Single Sign-On: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
+//
+// AWS Single Sign-On User Guide: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html
+package ssocreds
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go
new file mode 100644
index 000000000..d4df39a7a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go
@@ -0,0 +1,10 @@
+//go:build !windows
+// +build !windows
+
+package ssocreds
+
+import "os"
+
+func getHomeDirectory() string {
+ return os.Getenv("HOME")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go
new file mode 100644
index 000000000..eb48f61e5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go
@@ -0,0 +1,7 @@
+package ssocreds
+
+import "os"
+
+func getHomeDirectory() string {
+ return os.Getenv("USERPROFILE")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go
new file mode 100644
index 000000000..6eda2a555
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go
@@ -0,0 +1,180 @@
+package ssocreds
+
+import (
+ "crypto/sha1"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/service/sso"
+ "github.com/aws/aws-sdk-go/service/sso/ssoiface"
+)
+
+// ErrCodeSSOProviderInvalidToken is the code type that is returned if loaded token has expired or is otherwise invalid.
+// To refresh the SSO session run aws sso login with the corresponding profile.
+const ErrCodeSSOProviderInvalidToken = "SSOProviderInvalidToken"
+
+const invalidTokenMessage = "the SSO session has expired or is invalid"
+
+func init() {
+ nowTime = time.Now
+ defaultCacheLocation = defaultCacheLocationImpl
+}
+
+var nowTime func() time.Time
+
+// ProviderName is the name of the provider used to specify the source of credentials.
+const ProviderName = "SSOProvider"
+
+var defaultCacheLocation func() string
+
+func defaultCacheLocationImpl() string {
+ return filepath.Join(getHomeDirectory(), ".aws", "sso", "cache")
+}
+
+// Provider is an AWS credential provider that retrieves temporary AWS credentials by exchanging an SSO login token.
+type Provider struct {
+ credentials.Expiry
+
+ // The Client which is configured for the AWS Region where the AWS SSO user portal is located.
+ Client ssoiface.SSOAPI
+
+ // The AWS account that is assigned to the user.
+ AccountID string
+
+ // The role name that is assigned to the user.
+ RoleName string
+
+ // The URL that points to the organization's AWS Single Sign-On (AWS SSO) user portal.
+ StartURL string
+}
+
+// NewCredentials returns a new AWS Single Sign-On (AWS SSO) credential provider. The ConfigProvider is expected to be configured
+// for the AWS Region where the AWS SSO user portal is located.
+func NewCredentials(configProvider client.ConfigProvider, accountID, roleName, startURL string, optFns ...func(provider *Provider)) *credentials.Credentials {
+ return NewCredentialsWithClient(sso.New(configProvider), accountID, roleName, startURL, optFns...)
+}
+
+// NewCredentialsWithClient returns a new AWS Single Sign-On (AWS SSO) credential provider. The provided client is expected to be configured
+// for the AWS Region where the AWS SSO user portal is located.
+func NewCredentialsWithClient(client ssoiface.SSOAPI, accountID, roleName, startURL string, optFns ...func(provider *Provider)) *credentials.Credentials {
+ p := &Provider{
+ Client: client,
+ AccountID: accountID,
+ RoleName: roleName,
+ StartURL: startURL,
+ }
+
+ for _, fn := range optFns {
+ fn(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+// Retrieve retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal
+// by exchanging the accessToken present in ~/.aws/sso/cache.
+func (p *Provider) Retrieve() (credentials.Value, error) {
+ return p.RetrieveWithContext(aws.BackgroundContext())
+}
+
+// RetrieveWithContext retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal
+// by exchanging the accessToken present in ~/.aws/sso/cache.
+func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
+ tokenFile, err := loadTokenFile(p.StartURL)
+ if err != nil {
+ return credentials.Value{}, err
+ }
+
+ output, err := p.Client.GetRoleCredentialsWithContext(ctx, &sso.GetRoleCredentialsInput{
+ AccessToken: &tokenFile.AccessToken,
+ AccountId: &p.AccountID,
+ RoleName: &p.RoleName,
+ })
+ if err != nil {
+ return credentials.Value{}, err
+ }
+
+ expireTime := time.Unix(0, aws.Int64Value(output.RoleCredentials.Expiration)*int64(time.Millisecond)).UTC()
+ p.SetExpiration(expireTime, 0)
+
+ return credentials.Value{
+ AccessKeyID: aws.StringValue(output.RoleCredentials.AccessKeyId),
+ SecretAccessKey: aws.StringValue(output.RoleCredentials.SecretAccessKey),
+ SessionToken: aws.StringValue(output.RoleCredentials.SessionToken),
+ ProviderName: ProviderName,
+ }, nil
+}
+
+func getCacheFileName(url string) (string, error) {
+ hash := sha1.New()
+ _, err := hash.Write([]byte(url))
+ if err != nil {
+ return "", err
+ }
+ return strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json", nil
+}
+
+type rfc3339 time.Time
+
+func (r *rfc3339) UnmarshalJSON(bytes []byte) error {
+ var value string
+
+ if err := json.Unmarshal(bytes, &value); err != nil {
+ return err
+ }
+
+ parse, err := time.Parse(time.RFC3339, value)
+ if err != nil {
+ return fmt.Errorf("expected RFC3339 timestamp: %v", err)
+ }
+
+ *r = rfc3339(parse)
+
+ return nil
+}
+
+type token struct {
+ AccessToken string `json:"accessToken"`
+ ExpiresAt rfc3339 `json:"expiresAt"`
+ Region string `json:"region,omitempty"`
+ StartURL string `json:"startUrl,omitempty"`
+}
+
+func (t token) Expired() bool {
+ return nowTime().Round(0).After(time.Time(t.ExpiresAt))
+}
+
+func loadTokenFile(startURL string) (t token, err error) {
+ key, err := getCacheFileName(startURL)
+ if err != nil {
+ return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err)
+ }
+
+ fileBytes, err := ioutil.ReadFile(filepath.Join(defaultCacheLocation(), key))
+ if err != nil {
+ return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err)
+ }
+
+ if err := json.Unmarshal(fileBytes, &t); err != nil {
+ return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err)
+ }
+
+ if len(t.AccessToken) == 0 {
+ return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, nil)
+ }
+
+ if t.Expired() {
+ return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, nil)
+ }
+
+ return t, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
index 6846ef6f8..260a37cbb 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
@@ -95,7 +95,7 @@ import (
// StdinTokenProvider will prompt on stderr and read from stdin for a string value.
// An error is returned if reading from stdin fails.
//
-// Use this function go read MFA tokens from stdin. The function makes no attempt
+// Use this function to read MFA tokens from stdin. The function makes no attempt
// to make atomic prompts from stdin across multiple gorouties.
//
// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
@@ -244,9 +244,11 @@ type AssumeRoleProvider struct {
MaxJitterFrac float64
}
-// NewCredentials returns a pointer to a new Credentials object wrapping the
+// NewCredentials returns a pointer to a new Credentials value wrapping the
// AssumeRoleProvider. The credentials will expire every 15 minutes and the
-// role will be named after a nanosecond timestamp of this operation.
+// role will be named after a nanosecond timestamp of this operation. The
+// Credentials value will attempt to refresh the credentials using the provider
+// when Credentials.Get is called, if the cached credentials are expiring.
//
// Takes a Config provider to create the STS client. The ConfigProvider is
// satisfied by the session.Session type.
@@ -268,9 +270,11 @@ func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*As
return credentials.NewCredentials(p)
}
-// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the
+// NewCredentialsWithClient returns a pointer to a new Credentials value wrapping the
// AssumeRoleProvider. The credentials will expire every 15 minutes and the
-// role will be named after a nanosecond timestamp of this operation.
+// role will be named after a nanosecond timestamp of this operation. The
+// Credentials value will attempt to refresh the credentials using the provider
+// when Credentials.Get is called, if the cached credentials are expiring.
//
// Takes an AssumeRoler which can be satisfied by the STS client.
//
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
index 8f35b3464..df63bade1 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
@@ -13,7 +13,6 @@ package ec2metadata
import (
"bytes"
- "errors"
"io"
"net/http"
"net/url"
@@ -234,7 +233,8 @@ func unmarshalError(r *request.Request) {
// Response body format is not consistent between metadata endpoints.
// Grab the error message as a string and include that as the source error
- r.Error = awserr.NewRequestFailure(awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String())),
+ r.Error = awserr.NewRequestFailure(
+ awserr.New("EC2MetadataError", "failed to make EC2Metadata request\n"+b.String(), nil),
r.HTTPResponse.StatusCode, r.RequestID)
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
index 654fb1ad5..b98ea8698 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
@@ -81,7 +81,6 @@ func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resol
// Customization
for i := 0; i < len(ps); i++ {
p := &ps[i]
- custAddEC2Metadata(p)
custAddS3DualStack(p)
custRegionalS3(p)
custRmIotDataService(p)
@@ -140,19 +139,6 @@ func custAddDualstack(p *partition, svcName string) {
p.Services[svcName] = s
}
-func custAddEC2Metadata(p *partition) {
- p.Services["ec2metadata"] = service{
- IsRegionalized: boxedFalse,
- PartitionEndpoint: "aws-global",
- Endpoints: endpoints{
- "aws-global": endpoint{
- Hostname: "169.254.169.254/latest",
- Protocols: []string{"http"},
- },
- },
- }
-}
-
func custRmIotDataService(p *partition) {
delete(p.Services, "data.iot")
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
index 35db88d99..ec0d71f1f 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
@@ -21,6 +21,7 @@ const (
ApEast1RegionID = "ap-east-1" // Asia Pacific (Hong Kong).
ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo).
ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul).
+ ApNortheast3RegionID = "ap-northeast-3" // Asia Pacific (Osaka).
ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai).
ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore).
ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney).
@@ -121,6 +122,9 @@ var awsPartition = partition{
"ap-northeast-2": region{
Description: "Asia Pacific (Seoul)",
},
+ "ap-northeast-3": region{
+ Description: "Asia Pacific (Osaka)",
+ },
"ap-south-1": region{
Description: "Asia Pacific (Mumbai)",
},
@@ -184,6 +188,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -239,6 +244,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -296,6 +302,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -348,11 +355,58 @@ var awsPartition = partition{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "amplify": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "amplifybackend": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
"eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-2": endpoint{},
@@ -380,9 +434,33 @@ var awsPartition = partition{
"me-south-1": endpoint{},
"sa-east-1": endpoint{},
"us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "api.detective-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "api.detective-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "api.detective-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "api.detective-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
},
},
"api.ecr": service{
@@ -412,6 +490,12 @@ var awsPartition = partition{
Region: "ap-northeast-2",
},
},
+ "ap-northeast-3": endpoint{
+ Hostname: "api.ecr.ap-northeast-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-3",
+ },
+ },
"ap-south-1": endpoint{
Hostname: "api.ecr.ap-south-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -581,6 +665,48 @@ var awsPartition = partition{
},
},
},
+ "api.fleethub.iot": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "fips-ca-central-1": endpoint{
+ Hostname: "api.fleethub.iot-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "fips-us-east-1": endpoint{
+ Hostname: "api.fleethub.iot-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "api.fleethub.iot-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "api.fleethub.iot-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
"api.mediatailor": service{
Endpoints: endpoints{
@@ -611,6 +737,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -660,6 +787,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -684,6 +812,7 @@ var awsPartition = partition{
"ap-northeast-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-west-2": endpoint{},
"us-east-1": endpoint{},
@@ -693,6 +822,7 @@ var awsPartition = partition{
"appflow": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
@@ -719,6 +849,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -740,6 +871,7 @@ var awsPartition = partition{
"appmesh": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -761,6 +893,16 @@ var awsPartition = partition{
"us-west-2": endpoint{},
},
},
+ "apprunner": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
"appstream2": service{
Defaults: endpoint{
Protocols: []string{"https"},
@@ -776,6 +918,7 @@ var awsPartition = partition{
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
"fips": endpoint{
Hostname: "appstream2-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
@@ -792,6 +935,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -810,6 +954,18 @@ var awsPartition = partition{
"us-west-2": endpoint{},
},
},
+ "aps": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
"athena": service{
Endpoints: endpoints{
@@ -817,6 +973,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -868,6 +1025,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -895,6 +1053,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -920,6 +1079,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -941,9 +1101,11 @@ var awsPartition = partition{
"batch": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -1031,9 +1193,11 @@ var awsPartition = partition{
"cloud9": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -1073,6 +1237,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -1246,7 +1411,10 @@ var awsPartition = partition{
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-2": endpoint{},
@@ -1259,6 +1427,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -1307,6 +1476,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -1338,6 +1508,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -1407,6 +1578,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -1518,11 +1690,12 @@ var awsPartition = partition{
Region: "us-west-2",
},
},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"cognito-idp": service{
@@ -1551,17 +1724,24 @@ var awsPartition = partition{
Region: "us-east-2",
},
},
+ "fips-us-west-1": endpoint{
+ Hostname: "cognito-idp-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
"fips-us-west-2": endpoint{
Hostname: "cognito-idp-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"cognito-sync": service{
@@ -1654,6 +1834,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -1702,6 +1883,7 @@ var awsPartition = partition{
"ap-northeast-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-west-2": endpoint{},
"us-east-1": endpoint{},
@@ -1711,7 +1893,11 @@ var awsPartition = partition{
"contact-lens": service{
Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
"ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-2": endpoint{},
"us-east-1": endpoint{},
"us-west-2": endpoint{},
},
@@ -1722,6 +1908,59 @@ var awsPartition = partition{
"us-east-1": endpoint{},
},
},
+ "data.jobs.iot": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips-ca-central-1": endpoint{
+ Hostname: "data.jobs.iot-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "fips-us-east-1": endpoint{
+ Hostname: "data.jobs.iot-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "data.jobs.iot-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "data.jobs.iot-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "data.jobs.iot-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
"data.mediastore": service{
Endpoints: endpoints{
@@ -1769,6 +2008,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -1848,6 +2088,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -1909,6 +2150,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -2029,6 +2271,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -2086,6 +2329,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -2148,6 +2392,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -2205,6 +2450,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -2253,17 +2499,6 @@ var awsPartition = partition{
"us-west-2": endpoint{},
},
},
- "ec2metadata": service{
- PartitionEndpoint: "aws-global",
- IsRegionalized: boxedFalse,
-
- Endpoints: endpoints{
- "aws-global": endpoint{
- Hostname: "169.254.169.254/latest",
- Protocols: []string{"http"},
- },
- },
- },
"ecs": service{
Endpoints: endpoints{
@@ -2271,6 +2506,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -2322,6 +2558,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -2371,6 +2608,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -2402,6 +2640,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -2451,6 +2690,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -2485,6 +2725,12 @@ var awsPartition = partition{
Region: "ap-northeast-2",
},
},
+ "fips-ap-northeast-3": endpoint{
+ Hostname: "elasticfilesystem-fips.ap-northeast-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-3",
+ },
+ },
"fips-ap-south-1": endpoint{
Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -2598,6 +2844,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -2650,6 +2897,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -2729,26 +2977,6 @@ var awsPartition = partition{
"emr-containers": service{
Endpoints: endpoints{
- "eu-west-1": endpoint{},
- "us-east-1": endpoint{},
- "us-west-2": endpoint{},
- },
- },
- "entitlement.marketplace": service{
- Defaults: endpoint{
- CredentialScope: credentialScope{
- Service: "aws-marketplace",
- },
- },
- Endpoints: endpoints{
- "us-east-1": endpoint{},
- },
- },
- "es": service{
-
- Endpoints: endpoints{
- "af-south-1": endpoint{},
- "ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
@@ -2757,6 +2985,70 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips-ca-central-1": endpoint{
+ Hostname: "emr-containers-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "fips-us-east-1": endpoint{
+ Hostname: "emr-containers-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "emr-containers-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "emr-containers-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "emr-containers-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "entitlement.marketplace": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "aws-marketplace",
+ },
+ },
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "es": service{
+
+ Endpoints: endpoints{
+ "af-south-1": endpoint{},
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
"eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
@@ -2782,6 +3074,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -2824,6 +3117,26 @@ var awsPartition = partition{
"us-west-2": endpoint{},
},
},
+ "finspace": service{
+
+ Endpoints: endpoints{
+ "ca-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "finspace-api": service{
+
+ Endpoints: endpoints{
+ "ca-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
"firehose": service{
Endpoints: endpoints{
@@ -2831,6 +3144,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -2882,6 +3196,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -3024,9 +3339,27 @@ var awsPartition = partition{
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "forecast-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "forecast-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "forecast-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
},
},
"forecastquery": service{
@@ -3039,6 +3372,35 @@ var awsPartition = partition{
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "forecastquery-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "forecastquery-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "forecastquery-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "frauddetector": service{
+
+ Endpoints: endpoints{
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-2": endpoint{},
@@ -3047,28 +3409,64 @@ var awsPartition = partition{
"fsx": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-prod-ca-central-1": endpoint{
+ Hostname: "fsx-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "fips-prod-us-east-1": endpoint{
+ Hostname: "fsx-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-prod-us-east-2": endpoint{
+ Hostname: "fsx-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-prod-us-west-1": endpoint{
+ Hostname: "fsx-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-prod-us-west-2": endpoint{
+ Hostname: "fsx-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"gamelift": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
+ "ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
@@ -3076,8 +3474,12 @@ var awsPartition = partition{
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
"sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
@@ -3094,6 +3496,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -3149,6 +3552,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -3214,9 +3618,17 @@ var awsPartition = partition{
Endpoints: endpoints{
"af-south-1": endpoint{},
+ "ap-northeast-2": endpoint{},
"ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
"eu-north-1": endpoint{},
"eu-west-1": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "groundstation-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
"fips-us-east-2": endpoint{
Hostname: "groundstation-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
@@ -3230,6 +3642,7 @@ var awsPartition = partition{
},
},
"me-south-1": endpoint{},
+ "us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-2": endpoint{},
},
@@ -3244,6 +3657,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -3303,6 +3717,8 @@ var awsPartition = partition{
},
Endpoints: endpoints{
"us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
},
},
"honeycode": service{
@@ -3330,6 +3746,18 @@ var awsPartition = partition{
},
},
},
+ "identity-chime": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "identity-chime-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
"identitystore": service{
Endpoints: endpoints{
@@ -3423,18 +3851,49 @@ var awsPartition = partition{
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "me-south-1": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-ca-central-1": endpoint{
+ Hostname: "iot-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Service: "execute-api",
+ },
+ },
+ "fips-us-east-1": endpoint{
+ Hostname: "iot-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Service: "execute-api",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "iot-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Service: "execute-api",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "iot-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Service: "execute-api",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "iot-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Service: "execute-api",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"iotanalytics": service{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
+ "ap-south-1": endpoint{},
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
@@ -3538,12 +3997,42 @@ var awsPartition = partition{
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "me-south-1": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-ca-central-1": endpoint{
+ Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "fips-us-east-1": endpoint{
+ Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"iotthingsgraph": service{
@@ -3564,6 +4053,18 @@ var awsPartition = partition{
"iotwireless": service{
Endpoints: endpoints{
+ "ap-northeast-1": endpoint{
+ Hostname: "api.iotwireless.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ "ap-southeast-2": endpoint{
+ Hostname: "api.iotwireless.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
"eu-west-1": endpoint{
Hostname: "api.iotwireless.eu-west-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -3576,6 +4077,20 @@ var awsPartition = partition{
Region: "us-east-1",
},
},
+ "us-west-2": endpoint{
+ Hostname: "api.iotwireless.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "ivs": service{
+
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"kafka": service{
@@ -3609,6 +4124,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -3702,6 +4218,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -3723,6 +4240,8 @@ var awsPartition = partition{
"lakeformation": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
+ "ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
@@ -3759,11 +4278,12 @@ var awsPartition = partition{
Region: "us-west-2",
},
},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"lambda": service{
@@ -3773,6 +4293,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -3822,6 +4343,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -3874,6 +4396,7 @@ var awsPartition = partition{
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -3889,6 +4412,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -3931,6 +4455,14 @@ var awsPartition = partition{
"us-west-2": endpoint{},
},
},
+ "lookoutequipment": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ },
+ },
"lookoutvision": service{
Endpoints: endpoints{
@@ -3976,6 +4508,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -4159,6 +4692,7 @@ var awsPartition = partition{
"eu-west-3": endpoint{},
"sa-east-1": endpoint{},
"us-east-1": endpoint{},
+ "us-east-2": endpoint{},
"us-west-1": endpoint{},
"us-west-2": endpoint{},
},
@@ -4177,6 +4711,18 @@ var awsPartition = partition{
"us-west-2": endpoint{},
},
},
+ "messaging-chime": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "messaging-chime-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
"metering.marketplace": service{
Defaults: endpoint{
CredentialScope: credentialScope{
@@ -4188,6 +4734,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -4238,7 +4785,19 @@ var awsPartition = partition{
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"us-east-1": endpoint{},
- "us-west-2": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "models-fips.lex.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "models-fips.lex.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
},
},
"monitoring": service{
@@ -4250,6 +4809,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -4298,6 +4858,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -4526,6 +5087,12 @@ var awsPartition = partition{
Region: "eu-west-2",
},
},
+ "eu-west-3": endpoint{
+ Hostname: "oidc.eu-west-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-3",
+ },
+ },
"us-east-1": endpoint{
Hostname: "oidc.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -4606,6 +5173,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -4654,6 +5222,22 @@ var awsPartition = partition{
"us-west-2": endpoint{},
},
},
+ "personalize": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
"pinpoint": service{
Defaults: endpoint{
CredentialScope: credentialScope{
@@ -4699,6 +5283,7 @@ var awsPartition = partition{
"polly": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -4808,6 +5393,7 @@ var awsPartition = partition{
"ap-northeast-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-west-2": endpoint{},
"us-east-1": endpoint{},
@@ -4835,9 +5421,28 @@ var awsPartition = partition{
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-2": endpoint{},
+ "eu-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "qldb-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "qldb-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "qldb-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
},
},
"ram": service{
@@ -4847,6 +5452,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -4857,12 +5463,42 @@ var awsPartition = partition{
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "me-south-1": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-ca-central-1": endpoint{
+ Hostname: "ram-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "fips-us-east-1": endpoint{
+ Hostname: "ram-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "ram-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "ram-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "ram-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"rds": service{
@@ -4872,6 +5508,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -4929,6 +5566,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -5032,6 +5670,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -5105,6 +5744,17 @@ var awsPartition = partition{
},
},
},
+ "route53-recovery-control-config": service{
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "route53-recovery-control-config.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
"route53domains": service{
Endpoints: endpoints{
@@ -5120,6 +5770,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -5152,7 +5803,19 @@ var awsPartition = partition{
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"us-east-1": endpoint{},
- "us-west-2": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "runtime-fips.lex.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "runtime-fips.lex.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
},
},
"runtime.sagemaker": service{
@@ -5162,6 +5825,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -5215,6 +5879,90 @@ var awsPartition = partition{
DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}",
},
Endpoints: endpoints{
+ "accesspoint-af-south-1": endpoint{
+ Hostname: "s3-accesspoint.af-south-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ "accesspoint-ap-east-1": endpoint{
+ Hostname: "s3-accesspoint.ap-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ "accesspoint-ap-northeast-1": endpoint{
+ Hostname: "s3-accesspoint.ap-northeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ "accesspoint-ap-northeast-2": endpoint{
+ Hostname: "s3-accesspoint.ap-northeast-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ "accesspoint-ap-northeast-3": endpoint{
+ Hostname: "s3-accesspoint.ap-northeast-3.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ "accesspoint-ap-south-1": endpoint{
+ Hostname: "s3-accesspoint.ap-south-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ "accesspoint-ap-southeast-1": endpoint{
+ Hostname: "s3-accesspoint.ap-southeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ "accesspoint-ap-southeast-2": endpoint{
+ Hostname: "s3-accesspoint.ap-southeast-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ "accesspoint-ca-central-1": endpoint{
+ Hostname: "s3-accesspoint.ca-central-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ "accesspoint-eu-central-1": endpoint{
+ Hostname: "s3-accesspoint.eu-central-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ "accesspoint-eu-north-1": endpoint{
+ Hostname: "s3-accesspoint.eu-north-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ "accesspoint-eu-south-1": endpoint{
+ Hostname: "s3-accesspoint.eu-south-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ "accesspoint-eu-west-1": endpoint{
+ Hostname: "s3-accesspoint.eu-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ "accesspoint-eu-west-2": endpoint{
+ Hostname: "s3-accesspoint.eu-west-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ "accesspoint-eu-west-3": endpoint{
+ Hostname: "s3-accesspoint.eu-west-3.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ "accesspoint-me-south-1": endpoint{
+ Hostname: "s3-accesspoint.me-south-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ "accesspoint-sa-east-1": endpoint{
+ Hostname: "s3-accesspoint.sa-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ "accesspoint-us-east-1": endpoint{
+ Hostname: "s3-accesspoint.us-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ "accesspoint-us-east-2": endpoint{
+ Hostname: "s3-accesspoint.us-east-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ "accesspoint-us-west-1": endpoint{
+ Hostname: "s3-accesspoint.us-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ "accesspoint-us-west-2": endpoint{
+ Hostname: "s3-accesspoint.us-west-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
"af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{
@@ -5222,6 +5970,7 @@ var awsPartition = partition{
SignatureVersions: []string{"s3", "s3v4"},
},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{
Hostname: "s3.ap-southeast-1.amazonaws.com",
@@ -5246,8 +5995,28 @@ var awsPartition = partition{
Hostname: "s3.eu-west-1.amazonaws.com",
SignatureVersions: []string{"s3", "s3v4"},
},
- "eu-west-2": endpoint{},
- "eu-west-3": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips-accesspoint-ca-central-1": endpoint{
+ Hostname: "s3-accesspoint-fips.ca-central-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ "fips-accesspoint-us-east-1": endpoint{
+ Hostname: "s3-accesspoint-fips.us-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ "fips-accesspoint-us-east-2": endpoint{
+ Hostname: "s3-accesspoint-fips.us-east-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ "fips-accesspoint-us-west-1": endpoint{
+ Hostname: "s3-accesspoint-fips.us-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ "fips-accesspoint-us-west-2": endpoint{
+ Hostname: "s3-accesspoint-fips.us-west-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
"me-south-1": endpoint{},
"s3-external-1": endpoint{
Hostname: "s3-external-1.amazonaws.com",
@@ -5298,6 +6067,13 @@ var awsPartition = partition{
Region: "ap-northeast-2",
},
},
+ "ap-northeast-3": endpoint{
+ Hostname: "s3-control.ap-northeast-3.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-3",
+ },
+ },
"ap-south-1": endpoint{
Hostname: "s3-control.ap-south-1.amazonaws.com",
SignatureVersions: []string{"s3v4"},
@@ -5493,6 +6269,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -5542,6 +6319,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -5645,7 +6423,57 @@ var awsPartition = partition{
},
},
},
- "servicecatalog": service{
+ "servicecatalog": service{
+
+ Endpoints: endpoints{
+ "af-south-1": endpoint{},
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "servicecatalog-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "servicecatalog-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "servicecatalog-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "servicecatalog-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "servicecatalog-appregistry": service{
Endpoints: endpoints{
"af-south-1": endpoint{},
@@ -5662,36 +6490,42 @@ var awsPartition = partition{
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "me-south-1": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-1-fips": endpoint{
- Hostname: "servicecatalog-fips.us-east-1.amazonaws.com",
+ "fips-ca-central-1": endpoint{
+ Hostname: "servicecatalog-appregistry-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "fips-us-east-1": endpoint{
+ Hostname: "servicecatalog-appregistry-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
},
- "us-east-2": endpoint{},
- "us-east-2-fips": endpoint{
- Hostname: "servicecatalog-fips.us-east-2.amazonaws.com",
+ "fips-us-east-2": endpoint{
+ Hostname: "servicecatalog-appregistry-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
},
- "us-west-1": endpoint{},
- "us-west-1-fips": endpoint{
- Hostname: "servicecatalog-fips.us-west-1.amazonaws.com",
+ "fips-us-west-1": endpoint{
+ Hostname: "servicecatalog-appregistry-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
},
- "us-west-2": endpoint{},
- "us-west-2-fips": endpoint{
- Hostname: "servicecatalog-fips.us-west-2.amazonaws.com",
+ "fips-us-west-2": endpoint{
+ Hostname: "servicecatalog-appregistry-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"servicediscovery": service{
@@ -5734,6 +6568,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -5761,9 +6596,28 @@ var awsPartition = partition{
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-2": endpoint{},
+ "eu-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "session.qldb-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "session.qldb-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "session.qldb-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
},
},
"shield": service{
@@ -5844,6 +6698,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -5966,6 +6821,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -6018,6 +6874,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -6069,6 +6926,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -6117,6 +6975,20 @@ var awsPartition = partition{
"us-west-2": endpoint{},
},
},
+ "ssm-incidents": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
"states": service{
Endpoints: endpoints{
@@ -6124,6 +6996,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -6173,6 +7046,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -6269,6 +7143,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -6336,6 +7211,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -6385,6 +7261,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -6470,6 +7347,8 @@ var awsPartition = partition{
"transfer": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
+ "ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
@@ -6478,6 +7357,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -6511,11 +7391,12 @@ var awsPartition = partition{
Region: "us-west-2",
},
},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"translate": service{
@@ -6605,6 +7486,12 @@ var awsPartition = partition{
Region: "ap-northeast-2",
},
},
+ "ap-northeast-3": endpoint{
+ Hostname: "waf-regional.ap-northeast-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-3",
+ },
+ },
"ap-south-1": endpoint{
Hostname: "waf-regional.ap-south-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -6689,6 +7576,12 @@ var awsPartition = partition{
Region: "ap-northeast-2",
},
},
+ "fips-ap-northeast-3": endpoint{
+ Hostname: "waf-regional-fips.ap-northeast-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-3",
+ },
+ },
"fips-ap-south-1": endpoint{
Hostname: "waf-regional-fips.ap-south-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -6861,6 +7754,7 @@ var awsPartition = partition{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
@@ -6891,6 +7785,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@@ -7159,9 +8054,17 @@ var awscnPartition = partition{
"cn-northwest-1": endpoint{},
},
},
+ "data.jobs.iot": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
"dax": service{
Endpoints: endpoints{
+ "cn-north-1": endpoint{},
"cn-northwest-1": endpoint{},
},
},
@@ -7222,17 +8125,6 @@ var awscnPartition = partition{
"cn-northwest-1": endpoint{},
},
},
- "ec2metadata": service{
- PartitionEndpoint: "aws-global",
- IsRegionalized: boxedFalse,
-
- Endpoints: endpoints{
- "aws-global": endpoint{
- Hostname: "169.254.169.254/latest",
- Protocols: []string{"http"},
- },
- },
- },
"ecs": service{
Endpoints: endpoints{
@@ -7300,6 +8192,13 @@ var awscnPartition = partition{
"cn-northwest-1": endpoint{},
},
},
+ "emr-containers": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
"es": service{
Endpoints: endpoints{
@@ -7321,6 +8220,15 @@ var awscnPartition = partition{
"cn-northwest-1": endpoint{},
},
},
+ "fms": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
"fsx": service{
Endpoints: endpoints{
@@ -7331,7 +8239,8 @@ var awscnPartition = partition{
"gamelift": service{
Endpoints: endpoints{
- "cn-north-1": endpoint{},
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
},
},
"glacier": service{
@@ -7359,6 +8268,16 @@ var awscnPartition = partition{
"cn-north-1": endpoint{},
},
},
+ "guardduty": service{
+ IsRegionalized: boxedTrue,
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
"health": service{
Endpoints: endpoints{
@@ -7451,7 +8370,8 @@ var awscnPartition = partition{
"lakeformation": service{
Endpoints: endpoints{
- "cn-north-1": endpoint{},
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
},
},
"lambda": service{
@@ -7495,9 +8415,22 @@ var awscnPartition = partition{
"cn-northwest-1": endpoint{},
},
},
+ "mq": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
"neptune": service{
Endpoints: endpoints{
+ "cn-north-1": endpoint{
+ Hostname: "rds.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
"cn-northwest-1": endpoint{
Hostname: "rds.cn-northwest-1.amazonaws.com.cn",
CredentialScope: credentialScope{
@@ -7519,6 +8452,12 @@ var awscnPartition = partition{
},
},
},
+ "personalize": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
"polly": service{
Endpoints: endpoints{
@@ -7566,6 +8505,15 @@ var awscnPartition = partition{
},
},
},
+ "route53resolver": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
"runtime.sagemaker": service{
Endpoints: endpoints{
@@ -7582,6 +8530,14 @@ var awscnPartition = partition{
DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}",
},
Endpoints: endpoints{
+ "accesspoint-cn-north-1": endpoint{
+ Hostname: "s3-accesspoint.cn-north-1.amazonaws.com.cn",
+ SignatureVersions: []string{"s3v4"},
+ },
+ "accesspoint-cn-northwest-1": endpoint{
+ Hostname: "s3-accesspoint.cn-northwest-1.amazonaws.com.cn",
+ SignatureVersions: []string{"s3v4"},
+ },
"cn-north-1": endpoint{},
"cn-northwest-1": endpoint{},
},
@@ -7638,6 +8594,13 @@ var awscnPartition = partition{
},
},
},
+ "servicecatalog": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
"servicediscovery": service{
Endpoints: endpoints{
@@ -7775,6 +8738,49 @@ var awscnPartition = partition{
},
},
},
+ "transcribestreaming": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "transfer": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "waf-regional": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{
+ Hostname: "waf-regional.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ "cn-northwest-1": endpoint{
+ Hostname: "waf-regional.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ "fips-cn-north-1": endpoint{
+ Hostname: "waf-regional-fips.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ "fips-cn-northwest-1": endpoint{
+ Hostname: "waf-regional-fips.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
"workspaces": service{
Endpoints: endpoints{
@@ -7875,6 +8881,27 @@ var awsusgovPartition = partition{
"us-gov-west-1": endpoint{},
},
},
+ "api.detective": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-east-1-fips": endpoint{
+ Hostname: "api.detective-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-west-1": endpoint{},
+ "us-gov-west-1-fips": endpoint{
+ Hostname: "api.detective-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
"api.ecr": service{
Endpoints: endpoints{
@@ -8224,7 +9251,32 @@ var awsusgovPartition = partition{
},
},
"fips-us-gov-west-1": endpoint{
- Hostname: "config.us-gov-west-1.amazonaws.com",
+ Hostname: "config.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "connect": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "data.jobs.iot": service{
+
+ Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "data.jobs.iot-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "data.jobs.iot-fips.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
@@ -8355,17 +9407,6 @@ var awsusgovPartition = partition{
},
},
},
- "ec2metadata": service{
- PartitionEndpoint: "aws-global",
- IsRegionalized: boxedFalse,
-
- Endpoints: endpoints{
- "aws-global": endpoint{
- Hostname: "169.254.169.254/latest",
- Protocols: []string{"http"},
- },
- },
- },
"ecs": service{
Endpoints: endpoints{
@@ -8558,6 +9599,27 @@ var awsusgovPartition = partition{
"us-gov-west-1": endpoint{},
},
},
+ "fms": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "fms-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "fms-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
"fsx": service{
Endpoints: endpoints{
@@ -8638,7 +9700,12 @@ var awsusgovPartition = partition{
Region: "us-gov-east-1",
},
},
- "us-gov-east-1": endpoint{},
+ "us-gov-east-1": endpoint{
+ Hostname: "greengrass.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
"us-gov-west-1": endpoint{
Hostname: "greengrass.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -8725,6 +9792,18 @@ var awsusgovPartition = partition{
},
},
Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "iot-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Service: "execute-api",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "iot-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Service: "execute-api",
+ },
+ },
"us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
@@ -8732,6 +9811,18 @@ var awsusgovPartition = partition{
"iotsecuredtunneling": service{
Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
"us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
@@ -8869,6 +9960,22 @@ var awsusgovPartition = partition{
"us-gov-west-1": endpoint{},
},
},
+ "models.lex": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "lex",
+ },
+ },
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ "us-gov-west-1-fips": endpoint{
+ Hostname: "models-fips.lex.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
"monitoring": service{
Endpoints: endpoints{
@@ -8888,6 +9995,25 @@ var awsusgovPartition = partition{
"us-gov-west-1": endpoint{},
},
},
+ "mq": service{
+
+ Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "mq-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "mq-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
"neptune": service{
Endpoints: endpoints{
@@ -8977,8 +10103,18 @@ var awsusgovPartition = partition{
"ram": service{
Endpoints: endpoints{
- "us-gov-east-1": endpoint{},
- "us-gov-west-1": endpoint{},
+ "us-gov-east-1": endpoint{
+ Hostname: "ram.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-west-1": endpoint{
+ Hostname: "ram.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
},
},
"rds": service{
@@ -9074,10 +10210,32 @@ var awsusgovPartition = partition{
"us-gov-west-1": endpoint{},
},
},
+ "runtime.lex": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "lex",
+ },
+ },
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ "us-gov-west-1-fips": endpoint{
+ Hostname: "runtime-fips.lex.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
"runtime.sagemaker": service{
Endpoints: endpoints{
"us-gov-west-1": endpoint{},
+ "us-gov-west-1-fips": endpoint{
+ Hostname: "runtime.sagemaker.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
},
},
"s3": service{
@@ -9088,6 +10246,22 @@ var awsusgovPartition = partition{
DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}",
},
Endpoints: endpoints{
+ "accesspoint-us-gov-east-1": endpoint{
+ Hostname: "s3-accesspoint.us-gov-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ "accesspoint-us-gov-west-1": endpoint{
+ Hostname: "s3-accesspoint.us-gov-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ "fips-accesspoint-us-gov-east-1": endpoint{
+ Hostname: "s3-accesspoint-fips.us-gov-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ "fips-accesspoint-us-gov-west-1": endpoint{
+ Hostname: "s3-accesspoint-fips.us-gov-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
"fips-us-gov-west-1": endpoint{
Hostname: "s3-fips.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -9221,6 +10395,46 @@ var awsusgovPartition = partition{
},
},
},
+ "servicecatalog-appregistry": service{
+
+ Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "servicecatalog-appregistry.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "servicecatalog-appregistry.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "servicequotas": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "servicequotas.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "servicequotas.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
"sms": service{
Endpoints: endpoints{
@@ -9689,26 +10903,27 @@ var awsisoPartition = partition{
"us-iso-east-1": endpoint{},
},
},
- "ec2metadata": service{
- PartitionEndpoint: "aws-global",
- IsRegionalized: boxedFalse,
+ "ecs": service{
Endpoints: endpoints{
- "aws-global": endpoint{
- Hostname: "169.254.169.254/latest",
- Protocols: []string{"http"},
- },
+ "us-iso-east-1": endpoint{},
},
},
- "ecs": service{
+ "elasticache": service{
Endpoints: endpoints{
"us-iso-east-1": endpoint{},
},
},
- "elasticache": service{
+ "elasticfilesystem": service{
Endpoints: endpoints{
+ "fips-us-iso-east-1": endpoint{
+ Hostname: "elasticfilesystem-fips.us-iso-east-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ },
"us-iso-east-1": endpoint{},
},
},
@@ -9740,6 +10955,12 @@ var awsisoPartition = partition{
"us-iso-east-1": endpoint{},
},
},
+ "firehose": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
"glacier": service{
Endpoints: endpoints{
@@ -9791,18 +11012,48 @@ var awsisoPartition = partition{
"us-iso-east-1": endpoint{},
},
},
+ "license-manager": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
"logs": service{
Endpoints: endpoints{
"us-iso-east-1": endpoint{},
},
},
+ "medialive": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
+ "mediapackage": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
"monitoring": service{
Endpoints: endpoints{
"us-iso-east-1": endpoint{},
},
},
+ "outposts": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
+ "ram": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
"rds": service{
Endpoints: endpoints{
@@ -9828,6 +11079,12 @@ var awsisoPartition = partition{
},
},
},
+ "route53resolver": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
"runtime.sagemaker": service{
Endpoints: endpoints{
@@ -9873,6 +11130,12 @@ var awsisoPartition = partition{
},
},
},
+ "ssm": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
"states": service{
Endpoints: endpoints{
@@ -10042,6 +11305,12 @@ var awsisobPartition = partition{
"us-isob-east-1": endpoint{},
},
},
+ "ds": service{
+
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
"dynamodb": service{
Defaults: endpoint{
Protocols: []string{"http", "https"},
@@ -10058,17 +11327,6 @@ var awsisobPartition = partition{
"us-isob-east-1": endpoint{},
},
},
- "ec2metadata": service{
- PartitionEndpoint: "aws-global",
- IsRegionalized: boxedFalse,
-
- Endpoints: endpoints{
- "aws-global": endpoint{
- Hostname: "169.254.169.254/latest",
- Protocols: []string{"http"},
- },
- },
- },
"ecs": service{
Endpoints: endpoints{
@@ -10186,6 +11444,19 @@ var awsisobPartition = partition{
"us-isob-east-1": endpoint{},
},
},
+ "route53": service{
+ PartitionEndpoint: "aws-iso-b-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-iso-b-global": endpoint{
+ Hostname: "route53.sc2s.sgov.gov",
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ },
+ },
+ },
"s3": service{
Defaults: endpoint{
Protocols: []string{"http", "https"},
@@ -10261,6 +11532,12 @@ var awsisobPartition = partition{
},
"swf": service{
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
+ "tagging": service{
+
Endpoints: endpoints{
"us-isob-east-1": endpoint{},
},
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
index ca956e5f1..8e8636f5f 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
@@ -48,6 +48,9 @@ type Options struct {
// This option is ignored if StrictMatching is enabled.
ResolveUnknownService bool
+ // Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6)
+ EC2MetadataEndpointMode EC2IMDSEndpointModeState
+
// STS Regional Endpoint flag helps with resolving the STS endpoint
STSRegionalEndpoint STSRegionalEndpoint
@@ -55,6 +58,33 @@ type Options struct {
S3UsEast1RegionalEndpoint S3UsEast1RegionalEndpoint
}
+// EC2IMDSEndpointModeState is an enum configuration variable describing the client endpoint mode.
+type EC2IMDSEndpointModeState uint
+
+// Enumeration values for EC2IMDSEndpointModeState
+const (
+ EC2IMDSEndpointModeStateUnset EC2IMDSEndpointModeState = iota
+ EC2IMDSEndpointModeStateIPv4
+ EC2IMDSEndpointModeStateIPv6
+)
+
+// SetFromString sets the EC2IMDSEndpointModeState based on the provided string value. Unknown values will default to EC2IMDSEndpointModeStateUnset
+func (e *EC2IMDSEndpointModeState) SetFromString(v string) error {
+ v = strings.TrimSpace(v)
+
+ switch {
+ case len(v) == 0:
+ *e = EC2IMDSEndpointModeStateUnset
+ case strings.EqualFold(v, "IPv6"):
+ *e = EC2IMDSEndpointModeStateIPv6
+ case strings.EqualFold(v, "IPv4"):
+ *e = EC2IMDSEndpointModeStateIPv4
+ default:
+ return fmt.Errorf("unknown EC2 IMDS endpoint mode, must be either IPv6 or IPv4")
+ }
+ return nil
+}
+
// STSRegionalEndpoint is an enum for the states of the STS Regional Endpoint
// options.
type STSRegionalEndpoint int
@@ -247,7 +277,7 @@ func RegionsForService(ps []Partition, partitionID, serviceID string) (map[strin
if p.ID() != partitionID {
continue
}
- if _, ok := p.p.Services[serviceID]; !ok {
+ if _, ok := p.p.Services[serviceID]; !(ok || serviceID == Ec2metadataServiceID) {
break
}
@@ -333,6 +363,7 @@ func (p Partition) Regions() map[string]Region {
// enumerating over the services in a partition.
func (p Partition) Services() map[string]Service {
ss := make(map[string]Service, len(p.p.Services))
+
for id := range p.p.Services {
ss[id] = Service{
id: id,
@@ -340,6 +371,15 @@ func (p Partition) Services() map[string]Service {
}
}
+ // Since we have removed the customization that injected this into the model
+ // we still need to pretend that this is a modeled service.
+ if _, ok := ss[Ec2metadataServiceID]; !ok {
+ ss[Ec2metadataServiceID] = Service{
+ id: Ec2metadataServiceID,
+ p: p.p,
+ }
+ }
+
return ss
}
@@ -400,7 +440,18 @@ func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (Resolve
// an URL that can be resolved to a instance of a service.
func (s Service) Regions() map[string]Region {
rs := map[string]Region{}
- for id := range s.p.Services[s.id].Endpoints {
+
+ service, ok := s.p.Services[s.id]
+
+ // Since ec2metadata customization has been removed we need to check
+ // if it was defined in non-standard endpoints.json file. If it's not
+ // then we can return the empty map as there is no regional-endpoints for IMDS.
+ // Otherwise, we iterate need to iterate the non-standard model.
+ if s.id == Ec2metadataServiceID && !ok {
+ return rs
+ }
+
+ for id := range service.Endpoints {
if r, ok := s.p.Regions[id]; ok {
rs[id] = Region{
id: id,
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
index 773613722..c6c6a0338 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
@@ -7,6 +7,11 @@ import (
"strings"
)
+const (
+ ec2MetadataEndpointIPv6 = "http://[fd00:ec2::254]/latest"
+ ec2MetadataEndpointIPv4 = "http://169.254.169.254/latest"
+)
+
var regionValidationRegex = regexp.MustCompile(`^[[:alnum:]]([[:alnum:]\-]*[[:alnum:]])?$`)
type partitions []partition
@@ -102,6 +107,12 @@ func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (
opt.Set(opts...)
s, hasService := p.Services[service]
+
+ if service == Ec2metadataServiceID && !hasService {
+ endpoint := getEC2MetadataEndpoint(p.ID, service, opt.EC2MetadataEndpointMode)
+ return endpoint, nil
+ }
+
if len(service) == 0 || !(hasService || opt.ResolveUnknownService) {
// Only return error if the resolver will not fallback to creating
// endpoint based on service endpoint ID passed in.
@@ -129,6 +140,31 @@ func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (
return e.resolve(service, p.ID, region, p.DNSSuffix, defs, opt)
}
+func getEC2MetadataEndpoint(partitionID, service string, mode EC2IMDSEndpointModeState) ResolvedEndpoint {
+ switch mode {
+ case EC2IMDSEndpointModeStateIPv6:
+ return ResolvedEndpoint{
+ URL: ec2MetadataEndpointIPv6,
+ PartitionID: partitionID,
+ SigningRegion: "aws-global",
+ SigningName: service,
+ SigningNameDerived: true,
+ SigningMethod: "v4",
+ }
+ case EC2IMDSEndpointModeStateIPv4:
+ fallthrough
+ default:
+ return ResolvedEndpoint{
+ URL: ec2MetadataEndpointIPv4,
+ PartitionID: partitionID,
+ SigningRegion: "aws-global",
+ SigningName: service,
+ SigningNameDerived: true,
+ SigningMethod: "v4",
+ }
+ }
+}
+
func serviceList(ss services) []string {
list := make([]string, 0, len(ss))
for k := range ss {
@@ -178,14 +214,14 @@ type service struct {
}
func (s *service) endpointForRegion(region string) (endpoint, bool) {
- if s.IsRegionalized == boxedFalse {
- return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint
- }
-
if e, ok := s.Endpoints[region]; ok {
return e, true
}
+ if s.IsRegionalized == boxedFalse {
+ return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint
+ }
+
// Unable to find any matching endpoint, return
// blank that will be used for generic endpoint creation.
return endpoint{}, false
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go
index 0fdfcc56e..db6efd605 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go
@@ -1,3 +1,4 @@
+//go:build codegen
// +build codegen
package endpoints
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
index d597c6ead..fb0a68fce 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
@@ -129,12 +129,27 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
httpReq, _ := http.NewRequest(method, "", nil)
var err error
- httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath)
+ httpReq.URL, err = url.Parse(clientInfo.Endpoint)
if err != nil {
httpReq.URL = &url.URL{}
err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err)
}
+ if len(operation.HTTPPath) != 0 {
+ opHTTPPath := operation.HTTPPath
+ var opQueryString string
+ if idx := strings.Index(opHTTPPath, "?"); idx >= 0 {
+ opQueryString = opHTTPPath[idx+1:]
+ opHTTPPath = opHTTPPath[:idx]
+ }
+
+ if strings.HasSuffix(httpReq.URL.Path, "/") && strings.HasPrefix(opHTTPPath, "/") {
+ opHTTPPath = opHTTPPath[1:]
+ }
+ httpReq.URL.Path += opHTTPPath
+ httpReq.URL.RawQuery = opQueryString
+ }
+
r := &Request{
Config: cfg,
ClientInfo: clientInfo,
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
index e36e468b7..5921b8ff2 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
@@ -1,3 +1,4 @@
+//go:build !go1.8
// +build !go1.8
package request
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
index de1292f45..ea643c9c4 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
@@ -1,3 +1,4 @@
+//go:build go1.8
// +build go1.8
package request
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go
index a7365cd1e..d8c505302 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go
@@ -1,3 +1,4 @@
+//go:build go1.7
// +build go1.7
package request
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go
index 307fa0705..49a243ef2 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go
@@ -1,3 +1,4 @@
+//go:build !go1.7
// +build !go1.7
package request
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go
index fe6dac1f4..3efdac29f 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go
@@ -9,6 +9,7 @@ import (
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/processcreds"
+ "github.com/aws/aws-sdk-go/aws/credentials/ssocreds"
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/defaults"
"github.com/aws/aws-sdk-go/aws/request"
@@ -100,10 +101,6 @@ func resolveCredsFromProfile(cfg *aws.Config,
sharedCfg.Creds,
)
- case len(sharedCfg.CredentialProcess) != 0:
- // Get credentials from CredentialProcess
- creds = processcreds.NewCredentials(sharedCfg.CredentialProcess)
-
case len(sharedCfg.CredentialSource) != 0:
creds, err = resolveCredsFromSource(cfg, envCfg,
sharedCfg, handlers, sessOpts,
@@ -119,6 +116,13 @@ func resolveCredsFromProfile(cfg *aws.Config,
sharedCfg.RoleSessionName,
)
+ case sharedCfg.hasSSOConfiguration():
+ creds, err = resolveSSOCredentials(cfg, sharedCfg, handlers)
+
+ case len(sharedCfg.CredentialProcess) != 0:
+ // Get credentials from CredentialProcess
+ creds = processcreds.NewCredentials(sharedCfg.CredentialProcess)
+
default:
// Fallback to default credentials provider, include mock errors for
// the credential chain so user can identify why credentials failed to
@@ -151,6 +155,25 @@ func resolveCredsFromProfile(cfg *aws.Config,
return creds, nil
}
+func resolveSSOCredentials(cfg *aws.Config, sharedCfg sharedConfig, handlers request.Handlers) (*credentials.Credentials, error) {
+ if err := sharedCfg.validateSSOConfiguration(); err != nil {
+ return nil, err
+ }
+
+ cfgCopy := cfg.Copy()
+ cfgCopy.Region = &sharedCfg.SSORegion
+
+ return ssocreds.NewCredentials(
+ &Session{
+ Config: cfgCopy,
+ Handlers: handlers.Copy(),
+ },
+ sharedCfg.SSOAccountID,
+ sharedCfg.SSORoleName,
+ sharedCfg.SSOStartURL,
+ ), nil
+}
+
// valid credential source values
const (
credSourceEc2Metadata = "Ec2InstanceMetadata"
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go
index 593aedc42..4390ad52f 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go
@@ -1,3 +1,4 @@
+//go:build go1.13
// +build go1.13
package session
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go
index 1bf31cf8e..668565bea 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go
@@ -1,3 +1,4 @@
+//go:build !go1.13 && go1.7
// +build !go1.13,go1.7
package session
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go
index 253d7bc9d..e101aa6b6 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go
@@ -1,3 +1,4 @@
+//go:build !go1.6 && go1.5
// +build !go1.6,go1.5
package session
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go
index db2406054..b5fcbe0d1 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go
@@ -1,3 +1,4 @@
+//go:build !go1.7 && go1.6
// +build !go1.7,go1.6
package session
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
index 9419b518d..43b56863e 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
@@ -283,7 +283,7 @@ component must be enclosed in square brackets.
The custom EC2 IMDS endpoint can also be specified via the Session options.
sess, err := session.NewSessionWithOptions(session.Options{
- EC2IMDSEndpoint: "http://[::1]",
+ EC2MetadataEndpoint: "http://[::1]",
})
*/
package session
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
index 3cd5d4b5a..fffe2f350 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
@@ -161,10 +161,15 @@ type envConfig struct {
// AWS_S3_USE_ARN_REGION=true
S3UseARNRegion bool
- // Specifies the alternative endpoint to use for EC2 IMDS.
+ // Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EC2IMDSEndpointMode.
//
// AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1]
EC2IMDSEndpoint string
+
+ // Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6)
+ //
+ // AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6
+ EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState
}
var (
@@ -231,6 +236,9 @@ var (
ec2IMDSEndpointEnvKey = []string{
"AWS_EC2_METADATA_SERVICE_ENDPOINT",
}
+ ec2IMDSEndpointModeEnvKey = []string{
+ "AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE",
+ }
useCABundleKey = []string{
"AWS_CA_BUNDLE",
}
@@ -364,6 +372,9 @@ func envConfigLoad(enableSharedConfig bool) (envConfig, error) {
}
setFromEnvVal(&cfg.EC2IMDSEndpoint, ec2IMDSEndpointEnvKey)
+ if err := setEC2IMDSEndpointMode(&cfg.EC2IMDSEndpointMode, ec2IMDSEndpointModeEnvKey); err != nil {
+ return envConfig{}, err
+ }
return cfg, nil
}
@@ -376,3 +387,17 @@ func setFromEnvVal(dst *string, keys []string) {
}
}
}
+
+func setEC2IMDSEndpointMode(mode *endpoints.EC2IMDSEndpointModeState, keys []string) error {
+ for _, k := range keys {
+ value := os.Getenv(k)
+ if len(value) == 0 {
+ continue
+ }
+ if err := mode.SetFromString(value); err != nil {
+ return fmt.Errorf("invalid value for environment variable, %s=%s, %v", k, value, err)
+ }
+ return nil
+ }
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
index 08713cc34..4b2e057e9 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
@@ -36,7 +36,7 @@ const (
// ErrSharedConfigSourceCollision will be returned if a section contains both
// source_profile and credential_source
-var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only source profile or credential source can be specified, not both", nil)
+var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only one credential type may be specified per profile: source profile, credential source, credential process, web identity token, or sso", nil)
// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment
// variables are empty and Environment was set as the credential source
@@ -283,8 +283,8 @@ type Options struct {
Handlers request.Handlers
// Allows specifying a custom endpoint to be used by the EC2 IMDS client
- // when making requests to the EC2 IMDS API. The must endpoint value must
- // include protocol prefix.
+ // when making requests to the EC2 IMDS API. The endpoint value should
+ // include the URI scheme. If the scheme is not present it will be defaulted to http.
//
// If unset, will the EC2 IMDS client will use its default endpoint.
//
@@ -298,6 +298,11 @@ type Options struct {
//
// AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1]
EC2IMDSEndpoint string
+
+ // Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6)
+ //
+ // AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6
+ EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState
}
// NewSessionWithOptions returns a new Session created from SDK defaults, config files,
@@ -375,19 +380,23 @@ func Must(sess *Session, err error) *Session {
// Wraps the endpoint resolver with a resolver that will return a custom
// endpoint for EC2 IMDS.
-func wrapEC2IMDSEndpoint(resolver endpoints.Resolver, endpoint string) endpoints.Resolver {
+func wrapEC2IMDSEndpoint(resolver endpoints.Resolver, endpoint string, mode endpoints.EC2IMDSEndpointModeState) endpoints.Resolver {
return endpoints.ResolverFunc(
func(service, region string, opts ...func(*endpoints.Options)) (
endpoints.ResolvedEndpoint, error,
) {
- if service == ec2MetadataServiceID {
+ if service == ec2MetadataServiceID && len(endpoint) > 0 {
return endpoints.ResolvedEndpoint{
URL: endpoint,
SigningName: ec2MetadataServiceID,
SigningRegion: region,
}, nil
+ } else if service == ec2MetadataServiceID {
+ opts = append(opts, func(o *endpoints.Options) {
+ o.EC2MetadataEndpointMode = mode
+ })
}
- return resolver.EndpointFor(service, region)
+ return resolver.EndpointFor(service, region, opts...)
})
}
@@ -404,8 +413,8 @@ func deprecatedNewSession(envCfg envConfig, cfgs ...*aws.Config) *Session {
cfg.EndpointResolver = endpoints.DefaultResolver()
}
- if len(envCfg.EC2IMDSEndpoint) != 0 {
- cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, envCfg.EC2IMDSEndpoint)
+ if !(len(envCfg.EC2IMDSEndpoint) == 0 && envCfg.EC2IMDSEndpointMode == endpoints.EC2IMDSEndpointModeStateUnset) {
+ cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, envCfg.EC2IMDSEndpoint, envCfg.EC2IMDSEndpointMode)
}
cfg.Credentials = defaults.CredChain(cfg, handlers)
@@ -737,12 +746,32 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config,
endpoints.LegacyS3UsEast1Endpoint,
})
- ec2IMDSEndpoint := sessOpts.EC2IMDSEndpoint
- if len(ec2IMDSEndpoint) == 0 {
- ec2IMDSEndpoint = envCfg.EC2IMDSEndpoint
+ var ec2IMDSEndpoint string
+ for _, v := range []string{
+ sessOpts.EC2IMDSEndpoint,
+ envCfg.EC2IMDSEndpoint,
+ sharedCfg.EC2IMDSEndpoint,
+ } {
+ if len(v) != 0 {
+ ec2IMDSEndpoint = v
+ break
+ }
}
- if len(ec2IMDSEndpoint) != 0 {
- cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, ec2IMDSEndpoint)
+
+ var endpointMode endpoints.EC2IMDSEndpointModeState
+ for _, v := range []endpoints.EC2IMDSEndpointModeState{
+ sessOpts.EC2IMDSEndpointMode,
+ envCfg.EC2IMDSEndpointMode,
+ sharedCfg.EC2IMDSEndpointMode,
+ } {
+ if v != endpoints.EC2IMDSEndpointModeStateUnset {
+ endpointMode = v
+ break
+ }
+ }
+
+ if len(ec2IMDSEndpoint) != 0 || endpointMode != endpoints.EC2IMDSEndpointModeStateUnset {
+ cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, ec2IMDSEndpoint, endpointMode)
}
// Configure credentials if not already set by the user when creating the
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
index be7daacf3..6830ece70 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
@@ -2,6 +2,7 @@ package session
import (
"fmt"
+ "strings"
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
@@ -25,6 +26,12 @@ const (
roleSessionNameKey = `role_session_name` // optional
roleDurationSecondsKey = "duration_seconds" // optional
+ // AWS Single Sign-On (AWS SSO) group
+ ssoAccountIDKey = "sso_account_id"
+ ssoRegionKey = "sso_region"
+ ssoRoleNameKey = "sso_role_name"
+ ssoStartURL = "sso_start_url"
+
// CSM options
csmEnabledKey = `csm_enabled`
csmHostKey = `csm_host`
@@ -59,10 +66,18 @@ const (
// S3 ARN Region Usage
s3UseARNRegionKey = "s3_use_arn_region"
+
+ // EC2 IMDS Endpoint Mode
+ ec2MetadataServiceEndpointModeKey = "ec2_metadata_service_endpoint_mode"
+
+ // EC2 IMDS Endpoint
+ ec2MetadataServiceEndpointKey = "ec2_metadata_service_endpoint"
)
// sharedConfig represents the configuration fields of the SDK config files.
type sharedConfig struct {
+ Profile string
+
// Credentials values from the config file. Both aws_access_key_id and
// aws_secret_access_key must be provided together in the same file to be
// considered valid. The values will be ignored if not a complete group.
@@ -78,6 +93,11 @@ type sharedConfig struct {
CredentialProcess string
WebIdentityTokenFile string
+ SSOAccountID string
+ SSORegion string
+ SSORoleName string
+ SSOStartURL string
+
RoleARN string
RoleSessionName string
ExternalID string
@@ -131,6 +151,16 @@ type sharedConfig struct {
//
// s3_use_arn_region=true
S3UseARNRegion bool
+
+ // Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6)
+ //
+ // ec2_metadata_service_endpoint_mode=IPv6
+ EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState
+
+ // Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EC2IMDSEndpointMode.
+ //
+ // ec2_metadata_service_endpoint=http://fd00:ec2::254
+ EC2IMDSEndpoint string
}
type sharedConfigFile struct {
@@ -189,6 +219,8 @@ func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
}
func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile string, files []sharedConfigFile, exOpts bool) error {
+ cfg.Profile = profile
+
// Trim files from the list that don't exist.
var skippedFiles int
var profileNotFoundErr error
@@ -217,9 +249,9 @@ func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile s
cfg.clearAssumeRoleOptions()
} else {
// First time a profile has been seen, It must either be a assume role
- // or credentials. Assert if the credential type requires a role ARN,
- // the ARN is also set.
- if err := cfg.validateCredentialsRequireARN(profile); err != nil {
+ // credentials, or SSO. Assert if the credential type requires a role ARN,
+ // the ARN is also set, or validate that the SSO configuration is complete.
+ if err := cfg.validateCredentialsConfig(profile); err != nil {
return err
}
}
@@ -312,6 +344,18 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e
}
cfg.S3UsEast1RegionalEndpoint = sre
}
+
+ // AWS Single Sign-On (AWS SSO)
+ updateString(&cfg.SSOAccountID, section, ssoAccountIDKey)
+ updateString(&cfg.SSORegion, section, ssoRegionKey)
+ updateString(&cfg.SSORoleName, section, ssoRoleNameKey)
+ updateString(&cfg.SSOStartURL, section, ssoStartURL)
+
+ if err := updateEC2MetadataServiceEndpointMode(&cfg.EC2IMDSEndpointMode, section, ec2MetadataServiceEndpointModeKey); err != nil {
+ return fmt.Errorf("failed to load %s from shared config, %s, %v",
+ ec2MetadataServiceEndpointModeKey, file.Filename, err)
+ }
+ updateString(&cfg.EC2IMDSEndpoint, section, ec2MetadataServiceEndpointKey)
}
updateString(&cfg.CredentialProcess, section, credentialProcessKey)
@@ -342,6 +386,22 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e
return nil
}
+func updateEC2MetadataServiceEndpointMode(endpointMode *endpoints.EC2IMDSEndpointModeState, section ini.Section, key string) error {
+ if !section.Has(key) {
+ return nil
+ }
+ value := section.String(key)
+ return endpointMode.SetFromString(value)
+}
+
+func (cfg *sharedConfig) validateCredentialsConfig(profile string) error {
+ if err := cfg.validateCredentialsRequireARN(profile); err != nil {
+ return err
+ }
+
+ return nil
+}
+
func (cfg *sharedConfig) validateCredentialsRequireARN(profile string) error {
var credSource string
@@ -378,12 +438,43 @@ func (cfg *sharedConfig) validateCredentialType() error {
return nil
}
+func (cfg *sharedConfig) validateSSOConfiguration() error {
+ if !cfg.hasSSOConfiguration() {
+ return nil
+ }
+
+ var missing []string
+ if len(cfg.SSOAccountID) == 0 {
+ missing = append(missing, ssoAccountIDKey)
+ }
+
+ if len(cfg.SSORegion) == 0 {
+ missing = append(missing, ssoRegionKey)
+ }
+
+ if len(cfg.SSORoleName) == 0 {
+ missing = append(missing, ssoRoleNameKey)
+ }
+
+ if len(cfg.SSOStartURL) == 0 {
+ missing = append(missing, ssoStartURL)
+ }
+
+ if len(missing) > 0 {
+ return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s",
+ cfg.Profile, strings.Join(missing, ", "))
+ }
+
+ return nil
+}
+
func (cfg *sharedConfig) hasCredentials() bool {
switch {
case len(cfg.SourceProfileName) != 0:
case len(cfg.CredentialSource) != 0:
case len(cfg.CredentialProcess) != 0:
case len(cfg.WebIdentityTokenFile) != 0:
+ case cfg.hasSSOConfiguration():
case cfg.Creds.HasKeys():
default:
return false
@@ -397,6 +488,10 @@ func (cfg *sharedConfig) clearCredentialOptions() {
cfg.CredentialProcess = ""
cfg.WebIdentityTokenFile = ""
cfg.Creds = credentials.Value{}
+ cfg.SSOAccountID = ""
+ cfg.SSORegion = ""
+ cfg.SSORoleName = ""
+ cfg.SSOStartURL = ""
}
func (cfg *sharedConfig) clearAssumeRoleOptions() {
@@ -407,6 +502,18 @@ func (cfg *sharedConfig) clearAssumeRoleOptions() {
cfg.SourceProfileName = ""
}
+func (cfg *sharedConfig) hasSSOConfiguration() bool {
+ switch {
+ case len(cfg.SSOAccountID) != 0:
+ case len(cfg.SSORegion) != 0:
+ case len(cfg.SSORoleName) != 0:
+ case len(cfg.SSOStartURL) != 0:
+ default:
+ return false
+ }
+ return true
+}
+
func oneOrNone(bs ...bool) bool {
var count int
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
index 07ea799fb..993753831 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
@@ -34,23 +34,23 @@ func (m mapRule) IsValid(value string) bool {
return ok
}
-// whitelist is a generic rule for whitelisting
-type whitelist struct {
+// allowList is a generic rule for allow listing
+type allowList struct {
rule
}
-// IsValid for whitelist checks if the value is within the whitelist
-func (w whitelist) IsValid(value string) bool {
+// IsValid for allow list checks if the value is within the allow list
+func (w allowList) IsValid(value string) bool {
return w.rule.IsValid(value)
}
-// blacklist is a generic rule for blacklisting
-type blacklist struct {
+// excludeList is a generic rule for exclude listing
+type excludeList struct {
rule
}
-// IsValid for whitelist checks if the value is within the whitelist
-func (b blacklist) IsValid(value string) bool {
+// IsValid for exclude list checks if the value is within the exclude list
+func (b excludeList) IsValid(value string) bool {
return !b.rule.IsValid(value)
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go
index f35fc860b..cf672b6ac 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go
@@ -1,3 +1,4 @@
+//go:build !go1.7
// +build !go1.7
package v4
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go
index fed5c859c..21fe74e6f 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go
@@ -1,3 +1,4 @@
+//go:build go1.7
// +build go1.7
package v4
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
index bd082e9d1..7711ec737 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
@@ -1,3 +1,4 @@
+//go:build go1.5
// +build go1.5
package v4
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
index d71f7b3f4..d4653031f 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
@@ -90,7 +90,7 @@ const (
)
var ignoredHeaders = rules{
- blacklist{
+ excludeList{
mapRule{
authorizationHeader: struct{}{},
"User-Agent": struct{}{},
@@ -99,9 +99,9 @@ var ignoredHeaders = rules{
},
}
-// requiredSignedHeaders is a whitelist for build canonical headers.
+// requiredSignedHeaders is an allow list for building canonical headers.
var requiredSignedHeaders = rules{
- whitelist{
+ allowList{
mapRule{
"Cache-Control": struct{}{},
"Content-Disposition": struct{}{},
@@ -145,12 +145,13 @@ var requiredSignedHeaders = rules{
},
},
patterns{"X-Amz-Meta-"},
+ patterns{"X-Amz-Object-Lock-"},
}
-// allowedHoisting is a whitelist for build query headers. The boolean value
+// allowedQueryHoisting is an allow list for building query headers. The boolean value
// represents whether or not it is a pattern.
var allowedQueryHoisting = inclusiveRules{
- blacklist{requiredSignedHeaders},
+ excludeList{requiredSignedHeaders},
patterns{"X-Amz-"},
}
@@ -417,7 +418,7 @@ var SignRequestHandler = request.NamedHandler{
// request handler should only be used with the SDK's built in service client's
// API operation requests.
//
-// This function should not be used on its on its own, but in conjunction with
+// This function should not be used on its own, but in conjunction with
// an AWS service client's API operation call. To sign a standalone request
// not created by a service client's API operation method use the "Sign" or
// "Presign" functions of the "Signer" type.
@@ -689,9 +690,12 @@ func (ctx *signingCtx) buildBodyDigest() error {
if hash == "" {
includeSHA256Header := ctx.unsignedPayload ||
ctx.ServiceName == "s3" ||
+ ctx.ServiceName == "s3-object-lambda" ||
ctx.ServiceName == "glacier"
- s3Presign := ctx.isPresign && ctx.ServiceName == "s3"
+ s3Presign := ctx.isPresign &&
+ (ctx.ServiceName == "s3" ||
+ ctx.ServiceName == "s3-object-lambda")
if ctx.unsignedPayload || s3Presign {
hash = "UNSIGNED-PAYLOAD"
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url.go b/vendor/github.com/aws/aws-sdk-go/aws/url.go
index 6192b2455..fed561bd5 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/url.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/url.go
@@ -1,3 +1,4 @@
+//go:build go1.8
// +build go1.8
package aws
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go
index 0210d2720..95282db03 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go
@@ -1,3 +1,4 @@
+//go:build !go1.8
// +build !go1.8
package aws
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
index 919084d52..34a8aff7a 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/version.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
-const SDKVersion = "1.36.15"
+const SDKVersion = "1.40.37"
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go b/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go
index 876dcb3fd..365345353 100644
--- a/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go
+++ b/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go
@@ -1,3 +1,4 @@
+//go:build !go1.7
// +build !go1.7
package context
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go
index 25ce0fe13..1e55bbd07 100644
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go
@@ -13,17 +13,30 @@
// }
//
// Below is the BNF that describes this parser
-// Grammar:
-// stmt -> value stmt'
-// stmt' -> epsilon | op stmt
-// value -> number | string | boolean | quoted_string
+// Grammar:
+// stmt -> section | stmt'
+// stmt' -> epsilon | expr
+// expr -> value (stmt)* | equal_expr (stmt)*
+// equal_expr -> value ( ':' | '=' ) equal_expr'
+// equal_expr' -> number | string | quoted_string
+// quoted_string -> " quoted_string'
+// quoted_string' -> string quoted_string_end
+// quoted_string_end -> "
//
-// section -> [ section'
-// section' -> value section_close
-// section_close -> ]
+// section -> [ section'
+// section' -> section_value section_close
+// section_value -> number | string_subset | boolean | quoted_string_subset
+// quoted_string_subset -> " quoted_string_subset'
+// quoted_string_subset' -> string_subset quoted_string_end
+// quoted_string_subset -> "
+// section_close -> ]
//
-// SkipState will skip (NL WS)+
+// value -> number | string_subset | boolean
+// string -> ? UTF-8 Code-Points except '\n' (U+000A) and '\r\n' (U+000D U+000A) ?
+// string_subset -> ? Code-points excepted by grammar except ':' (U+003A), '=' (U+003D), '[' (U+005B), and ']' (U+005D) ?
//
-// comment -> # comment' | ; comment'
-// comment' -> epsilon | value
+// SkipState will skip (NL WS)+
+//
+// comment -> # comment' | ; comment'
+// comment' -> epsilon | value
package ini
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go
index 8d462f77e..6e545b63b 100644
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go
@@ -1,3 +1,4 @@
+//go:build gofuzz
// +build gofuzz
package ini
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
index 55fa73ebc..0ba319491 100644
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
@@ -5,9 +5,12 @@ import (
"io"
)
+// ParseState represents the current state of the parser.
+type ParseState uint
+
// State enums for the parse table
const (
- InvalidState = iota
+ InvalidState ParseState = iota
// stmt -> value stmt'
StatementState
// stmt' -> MarkComplete | op stmt
@@ -36,8 +39,8 @@ const (
)
// parseTable is a state machine to dictate the grammar above.
-var parseTable = map[ASTKind]map[TokenType]int{
- ASTKindStart: map[TokenType]int{
+var parseTable = map[ASTKind]map[TokenType]ParseState{
+ ASTKindStart: {
TokenLit: StatementState,
TokenSep: OpenScopeState,
TokenWS: SkipTokenState,
@@ -45,7 +48,7 @@ var parseTable = map[ASTKind]map[TokenType]int{
TokenComment: CommentState,
TokenNone: TerminalState,
},
- ASTKindCommentStatement: map[TokenType]int{
+ ASTKindCommentStatement: {
TokenLit: StatementState,
TokenSep: OpenScopeState,
TokenWS: SkipTokenState,
@@ -53,7 +56,7 @@ var parseTable = map[ASTKind]map[TokenType]int{
TokenComment: CommentState,
TokenNone: MarkCompleteState,
},
- ASTKindExpr: map[TokenType]int{
+ ASTKindExpr: {
TokenOp: StatementPrimeState,
TokenLit: ValueState,
TokenSep: OpenScopeState,
@@ -62,13 +65,15 @@ var parseTable = map[ASTKind]map[TokenType]int{
TokenComment: CommentState,
TokenNone: MarkCompleteState,
},
- ASTKindEqualExpr: map[TokenType]int{
- TokenLit: ValueState,
- TokenWS: SkipTokenState,
- TokenNL: SkipState,
- TokenNone: SkipState,
+ ASTKindEqualExpr: {
+ TokenLit: ValueState,
+ TokenSep: ValueState,
+ TokenOp: ValueState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipState,
+ TokenNone: SkipState,
},
- ASTKindStatement: map[TokenType]int{
+ ASTKindStatement: {
TokenLit: SectionState,
TokenSep: CloseScopeState,
TokenWS: SkipTokenState,
@@ -76,9 +81,9 @@ var parseTable = map[ASTKind]map[TokenType]int{
TokenComment: CommentState,
TokenNone: MarkCompleteState,
},
- ASTKindExprStatement: map[TokenType]int{
+ ASTKindExprStatement: {
TokenLit: ValueState,
- TokenSep: OpenScopeState,
+ TokenSep: ValueState,
TokenOp: ValueState,
TokenWS: ValueState,
TokenNL: MarkCompleteState,
@@ -86,14 +91,14 @@ var parseTable = map[ASTKind]map[TokenType]int{
TokenNone: TerminalState,
TokenComma: SkipState,
},
- ASTKindSectionStatement: map[TokenType]int{
+ ASTKindSectionStatement: {
TokenLit: SectionState,
TokenOp: SectionState,
TokenSep: CloseScopeState,
TokenWS: SectionState,
TokenNL: SkipTokenState,
},
- ASTKindCompletedSectionStatement: map[TokenType]int{
+ ASTKindCompletedSectionStatement: {
TokenWS: SkipTokenState,
TokenNL: SkipTokenState,
TokenLit: StatementState,
@@ -101,7 +106,7 @@ var parseTable = map[ASTKind]map[TokenType]int{
TokenComment: CommentState,
TokenNone: MarkCompleteState,
},
- ASTKindSkipStatement: map[TokenType]int{
+ ASTKindSkipStatement: {
TokenLit: StatementState,
TokenSep: OpenScopeState,
TokenWS: SkipTokenState,
@@ -205,18 +210,6 @@ loop:
case ValueState:
// ValueState requires the previous state to either be an equal expression
// or an expression statement.
- //
- // This grammar occurs when the RHS is a number, word, or quoted string.
- // equal_expr -> lit op equal_expr'
- // equal_expr' -> number | string | quoted_string
- // quoted_string -> " quoted_string'
- // quoted_string' -> string quoted_string_end
- // quoted_string_end -> "
- //
- // otherwise
- // expr_stmt -> equal_expr (expr_stmt')*
- // expr_stmt' -> ws S | op S | MarkComplete
- // S -> equal_expr' expr_stmt'
switch k.Kind {
case ASTKindEqualExpr:
// assigning a value to some key
@@ -243,7 +236,7 @@ loop:
}
children[len(children)-1] = rhs
- k.SetChildren(children)
+ root.SetChildren(children)
stack.Push(k)
}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go
index 94841c324..081cf4334 100644
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go
@@ -50,7 +50,10 @@ func (v *DefaultVisitor) VisitExpr(expr AST) error {
rhs := children[1]
- if rhs.Root.Type() != TokenLit {
+ // The right-hand value side the equality expression is allowed to contain '[', ']', ':', '=' in the values.
+ // If the token is not either a literal or one of the token types that identifies those four additional
+ // tokens then error.
+ if !(rhs.Root.Type() == TokenLit || rhs.Root.Type() == TokenOp || rhs.Root.Type() == TokenSep) {
return NewParseError("unexpected token type")
}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
index 5aa9137e0..037a998c4 100644
--- a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
@@ -1,3 +1,4 @@
+//go:build !go1.7
// +build !go1.7
package sdkio
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go
index e5f005613..65e7c60c4 100644
--- a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go
@@ -1,3 +1,4 @@
+//go:build go1.7
// +build go1.7
package sdkio
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go
index 44898eed0..a84528783 100644
--- a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go
@@ -1,3 +1,4 @@
+//go:build go1.10
// +build go1.10
package sdkmath
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go
index 810ec7f08..a3ae3e5db 100644
--- a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go
@@ -1,3 +1,4 @@
+//go:build !go1.10
// +build !go1.10
package sdkmath
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go
index f4651da2d..4bae66cee 100644
--- a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go
@@ -1,3 +1,4 @@
+//go:build go1.6
// +build go1.6
package sdkrand
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go
index b1d93a33d..3a6ab8825 100644
--- a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go
@@ -1,3 +1,4 @@
+//go:build !go1.6
// +build !go1.6
package sdkrand
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go
index d7d42db0a..1f1d27aea 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go
@@ -1,9 +1,10 @@
package protocol
import (
- "strings"
-
"github.com/aws/aws-sdk-go/aws/request"
+ "net"
+ "strconv"
+ "strings"
)
// ValidateEndpointHostHandler is a request handler that will validate the
@@ -22,8 +23,26 @@ var ValidateEndpointHostHandler = request.NamedHandler{
// 3986 host. Returns error if the host is not valid.
func ValidateEndpointHost(opName, host string) error {
paramErrs := request.ErrInvalidParams{Context: opName}
- labels := strings.Split(host, ".")
+ var hostname string
+ var port string
+ var err error
+
+ if strings.Contains(host, ":") {
+ hostname, port, err = net.SplitHostPort(host)
+
+ if err != nil {
+ paramErrs.Add(request.NewErrParamFormat("endpoint", err.Error(), host))
+ }
+
+ if !ValidPortNumber(port) {
+ paramErrs.Add(request.NewErrParamFormat("endpoint port number", "[0-65535]", port))
+ }
+ } else {
+ hostname = host
+ }
+
+ labels := strings.Split(hostname, ".")
for i, label := range labels {
if i == len(labels)-1 && len(label) == 0 {
// Allow trailing dot for FQDN hosts.
@@ -36,7 +55,11 @@ func ValidateEndpointHost(opName, host string) error {
}
}
- if len(host) > 255 {
+ if len(hostname) == 0 {
+ paramErrs.Add(request.NewErrParamMinLen("endpoint host", 1))
+ }
+
+ if len(hostname) > 255 {
paramErrs.Add(request.NewErrParamMaxLen(
"endpoint host", 255, host,
))
@@ -66,3 +89,16 @@ func ValidHostLabel(label string) bool {
return true
}
+
+// ValidPortNumber return if the port is valid RFC 3986 port
+func ValidPortNumber(port string) bool {
+ i, err := strconv.Atoi(port)
+ if err != nil {
+ return false
+ }
+
+ if i < 0 || i > 65535 {
+ return false
+ }
+ return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go
new file mode 100644
index 000000000..a029217e4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go
@@ -0,0 +1,88 @@
+// Package jsonrpc provides JSON RPC utilities for serialization of AWS
+// requests and responses.
+package jsonrpc
+
+//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/json.json build_test.go
+//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/json.json unmarshal_test.go
+
+import (
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
+ "github.com/aws/aws-sdk-go/private/protocol/rest"
+)
+
+var emptyJSON = []byte("{}")
+
+// BuildHandler is a named request handler for building jsonrpc protocol
+// requests
+var BuildHandler = request.NamedHandler{
+ Name: "awssdk.jsonrpc.Build",
+ Fn: Build,
+}
+
+// UnmarshalHandler is a named request handler for unmarshaling jsonrpc
+// protocol requests
+var UnmarshalHandler = request.NamedHandler{
+ Name: "awssdk.jsonrpc.Unmarshal",
+ Fn: Unmarshal,
+}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling jsonrpc
+// protocol request metadata
+var UnmarshalMetaHandler = request.NamedHandler{
+ Name: "awssdk.jsonrpc.UnmarshalMeta",
+ Fn: UnmarshalMeta,
+}
+
+// Build builds a JSON payload for a JSON RPC request.
+func Build(req *request.Request) {
+ var buf []byte
+ var err error
+ if req.ParamsFilled() {
+ buf, err = jsonutil.BuildJSON(req.Params)
+ if err != nil {
+ req.Error = awserr.New(request.ErrCodeSerialization, "failed encoding JSON RPC request", err)
+ return
+ }
+ } else {
+ buf = emptyJSON
+ }
+
+ if req.ClientInfo.TargetPrefix != "" || string(buf) != "{}" {
+ req.SetBufferBody(buf)
+ }
+
+ if req.ClientInfo.TargetPrefix != "" {
+ target := req.ClientInfo.TargetPrefix + "." + req.Operation.Name
+ req.HTTPRequest.Header.Add("X-Amz-Target", target)
+ }
+
+ // Only set the content type if one is not already specified and an
+ // JSONVersion is specified.
+ if ct, v := req.HTTPRequest.Header.Get("Content-Type"), req.ClientInfo.JSONVersion; len(ct) == 0 && len(v) != 0 {
+ jsonVersion := req.ClientInfo.JSONVersion
+ req.HTTPRequest.Header.Set("Content-Type", "application/x-amz-json-"+jsonVersion)
+ }
+}
+
+// Unmarshal unmarshals a response for a JSON RPC service.
+func Unmarshal(req *request.Request) {
+ defer req.HTTPResponse.Body.Close()
+ if req.DataFilled() {
+ err := jsonutil.UnmarshalJSON(req.Data, req.HTTPResponse.Body)
+ if err != nil {
+ req.Error = awserr.NewRequestFailure(
+ awserr.New(request.ErrCodeSerialization, "failed decoding JSON RPC response", err),
+ req.HTTPResponse.StatusCode,
+ req.RequestID,
+ )
+ }
+ }
+ return
+}
+
+// UnmarshalMeta unmarshals headers from a response for a JSON RPC service.
+func UnmarshalMeta(req *request.Request) {
+ rest.UnmarshalMeta(req)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go
new file mode 100644
index 000000000..c0c52e2db
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go
@@ -0,0 +1,107 @@
+package jsonrpc
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol"
+ "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
+)
+
+// UnmarshalTypedError provides unmarshaling of API response errors
+// for both typed and untyped errors.
+type UnmarshalTypedError struct {
+ exceptions map[string]func(protocol.ResponseMetadata) error
+}
+
+// NewUnmarshalTypedError returns an UnmarshalTypedError initialized for the
+// set of exception names to the error unmarshalers
+func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata) error) *UnmarshalTypedError {
+ return &UnmarshalTypedError{
+ exceptions: exceptions,
+ }
+}
+
+// UnmarshalError attempts to unmarshal the HTTP response error as a known
+// error type. If unable to unmarshal the error type, the generic SDK error
+// type will be used.
+func (u *UnmarshalTypedError) UnmarshalError(
+ resp *http.Response,
+ respMeta protocol.ResponseMetadata,
+) (error, error) {
+
+ var buf bytes.Buffer
+ var jsonErr jsonErrorResponse
+ teeReader := io.TeeReader(resp.Body, &buf)
+ err := jsonutil.UnmarshalJSONError(&jsonErr, teeReader)
+ if err != nil {
+ return nil, err
+ }
+ body := ioutil.NopCloser(&buf)
+
+ // Code may be separated by hash(#), with the last element being the code
+ // used by the SDK.
+ codeParts := strings.SplitN(jsonErr.Code, "#", 2)
+ code := codeParts[len(codeParts)-1]
+ msg := jsonErr.Message
+
+ if fn, ok := u.exceptions[code]; ok {
+ // If exception code is known, use associated constructor to get a value
+ // for the exception that the JSON body can be unmarshaled into.
+ v := fn(respMeta)
+ err := jsonutil.UnmarshalJSONCaseInsensitive(v, body)
+ if err != nil {
+ return nil, err
+ }
+
+ return v, nil
+ }
+
+ // fallback to unmodeled generic exceptions
+ return awserr.NewRequestFailure(
+ awserr.New(code, msg, nil),
+ respMeta.StatusCode,
+ respMeta.RequestID,
+ ), nil
+}
+
+// UnmarshalErrorHandler is a named request handler for unmarshaling jsonrpc
+// protocol request errors
+var UnmarshalErrorHandler = request.NamedHandler{
+ Name: "awssdk.jsonrpc.UnmarshalError",
+ Fn: UnmarshalError,
+}
+
+// UnmarshalError unmarshals an error response for a JSON RPC service.
+func UnmarshalError(req *request.Request) {
+ defer req.HTTPResponse.Body.Close()
+
+ var jsonErr jsonErrorResponse
+ err := jsonutil.UnmarshalJSONError(&jsonErr, req.HTTPResponse.Body)
+ if err != nil {
+ req.Error = awserr.NewRequestFailure(
+ awserr.New(request.ErrCodeSerialization,
+ "failed to unmarshal error message", err),
+ req.HTTPResponse.StatusCode,
+ req.RequestID,
+ )
+ return
+ }
+
+ codes := strings.SplitN(jsonErr.Code, "#", 2)
+ req.Error = awserr.NewRequestFailure(
+ awserr.New(codes[len(codes)-1], jsonErr.Message, nil),
+ req.HTTPResponse.StatusCode,
+ req.RequestID,
+ )
+}
+
+type jsonErrorResponse struct {
+ Code string `json:"__type"`
+ Message string `json:"message"`
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
index 1301b149d..fb35fee5f 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
@@ -98,7 +98,7 @@ func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bo
// Support the ability to customize values to be marshaled as a
// blob even though they were modeled as a string. Required for S3
- // API operations like SSECustomerKey is modeled as stirng but
+ // API operations like SSECustomerKey is modeled as string but
// required to be base64 encoded in request.
if field.Tag.Get("marshal-as") == "blob" {
m = m.Convert(byteSliceType)
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go
new file mode 100644
index 000000000..2e0e205af
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go
@@ -0,0 +1,59 @@
+// Package restjson provides RESTful JSON serialization of AWS
+// requests and responses.
+package restjson
+
+//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/rest-json.json build_test.go
+//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/rest-json.json unmarshal_test.go
+
+import (
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+ "github.com/aws/aws-sdk-go/private/protocol/rest"
+)
+
+// BuildHandler is a named request handler for building restjson protocol
+// requests
+var BuildHandler = request.NamedHandler{
+ Name: "awssdk.restjson.Build",
+ Fn: Build,
+}
+
+// UnmarshalHandler is a named request handler for unmarshaling restjson
+// protocol requests
+var UnmarshalHandler = request.NamedHandler{
+ Name: "awssdk.restjson.Unmarshal",
+ Fn: Unmarshal,
+}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling restjson
+// protocol request metadata
+var UnmarshalMetaHandler = request.NamedHandler{
+ Name: "awssdk.restjson.UnmarshalMeta",
+ Fn: UnmarshalMeta,
+}
+
+// Build builds a request for the REST JSON protocol.
+func Build(r *request.Request) {
+ rest.Build(r)
+
+ if t := rest.PayloadType(r.Params); t == "structure" || t == "" {
+ if v := r.HTTPRequest.Header.Get("Content-Type"); len(v) == 0 {
+ r.HTTPRequest.Header.Set("Content-Type", "application/json")
+ }
+ jsonrpc.Build(r)
+ }
+}
+
+// Unmarshal unmarshals a response body for the REST JSON protocol.
+func Unmarshal(r *request.Request) {
+ if t := rest.PayloadType(r.Data); t == "structure" || t == "" {
+ jsonrpc.Unmarshal(r)
+ } else {
+ rest.Unmarshal(r)
+ }
+}
+
+// UnmarshalMeta unmarshals response headers for the REST JSON protocol.
+func UnmarshalMeta(r *request.Request) {
+ rest.UnmarshalMeta(r)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go
new file mode 100644
index 000000000..d756d8cc5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go
@@ -0,0 +1,134 @@
+package restjson
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol"
+ "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
+ "github.com/aws/aws-sdk-go/private/protocol/rest"
+)
+
+const (
+ errorTypeHeader = "X-Amzn-Errortype"
+ errorMessageHeader = "X-Amzn-Errormessage"
+)
+
+// UnmarshalTypedError provides unmarshaling of API response errors
+// for both typed and untyped errors.
+type UnmarshalTypedError struct {
+ exceptions map[string]func(protocol.ResponseMetadata) error
+}
+
+// NewUnmarshalTypedError returns an UnmarshalTypedError initialized for the
+// set of exception names to the error unmarshalers
+func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata) error) *UnmarshalTypedError {
+ return &UnmarshalTypedError{
+ exceptions: exceptions,
+ }
+}
+
+// UnmarshalError attempts to unmarshal the HTTP response error as a known
+// error type. If unable to unmarshal the error type, the generic SDK error
+// type will be used.
+func (u *UnmarshalTypedError) UnmarshalError(
+ resp *http.Response,
+ respMeta protocol.ResponseMetadata,
+) (error, error) {
+
+ code := resp.Header.Get(errorTypeHeader)
+ msg := resp.Header.Get(errorMessageHeader)
+
+ body := resp.Body
+ if len(code) == 0 {
+ // If unable to get code from HTTP headers have to parse JSON message
+ // to determine what kind of exception this will be.
+ var buf bytes.Buffer
+ var jsonErr jsonErrorResponse
+ teeReader := io.TeeReader(resp.Body, &buf)
+ err := jsonutil.UnmarshalJSONError(&jsonErr, teeReader)
+ if err != nil {
+ return nil, err
+ }
+
+ body = ioutil.NopCloser(&buf)
+ code = jsonErr.Code
+ msg = jsonErr.Message
+ }
+
+ // If code has colon separators remove them so can compare against modeled
+ // exception names.
+ code = strings.SplitN(code, ":", 2)[0]
+
+ if fn, ok := u.exceptions[code]; ok {
+ // If exception code is known, use associated constructor to get a value
+ // for the exception that the JSON body can be unmarshaled into.
+ v := fn(respMeta)
+ if err := jsonutil.UnmarshalJSONCaseInsensitive(v, body); err != nil {
+ return nil, err
+ }
+
+ if err := rest.UnmarshalResponse(resp, v, true); err != nil {
+ return nil, err
+ }
+
+ return v, nil
+ }
+
+ // fallback to unmodeled generic exceptions
+ return awserr.NewRequestFailure(
+ awserr.New(code, msg, nil),
+ respMeta.StatusCode,
+ respMeta.RequestID,
+ ), nil
+}
+
+// UnmarshalErrorHandler is a named request handler for unmarshaling restjson
+// protocol request errors
+var UnmarshalErrorHandler = request.NamedHandler{
+ Name: "awssdk.restjson.UnmarshalError",
+ Fn: UnmarshalError,
+}
+
+// UnmarshalError unmarshals a response error for the REST JSON protocol.
+func UnmarshalError(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+
+ var jsonErr jsonErrorResponse
+ err := jsonutil.UnmarshalJSONError(&jsonErr, r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(request.ErrCodeSerialization,
+ "failed to unmarshal response error", err),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+ return
+ }
+
+ code := r.HTTPResponse.Header.Get(errorTypeHeader)
+ if code == "" {
+ code = jsonErr.Code
+ }
+ msg := r.HTTPResponse.Header.Get(errorMessageHeader)
+ if msg == "" {
+ msg = jsonErr.Message
+ }
+
+ code = strings.SplitN(code, ":", 2)[0]
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(code, jsonErr.Message, nil),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+}
+
+type jsonErrorResponse struct {
+ Code string `json:"code"`
+ Message string `json:"message"`
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
index 98f4caed9..d9a4e7649 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
@@ -1,6 +1,8 @@
package protocol
import (
+ "bytes"
+ "fmt"
"math"
"strconv"
"time"
@@ -19,13 +21,16 @@ const (
// Output time is intended to not contain decimals
const (
// RFC 7231#section-7.1.1.1 timetamp format. e.g Tue, 29 Apr 2014 18:30:38 GMT
- RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT"
+ RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT"
+ rfc822TimeFormatSingleDigitDay = "Mon, _2 Jan 2006 15:04:05 GMT"
+ rfc822TimeFormatSingleDigitDayTwoDigitYear = "Mon, _2 Jan 06 15:04:05 GMT"
// This format is used for output time without seconds precision
RFC822OutputTimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
// RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z
- ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z"
+ ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z"
+ iso8601TimeFormatNoZ = "2006-01-02T15:04:05.999999999"
// This format is used for output time with fractional second precision up to milliseconds
ISO8601OutputTimeFormat = "2006-01-02T15:04:05.999999999Z"
@@ -67,10 +72,21 @@ func FormatTime(name string, t time.Time) string {
// the time if it was able to be parsed, and fails otherwise.
func ParseTime(formatName, value string) (time.Time, error) {
switch formatName {
- case RFC822TimeFormatName:
- return time.Parse(RFC822TimeFormat, value)
- case ISO8601TimeFormatName:
- return time.Parse(ISO8601TimeFormat, value)
+ case RFC822TimeFormatName: // Smithy HTTPDate format
+ return tryParse(value,
+ RFC822TimeFormat,
+ rfc822TimeFormatSingleDigitDay,
+ rfc822TimeFormatSingleDigitDayTwoDigitYear,
+ time.RFC850,
+ time.ANSIC,
+ )
+ case ISO8601TimeFormatName: // Smithy DateTime format
+ return tryParse(value,
+ ISO8601TimeFormat,
+ iso8601TimeFormatNoZ,
+ time.RFC3339Nano,
+ time.RFC3339,
+ )
case UnixTimeFormatName:
v, err := strconv.ParseFloat(value, 64)
_, dec := math.Modf(v)
@@ -83,3 +99,36 @@ func ParseTime(formatName, value string) (time.Time, error) {
panic("unknown timestamp format name, " + formatName)
}
}
+
+func tryParse(v string, formats ...string) (time.Time, error) {
+ var errs parseErrors
+ for _, f := range formats {
+ t, err := time.Parse(f, v)
+ if err != nil {
+ errs = append(errs, parseError{
+ Format: f,
+ Err: err,
+ })
+ continue
+ }
+ return t, nil
+ }
+
+ return time.Time{}, fmt.Errorf("unable to parse time string, %v", errs)
+}
+
+type parseErrors []parseError
+
+func (es parseErrors) Error() string {
+ var s bytes.Buffer
+ for _, e := range es {
+ fmt.Fprintf(&s, "\n * %q: %v", e.Format, e.Err)
+ }
+
+ return "parse errors:" + s.String()
+}
+
+type parseError struct {
+ Format string
+ Err error
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
index 09ad95159..2fbb93ae7 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
@@ -308,6 +308,8 @@ func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag refl
if tag.Get("xmlAttribute") != "" { // put into current node's attribute list
attr := xml.Attr{Name: xname, Value: str}
current.Attr = append(current.Attr, attr)
+ } else if len(xname.Local) == 0 {
+ current.Text = str
} else { // regular text node
current.AddChild(&XMLNode{Name: xname, Text: str})
}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
index 42f71648e..c85b79fdd 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
@@ -18,6 +18,14 @@ type XMLNode struct {
parent *XMLNode
}
+// textEncoder is a string type alias that implements the TextMarshaler interface.
+// This alias type is used to ensure that the line feed (\n) (U+000A) is escaped.
+type textEncoder string
+
+func (t textEncoder) MarshalText() ([]byte, error) {
+ return []byte(t), nil
+}
+
// NewXMLElement returns a pointer to a new XMLNode initialized to default values.
func NewXMLElement(name xml.Name) *XMLNode {
return &XMLNode{
@@ -130,11 +138,16 @@ func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error {
attrs = sortedAttrs
}
- e.EncodeToken(xml.StartElement{Name: node.Name, Attr: attrs})
+ startElement := xml.StartElement{Name: node.Name, Attr: attrs}
if node.Text != "" {
- e.EncodeToken(xml.CharData([]byte(node.Text)))
- } else if sorted {
+ e.EncodeElement(textEncoder(node.Text), startElement)
+ return e.Flush()
+ }
+
+ e.EncodeToken(startElement)
+
+ if sorted {
sortedNames := []string{}
for k := range node.Children {
sortedNames = append(sortedNames, k)
@@ -154,6 +167,7 @@ func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error {
}
}
- e.EncodeToken(xml.EndElement{Name: node.Name})
+ e.EncodeToken(startElement.End())
+
return e.Flush()
}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go
index 59ea423d9..98585e001 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go
@@ -515,19 +515,20 @@ func (c *EC2) AdvertiseByoipCidrRequest(input *AdvertiseByoipCidrInput) (req *re
// AdvertiseByoipCidr API operation for Amazon Elastic Compute Cloud.
//
// Advertises an IPv4 or IPv6 address range that is provisioned for use with
-// your AWS resources through bring your own IP addresses (BYOIP).
+// your Amazon Web Services resources through bring your own IP addresses (BYOIP).
//
// You can perform this operation at most once every 10 seconds, even if you
// specify different address ranges each time.
//
// We recommend that you stop advertising the BYOIP CIDR from other locations
-// when you advertise it from AWS. To minimize down time, you can configure
-// your AWS resources to use an address from a BYOIP CIDR before it is advertised,
-// and then simultaneously stop advertising it from the current location and
-// start advertising it through AWS.
+// when you advertise it from Amazon Web Services. To minimize down time, you
+// can configure your Amazon Web Services resources to use an address from a
+// BYOIP CIDR before it is advertised, and then simultaneously stop advertising
+// it from the current location and start advertising it through Amazon Web
+// Services.
//
// It can take a few minutes before traffic to the specified addresses starts
-// routing to AWS because of BGP propagation delays.
+// routing to Amazon Web Services because of BGP propagation delays.
//
// To stop advertising the BYOIP CIDR, use WithdrawByoipCidr.
//
@@ -603,23 +604,24 @@ func (c *EC2) AllocateAddressRequest(input *AllocateAddressInput) (req *request.
// AllocateAddress API operation for Amazon Elastic Compute Cloud.
//
-// Allocates an Elastic IP address to your AWS account. After you allocate the
-// Elastic IP address you can associate it with an instance or network interface.
-// After you release an Elastic IP address, it is released to the IP address
-// pool and can be allocated to a different AWS account.
-//
-// You can allocate an Elastic IP address from an address pool owned by AWS
-// or from an address pool created from a public IPv4 address range that you
-// have brought to AWS for use with your AWS resources using bring your own
-// IP addresses (BYOIP). For more information, see Bring Your Own IP Addresses
-// (BYOIP) (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html)
+// Allocates an Elastic IP address to your Amazon Web Services account. After
+// you allocate the Elastic IP address you can associate it with an instance
+// or network interface. After you release an Elastic IP address, it is released
+// to the IP address pool and can be allocated to a different Amazon Web Services
+// account.
+//
+// You can allocate an Elastic IP address from an address pool owned by Amazon
+// Web Services or from an address pool created from a public IPv4 address range
+// that you have brought to Amazon Web Services for use with your Amazon Web
+// Services resources using bring your own IP addresses (BYOIP). For more information,
+// see Bring Your Own IP Addresses (BYOIP) (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html)
// in the Amazon Elastic Compute Cloud User Guide.
//
// [EC2-VPC] If you release an Elastic IP address, you might be able to recover
// it. You cannot recover an Elastic IP address that you released after it is
-// allocated to another AWS account. You cannot recover an Elastic IP address
-// for EC2-Classic. To attempt to recover an Elastic IP address that you released,
-// specify it in this operation.
+// allocated to another Amazon Web Services account. You cannot recover an Elastic
+// IP address for EC2-Classic. To attempt to recover an Elastic IP address that
+// you released, specify it in this operation.
//
// An Elastic IP address is for use either in the EC2-Classic platform or in
// a VPC. By default, you can allocate 5 Elastic IP addresses for EC2-Classic
@@ -868,6 +870,12 @@ func (c *EC2) AssignIpv6AddressesRequest(input *AssignIpv6AddressesInput) (req *
// You must specify either the IPv6 addresses or the IPv6 address count in the
// request.
//
+// You can optionally use Prefix Delegation on the network interface. You must
+// specify either the IPv6 Prefix Delegation prefixes, or the IPv6 Prefix Delegation
+// count. For information, see Assigning prefixes to Amazon EC2 network interfaces
+// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-prefix-eni.html)
+// in the Amazon Elastic Compute Cloud User Guide.
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -961,6 +969,12 @@ func (c *EC2) AssignPrivateIpAddressesRequest(input *AssignPrivateIpAddressesInp
//
// You must specify either the IP addresses or the IP address count in the request.
//
+// You can optionally use Prefix Delegation on the network interface. You must
+// specify either the IPv4 Prefix Delegation prefixes, or the IPv4 Prefix Delegation
+// count. For information, see Assigning prefixes to Amazon EC2 network interfaces
+// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-prefix-eni.html)
+// in the Amazon Elastic Compute Cloud User Guide.
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -1233,7 +1247,7 @@ func (c *EC2) AssociateDhcpOptionsRequest(input *AssociateDhcpOptionsInput) (req
// its DHCP lease. You can explicitly renew the lease using the operating system
// on the instance.
//
-// For more information, see DHCP Options Sets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_DHCP_Options.html)
+// For more information, see DHCP options sets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_DHCP_Options.html)
// in the Amazon Virtual Private Cloud User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -1308,25 +1322,25 @@ func (c *EC2) AssociateEnclaveCertificateIamRoleRequest(input *AssociateEnclaveC
// AssociateEnclaveCertificateIamRole API operation for Amazon Elastic Compute Cloud.
//
-// Associates an AWS Identity and Access Management (IAM) role with an AWS Certificate
+// Associates an Identity and Access Management (IAM) role with a Certificate
// Manager (ACM) certificate. This enables the certificate to be used by the
// ACM for Nitro Enclaves application inside an enclave. For more information,
-// see AWS Certificate Manager for Nitro Enclaves (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave-refapp.html)
-// in the AWS Nitro Enclaves User Guide.
+// see Certificate Manager for Nitro Enclaves (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave-refapp.html)
+// in the Amazon Web Services Nitro Enclaves User Guide.
//
-// When the IAM role is associated with the ACM certificate, places the certificate,
-// certificate chain, and encrypted private key in an Amazon S3 bucket that
-// only the associated IAM role can access. The private key of the certificate
-// is encrypted with an AWS-managed KMS customer master (CMK) that has an attached
-// attestation-based CMK policy.
+// When the IAM role is associated with the ACM certificate, the certificate,
+// certificate chain, and encrypted private key are placed in an Amazon S3 bucket
+// that only the associated IAM role can access. The private key of the certificate
+// is encrypted with an Amazon Web Services managed key that has an attached
+// attestation-based key policy.
//
// To enable the IAM role to access the Amazon S3 object, you must grant it
// permission to call s3:GetObject on the Amazon S3 bucket returned by the command.
-// To enable the IAM role to access the AWS KMS CMK, you must grant it permission
-// to call kms:Decrypt on AWS KMS CMK returned by the command. For more information,
+// To enable the IAM role to access the KMS key, you must grant it permission
+// to call kms:Decrypt on the KMS key returned by the command. For more information,
// see Grant the role permission to access the certificate and encryption key
// (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave-refapp.html#add-policy)
-// in the AWS Nitro Enclaves User Guide.
+// in the Amazon Web Services Nitro Enclaves User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -1431,6 +1445,85 @@ func (c *EC2) AssociateIamInstanceProfileWithContext(ctx aws.Context, input *Ass
return out, req.Send()
}
+const opAssociateInstanceEventWindow = "AssociateInstanceEventWindow"
+
+// AssociateInstanceEventWindowRequest generates a "aws/request.Request" representing the
+// client's request for the AssociateInstanceEventWindow operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See AssociateInstanceEventWindow for more information on using the AssociateInstanceEventWindow
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the AssociateInstanceEventWindowRequest method.
+// req, resp := client.AssociateInstanceEventWindowRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateInstanceEventWindow
+func (c *EC2) AssociateInstanceEventWindowRequest(input *AssociateInstanceEventWindowInput) (req *request.Request, output *AssociateInstanceEventWindowOutput) {
+ op := &request.Operation{
+ Name: opAssociateInstanceEventWindow,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &AssociateInstanceEventWindowInput{}
+ }
+
+ output = &AssociateInstanceEventWindowOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// AssociateInstanceEventWindow API operation for Amazon Elastic Compute Cloud.
+//
+// Associates one or more targets with an event window. Only one type of target
+// (instance IDs, Dedicated Host IDs, or tags) can be specified with an event
+// window.
+//
+// For more information, see Define event windows for scheduled events (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/event-windows.html)
+// in the Amazon EC2 User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation AssociateInstanceEventWindow for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateInstanceEventWindow
+func (c *EC2) AssociateInstanceEventWindow(input *AssociateInstanceEventWindowInput) (*AssociateInstanceEventWindowOutput, error) {
+ req, out := c.AssociateInstanceEventWindowRequest(input)
+ return out, req.Send()
+}
+
+// AssociateInstanceEventWindowWithContext is the same as AssociateInstanceEventWindow with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssociateInstanceEventWindow for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) AssociateInstanceEventWindowWithContext(ctx aws.Context, input *AssociateInstanceEventWindowInput, opts ...request.Option) (*AssociateInstanceEventWindowOutput, error) {
+ req, out := c.AssociateInstanceEventWindowRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
const opAssociateRouteTable = "AssociateRouteTable"
// AssociateRouteTableRequest generates a "aws/request.Request" representing the
@@ -1482,7 +1575,7 @@ func (c *EC2) AssociateRouteTableRequest(input *AssociateRouteTableInput) (req *
// in order to disassociate the route table later. A route table can be associated
// with multiple subnets.
//
-// For more information, see Route Tables (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html)
+// For more information, see Route tables (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html)
// in the Amazon Virtual Private Cloud User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -1743,6 +1836,89 @@ func (c *EC2) AssociateTransitGatewayRouteTableWithContext(ctx aws.Context, inpu
return out, req.Send()
}
+const opAssociateTrunkInterface = "AssociateTrunkInterface"
+
+// AssociateTrunkInterfaceRequest generates a "aws/request.Request" representing the
+// client's request for the AssociateTrunkInterface operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See AssociateTrunkInterface for more information on using the AssociateTrunkInterface
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the AssociateTrunkInterfaceRequest method.
+// req, resp := client.AssociateTrunkInterfaceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateTrunkInterface
+func (c *EC2) AssociateTrunkInterfaceRequest(input *AssociateTrunkInterfaceInput) (req *request.Request, output *AssociateTrunkInterfaceOutput) {
+ op := &request.Operation{
+ Name: opAssociateTrunkInterface,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &AssociateTrunkInterfaceInput{}
+ }
+
+ output = &AssociateTrunkInterfaceOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// AssociateTrunkInterface API operation for Amazon Elastic Compute Cloud.
+//
+//
+// This API action is currently in limited preview only. If you are interested
+// in using this feature, contact your account manager.
+//
+// Associates a branch network interface with a trunk network interface.
+//
+// Before you create the association, run the create-network-interface (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateNetworkInterface.html)
+// command and set --interface-type to trunk. You must also create a network
+// interface for each branch network interface that you want to associate with
+// the trunk network interface.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation AssociateTrunkInterface for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateTrunkInterface
+func (c *EC2) AssociateTrunkInterface(input *AssociateTrunkInterfaceInput) (*AssociateTrunkInterfaceOutput, error) {
+ req, out := c.AssociateTrunkInterfaceRequest(input)
+ return out, req.Send()
+}
+
+// AssociateTrunkInterfaceWithContext is the same as AssociateTrunkInterface with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssociateTrunkInterface for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) AssociateTrunkInterfaceWithContext(ctx aws.Context, input *AssociateTrunkInterfaceInput, opts ...request.Option) (*AssociateTrunkInterfaceOutput, error) {
+ req, out := c.AssociateTrunkInterfaceRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
const opAssociateVpcCidrBlock = "AssociateVpcCidrBlock"
// AssociateVpcCidrBlockRequest generates a "aws/request.Request" representing the
@@ -1797,7 +1973,7 @@ func (c *EC2) AssociateVpcCidrBlockRequest(input *AssociateVpcCidrBlockInput) (r
// an IPv6 pool, or an Amazon-provided IPv6 CIDR block.
//
// For more information about associating CIDR blocks with your VPC and applicable
-// restrictions, see VPC and Subnet Sizing (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html#VPC_Sizing)
+// restrictions, see VPC and subnet sizing (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html#VPC_Sizing)
// in the Amazon Virtual Private Cloud User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -2117,13 +2293,14 @@ func (c *EC2) AttachVolumeRequest(input *AttachVolumeInput) (req *request.Reques
// in the Amazon Elastic Compute Cloud User Guide.
//
// After you attach an EBS volume, you must make it available. For more information,
-// see Making an EBS volume available for use (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-using-volumes.html).
+// see Make an EBS volume available for use (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-using-volumes.html).
//
-// If a volume has an AWS Marketplace product code:
+// If a volume has an Amazon Web Services Marketplace product code:
//
// * The volume can be attached only to a stopped instance.
//
-// * AWS Marketplace product codes are copied from the volume to the instance.
+// * Amazon Web Services Marketplace product codes are copied from the volume
+// to the instance.
//
// * You must be subscribed to the product.
//
@@ -2131,7 +2308,7 @@ func (c *EC2) AttachVolumeRequest(input *AttachVolumeInput) (req *request.Reques
// the product. For example, you can't detach a volume from a Windows instance
// and attach it to a Linux instance.
//
-// For more information, see Attaching Amazon EBS volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-attaching-volume.html)
+// For more information, see Attach an Amazon EBS volume to an instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-attaching-volume.html)
// in the Amazon Elastic Compute Cloud User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -2356,18 +2533,17 @@ func (c *EC2) AuthorizeSecurityGroupEgressRequest(input *AuthorizeSecurityGroupE
output = &AuthorizeSecurityGroupEgressOutput{}
req = c.newRequest(op, input, output)
- req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
// AuthorizeSecurityGroupEgress API operation for Amazon Elastic Compute Cloud.
//
-// [VPC only] Adds the specified egress rules to a security group for use with
-// a VPC.
+// [VPC only] Adds the specified outbound (egress) rules to a security group
+// for use with a VPC.
//
// An outbound rule permits instances to send traffic to the specified IPv4
-// or IPv6 CIDR address ranges, or to the instances associated with the specified
-// destination security groups.
+// or IPv6 CIDR address ranges, or to the instances that are associated with
+// the specified destination security groups.
//
// You specify a protocol for each rule (for example, TCP). For the TCP and
// UDP protocols, you must also specify the destination port or port range.
@@ -2377,8 +2553,7 @@ func (c *EC2) AuthorizeSecurityGroupEgressRequest(input *AuthorizeSecurityGroupE
// Rule changes are propagated to affected instances as quickly as possible.
// However, a small delay might occur.
//
-// For more information about VPC security group limits, see Amazon VPC Limits
-// (https://docs.aws.amazon.com/vpc/latest/userguide/amazon-vpc-limits.html).
+// For information about VPC security group quotas, see Amazon VPC quotas (https://docs.aws.amazon.com/vpc/latest/userguide/amazon-vpc-limits.html).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -2447,17 +2622,16 @@ func (c *EC2) AuthorizeSecurityGroupIngressRequest(input *AuthorizeSecurityGroup
output = &AuthorizeSecurityGroupIngressOutput{}
req = c.newRequest(op, input, output)
- req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
// AuthorizeSecurityGroupIngress API operation for Amazon Elastic Compute Cloud.
//
-// Adds the specified ingress rules to a security group.
+// Adds the specified inbound (ingress) rules to a security group.
//
// An inbound rule permits instances to receive traffic from the specified IPv4
-// or IPv6 CIDR address ranges, or from the instances associated with the specified
-// destination security groups.
+// or IPv6 CIDR address range, or from the instances that are associated with
+// the specified destination security groups.
//
// You specify a protocol for each rule (for example, TCP). For TCP and UDP,
// you must also specify the destination port or port range. For ICMP/ICMPv6,
@@ -2467,7 +2641,7 @@ func (c *EC2) AuthorizeSecurityGroupIngressRequest(input *AuthorizeSecurityGroup
// Rule changes are propagated to instances within the security group as quickly
// as possible. However, a small delay might occur.
//
-// For more information about VPC security group limits, see Amazon VPC Limits
+// For more information about VPC security group quotas, see Amazon VPC quotas
// (https://docs.aws.amazon.com/vpc/latest/userguide/amazon-vpc-limits.html).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -3015,7 +3189,7 @@ func (c *EC2) CancelReservedInstancesListingRequest(input *CancelReservedInstanc
// Marketplace.
//
// For more information, see Reserved Instance Marketplace (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -3397,14 +3571,26 @@ func (c *EC2) CopyImageRequest(input *CopyImageInput) (req *request.Request, out
// CopyImage API operation for Amazon Elastic Compute Cloud.
//
-// Initiates the copy of an AMI from the specified source Region to the current
-// Region. You specify the destination Region by using its endpoint when making
-// the request.
-//
-// Copies of encrypted backing snapshots for the AMI are encrypted. Copies of
-// unencrypted backing snapshots remain unencrypted, unless you set Encrypted
-// during the copy operation. You cannot create an unencrypted copy of an encrypted
-// backing snapshot.
+// Initiates the copy of an AMI. You can copy an AMI from one Region to another,
+// or from a Region to an AWS Outpost. You can't copy an AMI from an Outpost
+// to a Region, from one Outpost to another, or within the same Outpost. To
+// copy an AMI to another partition, see CreateStoreImageTask (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateStoreImageTask.html).
+//
+// To copy an AMI from one Region to another, specify the source Region using
+// the SourceRegion parameter, and specify the destination Region using its
+// endpoint. Copies of encrypted backing snapshots for the AMI are encrypted.
+// Copies of unencrypted backing snapshots remain unencrypted, unless you set
+// Encrypted during the copy operation. You cannot create an unencrypted copy
+// of an encrypted backing snapshot.
+//
+// To copy an AMI from a Region to an Outpost, specify the source Region using
+// the SourceRegion parameter, and specify the ARN of the destination Outpost
+// using DestinationOutpostArn. Backing snapshots copied to an Outpost are encrypted
+// by default using the default encryption key for the Region, or a different
+// key that you specify in the request using KmsKeyId. Outposts do not support
+// unencrypted snapshots. For more information, see Amazon EBS local snapshots on
+// Outposts (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snapshots-outposts.html#ami)
+// in the Amazon Elastic Compute Cloud User Guide.
//
// For more information about the prerequisites and limits when copying an AMI,
// see Copying an AMI (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/CopyingAMIs.html)
@@ -3483,23 +3669,30 @@ func (c *EC2) CopySnapshotRequest(input *CopySnapshotInput) (req *request.Reques
// CopySnapshot API operation for Amazon Elastic Compute Cloud.
//
// Copies a point-in-time snapshot of an EBS volume and stores it in Amazon
-// S3. You can copy the snapshot within the same Region or from one Region to
-// another. You can use the snapshot to create EBS volumes or Amazon Machine
-// Images (AMIs).
-//
-// Copies of encrypted EBS snapshots remain encrypted. Copies of unencrypted
-// snapshots remain unencrypted, unless you enable encryption for the snapshot
-// copy operation. By default, encrypted snapshot copies use the default AWS
-// Key Management Service (AWS KMS) customer master key (CMK); however, you
-// can specify a different CMK.
-//
-// To copy an encrypted snapshot that has been shared from another account,
-// you must have permissions for the CMK used to encrypt the snapshot.
+// S3. You can copy a snapshot within the same Region, from one Region to another,
+// or from a Region to an Outpost. You can't copy a snapshot from an Outpost
+// to a Region, from one Outpost to another, or within the same Outpost.
+//
+// You can use the snapshot to create EBS volumes or Amazon Machine Images (AMIs).
+//
+// When copying snapshots to a Region, copies of encrypted EBS snapshots remain
+// encrypted. Copies of unencrypted snapshots remain unencrypted, unless you
+// enable encryption for the snapshot copy operation. By default, encrypted
+// snapshot copies use the default Key Management Service (KMS) KMS key; however,
+// you can specify a different KMS key. To copy an encrypted snapshot that has
+// been shared from another account, you must have permissions for the KMS key
+// used to encrypt the snapshot.
+//
+// Snapshots copied to an Outpost are encrypted by default using the default
+// encryption key for the Region, or a different key that you specify in the
+// request using KmsKeyId. Outposts do not support unencrypted snapshots. For
+// more information, see Amazon EBS local snapshots on Outposts (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snapshots-outposts.html#ami)
+// in the Amazon Elastic Compute Cloud User Guide.
//
// Snapshots created by copying another snapshot have an arbitrary volume ID
// that should not be used for any purpose.
//
-// For more information, see Copying an Amazon EBS snapshot (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-copy-snapshot.html)
+// For more information, see Copy an Amazon EBS snapshot (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-copy-snapshot.html)
// in the Amazon Elastic Compute Cloud User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -3583,7 +3776,7 @@ func (c *EC2) CreateCapacityReservationRequest(input *CreateCapacityReservationI
// you ensure that you always have access to Amazon EC2 capacity when you need
// it, for as long as you need it. For more information, see Capacity Reservations
// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-capacity-reservations.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// Your request to create a Capacity Reservation could fail if Amazon EC2 does
// not have sufficient capacity to fulfill the request. If your request fails
@@ -3596,8 +3789,8 @@ func (c *EC2) CreateCapacityReservationRequest(input *CreateCapacityReservationI
// Instance limit for the selected instance type. If your request fails due
// to limit constraints, increase your On-Demand Instance limit for the required
// instance type and try again. For more information about increasing your instance
-// limits, see Amazon EC2 Service Limits (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-resource-limits.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// limits, see Amazon EC2 Service Quotas (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-resource-limits.html)
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -3673,7 +3866,7 @@ func (c *EC2) CreateCarrierGatewayRequest(input *CreateCarrierGatewayInput) (req
//
// Creates a carrier gateway. For more information about carrier gateways, see
// Carrier gateways (https://docs.aws.amazon.com/wavelength/latest/developerguide/how-wavelengths-work.html#wavelength-carrier-gateway)
-// in the AWS Wavelength Developer Guide.
+// in the Amazon Web Services Wavelength Developer Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -4005,7 +4198,7 @@ func (c *EC2) CreateDefaultSubnetRequest(input *CreateDefaultSubnetInput) (req *
//
// Creates a default subnet with a size /20 IPv4 CIDR block in the specified
// Availability Zone in your default VPC. You can have only one default subnet
-// per Availability Zone. For more information, see Creating a Default Subnet
+// per Availability Zone. For more information, see Creating a default subnet
// (https://docs.aws.amazon.com/vpc/latest/userguide/default-vpc.html#create-default-subnet)
// in the Amazon Virtual Private Cloud User Guide.
//
@@ -4083,7 +4276,7 @@ func (c *EC2) CreateDefaultVpcRequest(input *CreateDefaultVpcInput) (req *reques
//
// Creates a default VPC with a size /16 IPv4 CIDR block and a default subnet
// in each Availability Zone. For more information about the components of a
-// default VPC, see Default VPC and Default Subnets (https://docs.aws.amazon.com/vpc/latest/userguide/default-vpc.html)
+// default VPC, see Default VPC and default subnets (https://docs.aws.amazon.com/vpc/latest/userguide/default-vpc.html)
// in the Amazon Virtual Private Cloud User Guide. You cannot specify the components
// of the default VPC yourself.
//
@@ -4204,7 +4397,7 @@ func (c *EC2) CreateDhcpOptionsRequest(input *CreateDhcpOptionsInput) (req *requ
// only a DNS server that we provide (AmazonProvidedDNS). If you create a set
// of options, and if your VPC has an internet gateway, make sure to set the
// domain-name-servers option either to AmazonProvidedDNS or to a domain name
-// server of your choice. For more information, see DHCP Options Sets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_DHCP_Options.html)
+// server of your choice. For more information, see DHCP options sets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_DHCP_Options.html)
// in the Amazon Virtual Private Cloud User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -4362,7 +4555,7 @@ func (c *EC2) CreateFleetRequest(input *CreateFleetInput) (req *request.Request,
// that vary by instance type, AMI, Availability Zone, or subnet.
//
// For more information, see Launching an EC2 Fleet (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -4441,7 +4634,7 @@ func (c *EC2) CreateFlowLogsRequest(input *CreateFlowLogsInput) (req *request.Re
//
// Flow log data for a monitored network interface is recorded as flow log records,
// which are log events consisting of fields that describe the traffic flow.
-// For more information, see Flow Log Records (https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs.html#flow-log-records)
+// For more information, see Flow log records (https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs.html#flow-log-records)
// in the Amazon Virtual Private Cloud User Guide.
//
// When publishing to CloudWatch Logs, flow log records are published to a log
@@ -4645,6 +4838,103 @@ func (c *EC2) CreateImageWithContext(ctx aws.Context, input *CreateImageInput, o
return out, req.Send()
}
+const opCreateInstanceEventWindow = "CreateInstanceEventWindow"
+
+// CreateInstanceEventWindowRequest generates a "aws/request.Request" representing the
+// client's request for the CreateInstanceEventWindow operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateInstanceEventWindow for more information on using the CreateInstanceEventWindow
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the CreateInstanceEventWindowRequest method.
+// req, resp := client.CreateInstanceEventWindowRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateInstanceEventWindow
+func (c *EC2) CreateInstanceEventWindowRequest(input *CreateInstanceEventWindowInput) (req *request.Request, output *CreateInstanceEventWindowOutput) {
+ op := &request.Operation{
+ Name: opCreateInstanceEventWindow,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateInstanceEventWindowInput{}
+ }
+
+ output = &CreateInstanceEventWindowOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateInstanceEventWindow API operation for Amazon Elastic Compute Cloud.
+//
+// Creates an event window in which scheduled events for the associated Amazon
+// EC2 instances can run.
+//
+// You can define either a set of time ranges or a cron expression when creating
+// the event window, but not both. All event window times are in UTC.
+//
+// You can create up to 200 event windows per Amazon Web Services Region.
+//
+// When you create the event window, targets (instance IDs, Dedicated Host IDs,
+// or tags) are not yet associated with it. To ensure that the event window
+// can be used, you must associate one or more targets with it by using the
+// AssociateInstanceEventWindow API.
+//
+// Event windows are applicable only for scheduled events that stop, reboot,
+// or terminate instances.
+//
+// Event windows are not applicable for:
+//
+// * Expedited scheduled events and network maintenance events.
+//
+// * Unscheduled maintenance such as AutoRecovery and unplanned reboots.
+//
+// For more information, see Define event windows for scheduled events (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/event-windows.html)
+// in the Amazon EC2 User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation CreateInstanceEventWindow for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateInstanceEventWindow
+func (c *EC2) CreateInstanceEventWindow(input *CreateInstanceEventWindowInput) (*CreateInstanceEventWindowOutput, error) {
+ req, out := c.CreateInstanceEventWindowRequest(input)
+ return out, req.Send()
+}
+
+// CreateInstanceEventWindowWithContext is the same as CreateInstanceEventWindow with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateInstanceEventWindow for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) CreateInstanceEventWindowWithContext(ctx aws.Context, input *CreateInstanceEventWindowInput, opts ...request.Option) (*CreateInstanceEventWindowOutput, error) {
+ req, out := c.CreateInstanceEventWindowRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
const opCreateInstanceExportTask = "CreateInstanceExportTask"
// CreateInstanceExportTaskRequest generates a "aws/request.Request" representing the
@@ -4693,7 +4983,7 @@ func (c *EC2) CreateInstanceExportTaskRequest(input *CreateInstanceExportTaskInp
//
// For information about the supported operating systems, image formats, and
// known limitations for the types of instances you can export, see Exporting
-// an Instance as a VM Using VM Import/Export (https://docs.aws.amazon.com/vm-import/latest/userguide/vmexport.html)
+// an instance as a VM Using VM Import/Export (https://docs.aws.amazon.com/vm-import/latest/userguide/vmexport.html)
// in the VM Import/Export User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -4846,18 +5136,19 @@ func (c *EC2) CreateKeyPairRequest(input *CreateKeyPairInput) (req *request.Requ
// CreateKeyPair API operation for Amazon Elastic Compute Cloud.
//
-// Creates a 2048-bit RSA key pair with the specified name. Amazon EC2 stores
-// the public key and displays the private key for you to save to a file. The
-// private key is returned as an unencrypted PEM encoded PKCS#1 private key.
-// If a key with the specified name already exists, Amazon EC2 returns an error.
+// Creates an ED25519 or 2048-bit RSA key pair with the specified name. Amazon
+// EC2 stores the public key and displays the private key for you to save to
+// a file. The private key is returned as an unencrypted PEM encoded PKCS#1
+// private key. If a key with the specified name already exists, Amazon EC2
+// returns an error.
//
-// You can have up to five thousand key pairs per Region.
+// The key pair returned to you is available only in the Amazon Web Services
+// Region in which you create it. If you prefer, you can create your own key
+// pair using a third-party tool and upload it to any Region using ImportKeyPair.
//
-// The key pair returned to you is available only in the Region in which you
-// create it. If you prefer, you can create your own key pair using a third-party
-// tool and upload it to any Region using ImportKeyPair.
+// You can have up to 5,000 key pairs per Amazon Web Services Region.
//
-// For more information, see Key Pairs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html)
+// For more information, see Amazon EC2 key pairs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html)
// in the Amazon Elastic Compute Cloud User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -5317,12 +5608,22 @@ func (c *EC2) CreateNatGatewayRequest(input *CreateNatGatewayInput) (req *reques
// CreateNatGateway API operation for Amazon Elastic Compute Cloud.
//
-// Creates a NAT gateway in the specified public subnet. This action creates
-// a network interface in the specified subnet with a private IP address from
-// the IP address range of the subnet. Internet-bound traffic from a private
-// subnet can be routed to the NAT gateway, therefore enabling instances in
-// the private subnet to connect to the internet. For more information, see
-// NAT Gateways (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html)
+// Creates a NAT gateway in the specified subnet. This action creates a network
+// interface in the specified subnet with a private IP address from the IP address
+// range of the subnet. You can create either a public NAT gateway or a private
+// NAT gateway.
+//
+// With a public NAT gateway, internet-bound traffic from a private subnet can
+// be routed to the NAT gateway, so that instances in a private subnet can connect
+// to the internet.
+//
+// With a private NAT gateway, private communication is routed across VPCs and
+// on-premises networks through a transit gateway or virtual private gateway.
+// Common use cases include running large workloads behind a small pool of allowlisted
+// IPv4 addresses, preserving private IPv4 addresses, and communicating between
+// overlapping networks.
+//
+// For more information, see NAT gateways (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html)
// in the Amazon Virtual Private Cloud User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -5722,11 +6023,11 @@ func (c *EC2) CreateNetworkInterfacePermissionRequest(input *CreateNetworkInterf
// CreateNetworkInterfacePermission API operation for Amazon Elastic Compute Cloud.
//
-// Grants an AWS-authorized account permission to attach the specified network
-// interface to an instance in their account.
+// Grants an Amazon Web Services-authorized account permission to attach the
+// specified network interface to an instance in their account.
//
-// You can grant permission to a single AWS account only, and only one account
-// at a time.
+// You can grant permission to a single Amazon Web Services account only, and
+// only one account at a time.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -5811,7 +6112,7 @@ func (c *EC2) CreatePlacementGroupRequest(input *CreatePlacementGroupInput) (req
// in another partition.
//
// For more information, see Placement groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -5841,6 +6142,85 @@ func (c *EC2) CreatePlacementGroupWithContext(ctx aws.Context, input *CreatePlac
return out, req.Send()
}
+const opCreateReplaceRootVolumeTask = "CreateReplaceRootVolumeTask"
+
+// CreateReplaceRootVolumeTaskRequest generates a "aws/request.Request" representing the
+// client's request for the CreateReplaceRootVolumeTask operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateReplaceRootVolumeTask for more information on using the CreateReplaceRootVolumeTask
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the CreateReplaceRootVolumeTaskRequest method.
+// req, resp := client.CreateReplaceRootVolumeTaskRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateReplaceRootVolumeTask
+func (c *EC2) CreateReplaceRootVolumeTaskRequest(input *CreateReplaceRootVolumeTaskInput) (req *request.Request, output *CreateReplaceRootVolumeTaskOutput) {
+ op := &request.Operation{
+ Name: opCreateReplaceRootVolumeTask,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateReplaceRootVolumeTaskInput{}
+ }
+
+ output = &CreateReplaceRootVolumeTaskOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateReplaceRootVolumeTask API operation for Amazon Elastic Compute Cloud.
+//
+// Creates a root volume replacement task for an Amazon EC2 instance. The root
+// volume can either be restored to its initial launch state, or it can be restored
+// using a specific snapshot.
+//
+// For more information, see Replace a root volume (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-restoring-volume.html#replace-root)
+// in the Amazon Elastic Compute Cloud User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation CreateReplaceRootVolumeTask for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateReplaceRootVolumeTask
+func (c *EC2) CreateReplaceRootVolumeTask(input *CreateReplaceRootVolumeTaskInput) (*CreateReplaceRootVolumeTaskOutput, error) {
+ req, out := c.CreateReplaceRootVolumeTaskRequest(input)
+ return out, req.Send()
+}
+
+// CreateReplaceRootVolumeTaskWithContext is the same as CreateReplaceRootVolumeTask with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateReplaceRootVolumeTask for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) CreateReplaceRootVolumeTaskWithContext(ctx aws.Context, input *CreateReplaceRootVolumeTaskInput, opts ...request.Option) (*CreateReplaceRootVolumeTaskOutput, error) {
+ req, out := c.CreateReplaceRootVolumeTaskRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
const opCreateReservedInstancesListing = "CreateReservedInstancesListing"
// CreateReservedInstancesListingRequest generates a "aws/request.Request" representing the
@@ -5907,7 +6287,7 @@ func (c *EC2) CreateReservedInstancesListingRequest(input *CreateReservedInstanc
// you can use the DescribeReservedInstancesListings operation.
//
// For more information, see Reserved Instance Marketplace (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -5937,6 +6317,88 @@ func (c *EC2) CreateReservedInstancesListingWithContext(ctx aws.Context, input *
return out, req.Send()
}
+const opCreateRestoreImageTask = "CreateRestoreImageTask"
+
+// CreateRestoreImageTaskRequest generates a "aws/request.Request" representing the
+// client's request for the CreateRestoreImageTask operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateRestoreImageTask for more information on using the CreateRestoreImageTask
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the CreateRestoreImageTaskRequest method.
+// req, resp := client.CreateRestoreImageTaskRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateRestoreImageTask
+func (c *EC2) CreateRestoreImageTaskRequest(input *CreateRestoreImageTaskInput) (req *request.Request, output *CreateRestoreImageTaskOutput) {
+ op := &request.Operation{
+ Name: opCreateRestoreImageTask,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateRestoreImageTaskInput{}
+ }
+
+ output = &CreateRestoreImageTaskOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateRestoreImageTask API operation for Amazon Elastic Compute Cloud.
+//
+// Starts a task that restores an AMI from an S3 object that was previously
+// created by using CreateStoreImageTask (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateStoreImageTask.html).
+//
+// To use this API, you must have the required permissions. For more information,
+// see Permissions for storing and restoring AMIs using S3 (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-store-restore.html#ami-s3-permissions)
+// in the Amazon Elastic Compute Cloud User Guide.
+//
+// For more information, see Store and restore an AMI using S3 (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-store-restore.html)
+// in the Amazon Elastic Compute Cloud User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation CreateRestoreImageTask for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateRestoreImageTask
+func (c *EC2) CreateRestoreImageTask(input *CreateRestoreImageTaskInput) (*CreateRestoreImageTaskOutput, error) {
+ req, out := c.CreateRestoreImageTaskRequest(input)
+ return out, req.Send()
+}
+
+// CreateRestoreImageTaskWithContext is the same as CreateRestoreImageTask with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateRestoreImageTask for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) CreateRestoreImageTaskWithContext(ctx aws.Context, input *CreateRestoreImageTaskInput, opts ...request.Option) (*CreateRestoreImageTaskOutput, error) {
+ req, out := c.CreateRestoreImageTaskRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
const opCreateRoute = "CreateRoute"
// CreateRouteRequest generates a "aws/request.Request" representing the
@@ -5999,7 +6461,7 @@ func (c *EC2) CreateRouteRequest(input *CreateRouteInput) (req *request.Request,
// route in the list covers a smaller number of IP addresses and is therefore
// more specific, so we use that route to determine where to target the traffic.
//
-// For more information about route tables, see Route Tables (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html)
+// For more information about route tables, see Route tables (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html)
// in the Amazon Virtual Private Cloud User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -6077,7 +6539,7 @@ func (c *EC2) CreateRouteTableRequest(input *CreateRouteTableInput) (req *reques
// Creates a route table for the specified VPC. After you create a route table,
// you can add routes and associate the table with a subnet.
//
-// For more information, see Route Tables (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html)
+// For more information, see Route tables (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html)
// in the Amazon Virtual Private Cloud User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -6155,9 +6617,9 @@ func (c *EC2) CreateSecurityGroupRequest(input *CreateSecurityGroupInput) (req *
// Creates a security group.
//
// A security group acts as a virtual firewall for your instance to control
-// inbound and outbound traffic. For more information, see Amazon EC2 Security
-// Groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html)
-// in the Amazon Elastic Compute Cloud User Guide and Security Groups for Your
+// inbound and outbound traffic. For more information, see Amazon EC2 security
+// groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html)
+// in the Amazon Elastic Compute Cloud User Guide and Security groups for your
// VPC (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html)
// in the Amazon Virtual Private Cloud User Guide.
//
@@ -6255,28 +6717,35 @@ func (c *EC2) CreateSnapshotRequest(input *CreateSnapshotInput) (req *request.Re
// snapshots for backups, to make copies of EBS volumes, and to save data before
// shutting down an instance.
//
-// When a snapshot is created, any AWS Marketplace product codes that are associated
-// with the source volume are propagated to the snapshot.
+// You can create snapshots of volumes in a Region and volumes on an Outpost.
+// If you create a snapshot of a volume in a Region, the snapshot must be stored
+// in the same Region as the volume. If you create a snapshot of a volume on
+// an Outpost, the snapshot can be stored on the same Outpost as the volume,
+// or in the Region for that Outpost.
+//
+// When a snapshot is created, any Amazon Web Services Marketplace product codes
+// that are associated with the source volume are propagated to the snapshot.
//
// You can take a snapshot of an attached volume that is in use. However, snapshots
-// only capture data that has been written to your EBS volume at the time the
-// snapshot command is issued; this might exclude any data that has been cached
-// by any applications or the operating system. If you can pause any file systems
-// on the volume long enough to take a snapshot, your snapshot should be complete.
-// However, if you cannot pause all file writes to the volume, you should unmount
-// the volume from within the instance, issue the snapshot command, and then
-// remount the volume to ensure a consistent and complete snapshot. You may
-// remount and use your volume while the snapshot status is pending.
-//
-// To create a snapshot for EBS volumes that serve as root devices, you should
-// stop the instance before taking the snapshot.
+// only capture data that has been written to your Amazon EBS volume at the
+// time the snapshot command is issued; this might exclude any data that has
+// been cached by any applications or the operating system. If you can pause
+// any file systems on the volume long enough to take a snapshot, your snapshot
+// should be complete. However, if you cannot pause all file writes to the volume,
+// you should unmount the volume from within the instance, issue the snapshot
+// command, and then remount the volume to ensure a consistent and complete
+// snapshot. You may remount and use your volume while the snapshot status is
+// pending.
+//
+// To create a snapshot for Amazon EBS volumes that serve as root devices, you
+// should stop the instance before taking the snapshot.
//
// Snapshots that are taken from encrypted volumes are automatically encrypted.
// Volumes that are created from encrypted snapshots are also automatically
// encrypted. Your encrypted volumes and any associated snapshots always remain
// protected.
//
-// You can tag your snapshots during creation. For more information, see Tagging
+// You can tag your snapshots during creation. For more information, see Tag
// your Amazon EC2 resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html)
// in the Amazon Elastic Compute Cloud User Guide.
//
@@ -6361,6 +6830,12 @@ func (c *EC2) CreateSnapshotsRequest(input *CreateSnapshotsInput) (req *request.
// will produce one snapshot each that is crash-consistent across the instance.
// Boot volumes can be excluded by changing the parameters.
//
+// You can create multi-volume snapshots of instances in a Region and instances
+// on an Outpost. If you create snapshots from an instance in a Region, the
+// snapshots must be stored in the same Region as the instance. If you create
+// snapshots from an instance on an Outpost, the snapshots can be stored on
+// the same Outpost as the instance, or in the Region for that Outpost.
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -6434,8 +6909,8 @@ func (c *EC2) CreateSpotDatafeedSubscriptionRequest(input *CreateSpotDatafeedSub
// CreateSpotDatafeedSubscription API operation for Amazon Elastic Compute Cloud.
//
// Creates a data feed for Spot Instances, enabling you to view Spot Instance
-// usage logs. You can create one data feed per AWS account. For more information,
-// see Spot Instance data feed (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html)
+// usage logs. You can create one data feed per Amazon Web Services account.
+// For more information, see Spot Instance data feed (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html)
// in the Amazon EC2 User Guide for Linux Instances.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -6466,6 +6941,87 @@ func (c *EC2) CreateSpotDatafeedSubscriptionWithContext(ctx aws.Context, input *
return out, req.Send()
}
+const opCreateStoreImageTask = "CreateStoreImageTask"
+
+// CreateStoreImageTaskRequest generates a "aws/request.Request" representing the
+// client's request for the CreateStoreImageTask operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateStoreImageTask for more information on using the CreateStoreImageTask
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the CreateStoreImageTaskRequest method.
+// req, resp := client.CreateStoreImageTaskRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateStoreImageTask
+func (c *EC2) CreateStoreImageTaskRequest(input *CreateStoreImageTaskInput) (req *request.Request, output *CreateStoreImageTaskOutput) {
+ op := &request.Operation{
+ Name: opCreateStoreImageTask,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateStoreImageTaskInput{}
+ }
+
+ output = &CreateStoreImageTaskOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateStoreImageTask API operation for Amazon Elastic Compute Cloud.
+//
+// Stores an AMI as a single object in an S3 bucket.
+//
+// To use this API, you must have the required permissions. For more information,
+// see Permissions for storing and restoring AMIs using S3 (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-store-restore.html#ami-s3-permissions)
+// in the Amazon Elastic Compute Cloud User Guide.
+//
+// For more information, see Store and restore an AMI using S3 (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-store-restore.html)
+// in the Amazon Elastic Compute Cloud User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation CreateStoreImageTask for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateStoreImageTask
+func (c *EC2) CreateStoreImageTask(input *CreateStoreImageTaskInput) (*CreateStoreImageTaskOutput, error) {
+ req, out := c.CreateStoreImageTaskRequest(input)
+ return out, req.Send()
+}
+
+// CreateStoreImageTaskWithContext is the same as CreateStoreImageTask with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateStoreImageTask for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) CreateStoreImageTaskWithContext(ctx aws.Context, input *CreateStoreImageTaskInput, opts ...request.Option) (*CreateStoreImageTaskOutput, error) {
+ req, out := c.CreateStoreImageTaskRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
const opCreateSubnet = "CreateSubnet"
// CreateSubnetRequest generates a "aws/request.Request" representing the
@@ -6520,8 +7076,8 @@ func (c *EC2) CreateSubnetRequest(input *CreateSubnetInput) (req *request.Reques
// If you've associated an IPv6 CIDR block with your VPC, you can create a subnet
// with an IPv6 CIDR block that uses a /64 prefix length.
//
-// AWS reserves both the first four and the last IPv4 address in each subnet's
-// CIDR block. They're not available for use.
+// Amazon Web Services reserves both the first four and the last IPv4 address
+// in each subnet's CIDR block. They're not available for use.
//
// If you add more than one subnet to a VPC, they're set up in a star topology
// with a logical router in the middle.
@@ -6530,7 +7086,7 @@ func (c *EC2) CreateSubnetRequest(input *CreateSubnetInput) (req *request.Reques
// It's therefore possible to have a subnet with no running instances (they're
// all stopped), but no remaining IP addresses available.
//
-// For more information about subnets, see Your VPC and Subnets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html)
+// For more information about subnets, see Your VPC and subnets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html)
// in the Amazon Virtual Private Cloud User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -6561,6 +7117,82 @@ func (c *EC2) CreateSubnetWithContext(ctx aws.Context, input *CreateSubnetInput,
return out, req.Send()
}
+const opCreateSubnetCidrReservation = "CreateSubnetCidrReservation"
+
+// CreateSubnetCidrReservationRequest generates a "aws/request.Request" representing the
+// client's request for the CreateSubnetCidrReservation operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateSubnetCidrReservation for more information on using the CreateSubnetCidrReservation
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the CreateSubnetCidrReservationRequest method.
+// req, resp := client.CreateSubnetCidrReservationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateSubnetCidrReservation
+func (c *EC2) CreateSubnetCidrReservationRequest(input *CreateSubnetCidrReservationInput) (req *request.Request, output *CreateSubnetCidrReservationOutput) {
+ op := &request.Operation{
+ Name: opCreateSubnetCidrReservation,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateSubnetCidrReservationInput{}
+ }
+
+ output = &CreateSubnetCidrReservationOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateSubnetCidrReservation API operation for Amazon Elastic Compute Cloud.
+//
+// Creates a subnet CIDR reservation. For information about subnet CIDR reservations,
+// see Subnet CIDR reservations (https://docs.aws.amazon.com/vpc/latest/userguide/subnet-cidr-reservation.html)
+// in the Amazon Virtual Private Cloud User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation CreateSubnetCidrReservation for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateSubnetCidrReservation
+func (c *EC2) CreateSubnetCidrReservation(input *CreateSubnetCidrReservationInput) (*CreateSubnetCidrReservationOutput, error) {
+ req, out := c.CreateSubnetCidrReservationRequest(input)
+ return out, req.Send()
+}
+
+// CreateSubnetCidrReservationWithContext is the same as CreateSubnetCidrReservation with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateSubnetCidrReservation for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) CreateSubnetCidrReservationWithContext(ctx aws.Context, input *CreateSubnetCidrReservationInput, opts ...request.Option) (*CreateSubnetCidrReservationOutput, error) {
+ req, out := c.CreateSubnetCidrReservationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
const opCreateTags = "CreateTags"
// CreateTagsRequest generates a "aws/request.Request" representing the
@@ -7116,8 +7748,8 @@ func (c *EC2) CreateTransitGatewayConnectRequest(input *CreateTransitGatewayConn
// A Connect attachment is a GRE-based tunnel attachment that you can use to
// establish a connection between a transit gateway and an appliance.
//
-// A Connect attachment uses an existing VPC or AWS Direct Connect attachment
-// as the underlying transport mechanism.
+// A Connect attachment uses an existing VPC or Amazon Web Services Direct Connect
+// attachment as the underlying transport mechanism.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -7353,7 +7985,7 @@ func (c *EC2) CreateTransitGatewayPeeringAttachmentRequest(input *CreateTransitG
// Requests a transit gateway peering attachment between the specified transit
// gateway (requester) and a peer transit gateway (accepter). The transit gateways
// must be in different Regions. The peer transit gateway can be in your account
-// or a different AWS account.
+// or a different Amazon Web Services account.
//
// After you create the peering attachment, the owner of the accepter transit
// gateway must accept the attachment request.
@@ -7738,8 +8370,8 @@ func (c *EC2) CreateVolumeRequest(input *CreateVolumeInput) (req *request.Reques
// Zone.
//
// You can create a new empty volume or restore a volume from an EBS snapshot.
-// Any AWS Marketplace product codes from the snapshot are propagated to the
-// volume.
+// Any Amazon Web Services Marketplace product codes from the snapshot are propagated
+// to the volume.
//
// You can create encrypted volumes. Encrypted volumes must be attached to instances
// that support Amazon EBS encryption. Volumes that are created from encrypted
@@ -7747,11 +8379,11 @@ func (c *EC2) CreateVolumeRequest(input *CreateVolumeInput) (req *request.Reques
// EBS encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html)
// in the Amazon Elastic Compute Cloud User Guide.
//
-// You can tag your volumes during creation. For more information, see Tagging
-// your Amazon EC2 resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html)
+// You can tag your volumes during creation. For more information, see Tag your
+// Amazon EC2 resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html)
// in the Amazon Elastic Compute Cloud User Guide.
//
-// For more information, see Creating an Amazon EBS volume (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-creating-volume.html)
+// For more information, see Create an Amazon EBS volume (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-creating-volume.html)
// in the Amazon Elastic Compute Cloud User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -7829,7 +8461,7 @@ func (c *EC2) CreateVpcRequest(input *CreateVpcInput) (req *request.Request, out
// Creates a VPC with the specified IPv4 CIDR block. The smallest VPC you can
// create uses a /28 netmask (16 IPv4 addresses), and the largest uses a /16
// netmask (65,536 IPv4 addresses). For more information about how large to
-// make your VPC, see Your VPC and Subnets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html)
+// make your VPC, see Your VPC and subnets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html)
// in the Amazon Virtual Private Cloud User Guide.
//
// You can optionally request an IPv6 CIDR block for the VPC. You can request
@@ -7839,7 +8471,7 @@ func (c *EC2) CreateVpcRequest(input *CreateVpcInput) (req *request.Request, out
//
// By default, each instance you launch in the VPC has the default DHCP options,
// which include only a default DNS server that we provide (AmazonProvidedDNS).
-// For more information, see DHCP Options Sets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_DHCP_Options.html)
+// For more information, see DHCP options sets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_DHCP_Options.html)
// in the Amazon Virtual Private Cloud User Guide.
//
// You can specify the instance tenancy value for the VPC when you create it.
@@ -8188,9 +8820,9 @@ func (c *EC2) CreateVpcPeeringConnectionRequest(input *CreateVpcPeeringConnectio
//
// Requests a VPC peering connection between two VPCs: a requester VPC that
// you own and an accepter VPC with which to create the connection. The accepter
-// VPC can belong to another AWS account and can be in a different Region to
-// the requester VPC. The requester VPC and accepter VPC cannot have overlapping
-// CIDR blocks.
+// VPC can belong to another Amazon Web Services account and can be in a different
+// Region to the requester VPC. The requester VPC and accepter VPC cannot have
+// overlapping CIDR blocks.
//
// Limitations and rules apply to a VPC peering connection. For more information,
// see the limitations (https://docs.aws.amazon.com/vpc/latest/peering/vpc-peering-basics.html#vpc-peering-limitations)
@@ -9010,7 +9642,7 @@ func (c *EC2) DeleteFleetsRequest(input *DeleteFleetsInput) (req *request.Reques
// instant fleets.
//
// For more information, see Deleting an EC2 Fleet (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/manage-ec2-fleet.html#delete-fleet)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -9188,6 +9820,83 @@ func (c *EC2) DeleteFpgaImageWithContext(ctx aws.Context, input *DeleteFpgaImage
return out, req.Send()
}
+const opDeleteInstanceEventWindow = "DeleteInstanceEventWindow"
+
+// DeleteInstanceEventWindowRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteInstanceEventWindow operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteInstanceEventWindow for more information on using the DeleteInstanceEventWindow
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteInstanceEventWindowRequest method.
+// req, resp := client.DeleteInstanceEventWindowRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteInstanceEventWindow
+func (c *EC2) DeleteInstanceEventWindowRequest(input *DeleteInstanceEventWindowInput) (req *request.Request, output *DeleteInstanceEventWindowOutput) {
+ op := &request.Operation{
+ Name: opDeleteInstanceEventWindow,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteInstanceEventWindowInput{}
+ }
+
+ output = &DeleteInstanceEventWindowOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DeleteInstanceEventWindow API operation for Amazon Elastic Compute Cloud.
+//
+// Deletes the specified event window.
+//
+// For more information, see Define event windows for scheduled events (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/event-windows.html)
+// in the Amazon EC2 User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation DeleteInstanceEventWindow for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteInstanceEventWindow
+func (c *EC2) DeleteInstanceEventWindow(input *DeleteInstanceEventWindowInput) (*DeleteInstanceEventWindowOutput, error) {
+ req, out := c.DeleteInstanceEventWindowRequest(input)
+ return out, req.Send()
+}
+
+// DeleteInstanceEventWindowWithContext is the same as DeleteInstanceEventWindow with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteInstanceEventWindow for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DeleteInstanceEventWindowWithContext(ctx aws.Context, input *DeleteInstanceEventWindowInput, opts ...request.Option) (*DeleteInstanceEventWindowOutput, error) {
+ req, out := c.DeleteInstanceEventWindowRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
const opDeleteInternetGateway = "DeleteInternetGateway"
// DeleteInternetGatewayRequest generates a "aws/request.Request" representing the
@@ -9758,9 +10467,10 @@ func (c *EC2) DeleteNatGatewayRequest(input *DeleteNatGatewayInput) (req *reques
// DeleteNatGateway API operation for Amazon Elastic Compute Cloud.
//
-// Deletes the specified NAT gateway. Deleting a NAT gateway disassociates its
-// Elastic IP address, but does not release the address from your account. Deleting
-// a NAT gateway does not delete any NAT gateway routes in your route tables.
+// Deletes the specified NAT gateway. Deleting a public NAT gateway disassociates
+// its Elastic IP address, but does not release the address from your account.
+// Deleting a NAT gateway does not delete any NAT gateway routes in your route
+// tables.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -10291,7 +11001,7 @@ func (c *EC2) DeletePlacementGroupRequest(input *DeletePlacementGroupInput) (req
// Deletes the specified placement group. You must terminate all instances in
// the placement group before you can delete the placement group. For more information,
// see Placement groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -10684,7 +11394,7 @@ func (c *EC2) DeleteSnapshotRequest(input *DeleteSnapshotInput) (req *request.Re
// a registered AMI. You must first de-register the AMI before you can delete
// the snapshot.
//
-// For more information, see Deleting an Amazon EBS snapshot (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-deleting-snapshot.html)
+// For more information, see Delete an Amazon EBS snapshot (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-deleting-snapshot.html)
// in the Amazon Elastic Compute Cloud User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -10866,6 +11576,80 @@ func (c *EC2) DeleteSubnetWithContext(ctx aws.Context, input *DeleteSubnetInput,
return out, req.Send()
}
+const opDeleteSubnetCidrReservation = "DeleteSubnetCidrReservation"
+
+// DeleteSubnetCidrReservationRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteSubnetCidrReservation operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteSubnetCidrReservation for more information on using the DeleteSubnetCidrReservation
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteSubnetCidrReservationRequest method.
+// req, resp := client.DeleteSubnetCidrReservationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteSubnetCidrReservation
+func (c *EC2) DeleteSubnetCidrReservationRequest(input *DeleteSubnetCidrReservationInput) (req *request.Request, output *DeleteSubnetCidrReservationOutput) {
+ op := &request.Operation{
+ Name: opDeleteSubnetCidrReservation,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteSubnetCidrReservationInput{}
+ }
+
+ output = &DeleteSubnetCidrReservationOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DeleteSubnetCidrReservation API operation for Amazon Elastic Compute Cloud.
+//
+// Deletes a subnet CIDR reservation.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation DeleteSubnetCidrReservation for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteSubnetCidrReservation
+func (c *EC2) DeleteSubnetCidrReservation(input *DeleteSubnetCidrReservationInput) (*DeleteSubnetCidrReservationOutput, error) {
+ req, out := c.DeleteSubnetCidrReservationRequest(input)
+ return out, req.Send()
+}
+
+// DeleteSubnetCidrReservationWithContext is the same as DeleteSubnetCidrReservation with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteSubnetCidrReservation for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DeleteSubnetCidrReservationWithContext(ctx aws.Context, input *DeleteSubnetCidrReservationInput, opts ...request.Option) (*DeleteSubnetCidrReservationOutput, error) {
+ req, out := c.DeleteSubnetCidrReservationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
const opDeleteTags = "DeleteTags"
// DeleteTagsRequest generates a "aws/request.Request" representing the
@@ -11967,7 +12751,7 @@ func (c *EC2) DeleteVolumeRequest(input *DeleteVolumeInput) (req *request.Reques
//
// The volume can remain in the deleting state for several minutes.
//
-// For more information, see Deleting an Amazon EBS volume (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-deleting-volume.html)
+// For more information, see Delete an Amazon EBS volume (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-deleting-volume.html)
// in the Amazon Elastic Compute Cloud User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -12272,12 +13056,26 @@ func (c *EC2) DeleteVpcEndpointsRequest(input *DeleteVpcEndpointsInput) (req *re
// DeleteVpcEndpoints API operation for Amazon Elastic Compute Cloud.
//
-// Deletes one or more specified VPC endpoints. Deleting a gateway endpoint
-// also deletes the endpoint routes in the route tables that were associated
-// with the endpoint. Deleting an interface endpoint or a Gateway Load Balancer
-// endpoint deletes the endpoint network interfaces. Gateway Load Balancer endpoints
-// can only be deleted if the routes that are associated with the endpoint are
-// deleted.
+// Deletes one or more specified VPC endpoints. You can delete any of the following
+// types of VPC endpoints.
+//
+// * Gateway endpoint,
+//
+// * Gateway Load Balancer endpoint,
+//
+// * Interface endpoint
+//
+// The following rules apply when you delete a VPC endpoint:
+//
+// * When you delete a gateway endpoint, we delete the endpoint routes in
+// the route tables that are associated with the endpoint.
+//
+// * When you delete a Gateway Load Balancer endpoint, we delete the endpoint
+// network interfaces. You can only delete Gateway Load Balancer endpoints
+// when the routes that are associated with the endpoint are deleted.
+//
+// * When you delete an interface endpoint, we delete the endpoint network
+// interfaces.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -12674,8 +13472,8 @@ func (c *EC2) DeprovisionByoipCidrRequest(input *DeprovisionByoipCidrInput) (req
// DeprovisionByoipCidr API operation for Amazon Elastic Compute Cloud.
//
// Releases the specified address range that you provisioned for use with your
-// AWS resources through bring your own IP addresses (BYOIP) and deletes the
-// corresponding address pool.
+// Amazon Web Services resources through bring your own IP addresses (BYOIP)
+// and deletes the corresponding address pool.
//
// Before you can release an address range, you must stop advertising it using
// WithdrawByoipCidr and you must not have any IP addresses allocated from its
@@ -12836,6 +13634,6 @@ func (c *EC2) DeregisterInstanceEventNotificationAttributesRequest(input *Deregi
// DeregisterInstanceEventNotificationAttributes API operation for Amazon Elastic Compute Cloud.
//
// Deregisters tag keys to prevent tags that have the specified tag keys from
// being included in scheduled event notifications for resources in the Region.
//
@@ -13189,6 +13989,139 @@ func (c *EC2) DescribeAddressesWithContext(ctx aws.Context, input *DescribeAddre
return out, req.Send()
}
+const opDescribeAddressesAttribute = "DescribeAddressesAttribute"
+
+// DescribeAddressesAttributeRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeAddressesAttribute operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DescribeAddressesAttribute for more information on using the DescribeAddressesAttribute
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DescribeAddressesAttributeRequest method.
+// req, resp := client.DescribeAddressesAttributeRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeAddressesAttribute
+func (c *EC2) DescribeAddressesAttributeRequest(input *DescribeAddressesAttributeInput) (req *request.Request, output *DescribeAddressesAttributeOutput) {
+ op := &request.Operation{
+ Name: opDescribeAddressesAttribute,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"NextToken"},
+ OutputTokens: []string{"NextToken"},
+ LimitToken: "MaxResults",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &DescribeAddressesAttributeInput{}
+ }
+
+ output = &DescribeAddressesAttributeOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DescribeAddressesAttribute API operation for Amazon Elastic Compute Cloud.
+//
+// Describes the attributes of the specified Elastic IP addresses. For requirements,
+// see Using reverse DNS for email applications (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html#Using_Elastic_Addressing_Reverse_DNS).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation DescribeAddressesAttribute for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeAddressesAttribute
+func (c *EC2) DescribeAddressesAttribute(input *DescribeAddressesAttributeInput) (*DescribeAddressesAttributeOutput, error) {
+ req, out := c.DescribeAddressesAttributeRequest(input)
+ return out, req.Send()
+}
+
+// DescribeAddressesAttributeWithContext is the same as DescribeAddressesAttribute with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeAddressesAttribute for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DescribeAddressesAttributeWithContext(ctx aws.Context, input *DescribeAddressesAttributeInput, opts ...request.Option) (*DescribeAddressesAttributeOutput, error) {
+ req, out := c.DescribeAddressesAttributeRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// DescribeAddressesAttributePages iterates over the pages of a DescribeAddressesAttribute operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeAddressesAttribute method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a DescribeAddressesAttribute operation.
+// pageNum := 0
+// err := client.DescribeAddressesAttributePages(params,
+// func(page *ec2.DescribeAddressesAttributeOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *EC2) DescribeAddressesAttributePages(input *DescribeAddressesAttributeInput, fn func(*DescribeAddressesAttributeOutput, bool) bool) error {
+ return c.DescribeAddressesAttributePagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// DescribeAddressesAttributePagesWithContext same as DescribeAddressesAttributePages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DescribeAddressesAttributePagesWithContext(ctx aws.Context, input *DescribeAddressesAttributeInput, fn func(*DescribeAddressesAttributeOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *DescribeAddressesAttributeInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.DescribeAddressesAttributeRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*DescribeAddressesAttributeOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
const opDescribeAggregateIdFormat = "DescribeAggregateIdFormat"
// DescribeAggregateIdFormatRequest generates a "aws/request.Request" representing the
@@ -13622,7 +14555,8 @@ func (c *EC2) DescribeCapacityReservationsRequest(input *DescribeCapacityReserva
// DescribeCapacityReservations API operation for Amazon Elastic Compute Cloud.
//
// Describes one or more of your Capacity Reservations. The results describe
-// only the Capacity Reservations in the AWS Region that you're currently using.
+// only the Capacity Reservations in the Amazon Web Services Region that you're
+// currently using.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -14972,7 +15906,7 @@ func (c *EC2) DescribeDhcpOptionsRequest(input *DescribeDhcpOptionsInput) (req *
//
// Describes one or more of your DHCP options sets.
//
-// For more information, see DHCP Options Sets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_DHCP_Options.html)
+// For more information, see DHCP options sets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_DHCP_Options.html)
// in the Amazon Virtual Private Cloud User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -15652,6 +16586,9 @@ func (c *EC2) DescribeFleetHistoryRequest(input *DescribeFleetHistoryInput) (req
// This ensures that you can query by the last evaluated time and not miss a
// recorded event. EC2 Fleet events are available for 48 hours.
//
+// For more information, see Monitoring your EC2 Fleet (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet.html#monitor-ec2-fleet)
+// in the Amazon EC2 User Guide.
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -15726,6 +16663,9 @@ func (c *EC2) DescribeFleetInstancesRequest(input *DescribeFleetInstancesInput)
//
// Describes the running instances for the specified EC2 Fleet.
//
+// For more information, see Monitoring your EC2 Fleet (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet.html#monitor-ec2-fleet)
+// in the Amazon EC2 User Guide.
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -15806,6 +16746,9 @@ func (c *EC2) DescribeFleetsRequest(input *DescribeFleetsInput) (req *request.Re
//
// Describes the specified EC2 Fleets or all of your EC2 Fleets.
//
+// For more information, see Monitoring your EC2 Fleet (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet.html#monitor-ec2-fleet)
+// in the Amazon EC2 User Guide.
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -16285,8 +17228,8 @@ func (c *EC2) DescribeHostReservationOfferingsRequest(input *DescribeHostReserva
// Hosts. When purchasing an offering, ensure that the instance family and Region
// of the offering matches that of the Dedicated Hosts with which it is to be
// associated. For more information about supported instance types, see Dedicated
-// Hosts Overview (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/dedicated-hosts-overview.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// Hosts (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/dedicated-hosts-overview.html)
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -17078,8 +18021,8 @@ func (c *EC2) DescribeImagesRequest(input *DescribeImagesInput) (req *request.Re
//
// Recently deregistered images appear in the returned results for a short interval
// and then return empty results. After all instances that reference a deregistered
-// AMI are terminated, specifying the ID of the image results in an error indicating
-// that the AMI ID cannot be found.
+// AMI are terminated, specifying the ID of the image will eventually return
+// an error indicating that the AMI ID cannot be found.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -17525,7 +18468,7 @@ func (c *EC2) DescribeInstanceCreditSpecificationsRequest(input *DescribeInstanc
// the call works normally.
//
// For more information, see Burstable performance instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -17682,6 +18625,148 @@ func (c *EC2) DescribeInstanceEventNotificationAttributesWithContext(ctx aws.Con
return out, req.Send()
}
+const opDescribeInstanceEventWindows = "DescribeInstanceEventWindows"
+
+// DescribeInstanceEventWindowsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeInstanceEventWindows operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DescribeInstanceEventWindows for more information on using the DescribeInstanceEventWindows
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DescribeInstanceEventWindowsRequest method.
+// req, resp := client.DescribeInstanceEventWindowsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstanceEventWindows
+func (c *EC2) DescribeInstanceEventWindowsRequest(input *DescribeInstanceEventWindowsInput) (req *request.Request, output *DescribeInstanceEventWindowsOutput) {
+ op := &request.Operation{
+ Name: opDescribeInstanceEventWindows,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"NextToken"},
+ OutputTokens: []string{"NextToken"},
+ LimitToken: "MaxResults",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &DescribeInstanceEventWindowsInput{}
+ }
+
+ output = &DescribeInstanceEventWindowsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DescribeInstanceEventWindows API operation for Amazon Elastic Compute Cloud.
+//
+// Describes the specified event windows or all event windows.
+//
+// If you specify event window IDs, the output includes information for only
+// the specified event windows. If you specify filters, the output includes
+// information for only those event windows that meet the filter criteria. If
+// you do not specify event windows IDs or filters, the output includes information
+// for all event windows, which can affect performance. We recommend that you
+// use pagination to ensure that the operation returns quickly and successfully.
+//
+// For more information, see Define event windows for scheduled events (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/event-windows.html)
+// in the Amazon EC2 User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation DescribeInstanceEventWindows for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstanceEventWindows
+func (c *EC2) DescribeInstanceEventWindows(input *DescribeInstanceEventWindowsInput) (*DescribeInstanceEventWindowsOutput, error) {
+ req, out := c.DescribeInstanceEventWindowsRequest(input)
+ return out, req.Send()
+}
+
+// DescribeInstanceEventWindowsWithContext is the same as DescribeInstanceEventWindows with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeInstanceEventWindows for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DescribeInstanceEventWindowsWithContext(ctx aws.Context, input *DescribeInstanceEventWindowsInput, opts ...request.Option) (*DescribeInstanceEventWindowsOutput, error) {
+ req, out := c.DescribeInstanceEventWindowsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// DescribeInstanceEventWindowsPages iterates over the pages of a DescribeInstanceEventWindows operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeInstanceEventWindows method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a DescribeInstanceEventWindows operation.
+// pageNum := 0
+// err := client.DescribeInstanceEventWindowsPages(params,
+// func(page *ec2.DescribeInstanceEventWindowsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *EC2) DescribeInstanceEventWindowsPages(input *DescribeInstanceEventWindowsInput, fn func(*DescribeInstanceEventWindowsOutput, bool) bool) error {
+ return c.DescribeInstanceEventWindowsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// DescribeInstanceEventWindowsPagesWithContext same as DescribeInstanceEventWindowsPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DescribeInstanceEventWindowsPagesWithContext(ctx aws.Context, input *DescribeInstanceEventWindowsInput, fn func(*DescribeInstanceEventWindowsOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *DescribeInstanceEventWindowsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.DescribeInstanceEventWindowsRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*DescribeInstanceEventWindowsOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
const opDescribeInstanceStatus = "DescribeInstanceStatus"
// DescribeInstanceStatusRequest generates a "aws/request.Request" representing the
@@ -17742,18 +18827,18 @@ func (c *EC2) DescribeInstanceStatusRequest(input *DescribeInstanceStatusInput)
// to identify hardware and software issues. For more information, see Status
// checks for your instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-system-instance-status-check.html)
// and Troubleshooting instances with failed status checks (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstances.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// * Scheduled events - Amazon EC2 can schedule events (such as reboot, stop,
// or terminate) for your instances related to hardware issues, software
// updates, or system maintenance. For more information, see Scheduled events
// for your instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-instances-status-check_sched.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// * Instance state - You can manage your instances from the moment you launch
// them through their termination. For more information, see Instance lifecycle
// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -18563,7 +19648,7 @@ func (c *EC2) DescribeKeyPairsRequest(input *DescribeKeyPairsInput) (req *reques
//
// Describes the specified key pairs or all of your key pairs.
//
-// For more information about key pairs, see Key Pairs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html)
+// For more information about key pairs, see Amazon EC2 key pairs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html)
// in the Amazon Elastic Compute Cloud User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -19707,7 +20792,8 @@ func (c *EC2) DescribeManagedPrefixListsRequest(input *DescribeManagedPrefixList
// DescribeManagedPrefixLists API operation for Amazon Elastic Compute Cloud.
//
-// Describes your managed prefix lists and any AWS-managed prefix lists.
+// Describes your managed prefix lists and any Amazon Web Services-managed prefix
+// lists.
//
// To view the entries for your prefix list, use GetManagedPrefixListEntries.
//
@@ -20841,7 +21927,7 @@ func (c *EC2) DescribePlacementGroupsRequest(input *DescribePlacementGroupsInput
//
// Describes the specified placement groups or all of your placement groups.
// For more information, see Placement groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -20921,9 +22007,9 @@ func (c *EC2) DescribePrefixListsRequest(input *DescribePrefixListsInput) (req *
// DescribePrefixLists API operation for Amazon Elastic Compute Cloud.
//
-// Describes available AWS services in a prefix list format, which includes
-// the prefix list name and prefix list ID of the service and the IP address
-// range for the service.
+// Describes available Amazon Web Services services in a prefix list format,
+// which includes the prefix list name and prefix list ID of the service and
+// the IP address range for the service.
//
// We recommend that you use DescribeManagedPrefixLists instead.
//
@@ -21366,6 +22452,140 @@ func (c *EC2) DescribeRegionsWithContext(ctx aws.Context, input *DescribeRegions
return out, req.Send()
}
+const opDescribeReplaceRootVolumeTasks = "DescribeReplaceRootVolumeTasks"
+
+// DescribeReplaceRootVolumeTasksRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeReplaceRootVolumeTasks operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DescribeReplaceRootVolumeTasks for more information on using the DescribeReplaceRootVolumeTasks
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DescribeReplaceRootVolumeTasksRequest method.
+// req, resp := client.DescribeReplaceRootVolumeTasksRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeReplaceRootVolumeTasks
+func (c *EC2) DescribeReplaceRootVolumeTasksRequest(input *DescribeReplaceRootVolumeTasksInput) (req *request.Request, output *DescribeReplaceRootVolumeTasksOutput) {
+ op := &request.Operation{
+ Name: opDescribeReplaceRootVolumeTasks,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"NextToken"},
+ OutputTokens: []string{"NextToken"},
+ LimitToken: "MaxResults",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &DescribeReplaceRootVolumeTasksInput{}
+ }
+
+ output = &DescribeReplaceRootVolumeTasksOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DescribeReplaceRootVolumeTasks API operation for Amazon Elastic Compute Cloud.
+//
+// Describes a root volume replacement task. For more information, see Replace
+// a root volume (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-restoring-volume.html#replace-root)
+// in the Amazon Elastic Compute Cloud User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation DescribeReplaceRootVolumeTasks for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeReplaceRootVolumeTasks
+func (c *EC2) DescribeReplaceRootVolumeTasks(input *DescribeReplaceRootVolumeTasksInput) (*DescribeReplaceRootVolumeTasksOutput, error) {
+ req, out := c.DescribeReplaceRootVolumeTasksRequest(input)
+ return out, req.Send()
+}
+
+// DescribeReplaceRootVolumeTasksWithContext is the same as DescribeReplaceRootVolumeTasks with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeReplaceRootVolumeTasks for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DescribeReplaceRootVolumeTasksWithContext(ctx aws.Context, input *DescribeReplaceRootVolumeTasksInput, opts ...request.Option) (*DescribeReplaceRootVolumeTasksOutput, error) {
+ req, out := c.DescribeReplaceRootVolumeTasksRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// DescribeReplaceRootVolumeTasksPages iterates over the pages of a DescribeReplaceRootVolumeTasks operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeReplaceRootVolumeTasks method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a DescribeReplaceRootVolumeTasks operation.
+// pageNum := 0
+// err := client.DescribeReplaceRootVolumeTasksPages(params,
+// func(page *ec2.DescribeReplaceRootVolumeTasksOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *EC2) DescribeReplaceRootVolumeTasksPages(input *DescribeReplaceRootVolumeTasksInput, fn func(*DescribeReplaceRootVolumeTasksOutput, bool) bool) error {
+ return c.DescribeReplaceRootVolumeTasksPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// DescribeReplaceRootVolumeTasksPagesWithContext same as DescribeReplaceRootVolumeTasksPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DescribeReplaceRootVolumeTasksPagesWithContext(ctx aws.Context, input *DescribeReplaceRootVolumeTasksInput, fn func(*DescribeReplaceRootVolumeTasksOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *DescribeReplaceRootVolumeTasksInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.DescribeReplaceRootVolumeTasksRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*DescribeReplaceRootVolumeTasksOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
const opDescribeReservedInstances = "DescribeReservedInstances"
// DescribeReservedInstancesRequest generates a "aws/request.Request" representing the
@@ -21413,7 +22633,7 @@ func (c *EC2) DescribeReservedInstancesRequest(input *DescribeReservedInstancesI
// Describes one or more of the Reserved Instances that you purchased.
//
// For more information about Reserved Instances, see Reserved Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/concepts-on-demand-reserved-instances.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -21508,7 +22728,7 @@ func (c *EC2) DescribeReservedInstancesListingsRequest(input *DescribeReservedIn
// that you purchase.
//
// For more information, see Reserved Instance Marketplace (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -21594,7 +22814,7 @@ func (c *EC2) DescribeReservedInstancesModificationsRequest(input *DescribeReser
// about the specific modification is returned.
//
// For more information, see Modifying Reserved Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-modifying.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -21737,7 +22957,7 @@ func (c *EC2) DescribeReservedInstancesOfferingsRequest(input *DescribeReservedI
// that you do not purchase your own Reserved Instances.
//
// For more information, see Reserved Instance Marketplace (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -21876,7 +23096,7 @@ func (c *EC2) DescribeRouteTablesRequest(input *DescribeRouteTablesInput) (req *
// with the main route table. This command does not return the subnet ID for
// implicit associations.
//
-// For more information, see Route Tables (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html)
+// For more information, see Route tables (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html)
// in the Amazon Virtual Private Cloud User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -22306,6 +23526,138 @@ func (c *EC2) DescribeSecurityGroupReferencesWithContext(ctx aws.Context, input
return out, req.Send()
}
+const opDescribeSecurityGroupRules = "DescribeSecurityGroupRules"
+
+// DescribeSecurityGroupRulesRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeSecurityGroupRules operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DescribeSecurityGroupRules for more information on using the DescribeSecurityGroupRules
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DescribeSecurityGroupRulesRequest method.
+// req, resp := client.DescribeSecurityGroupRulesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSecurityGroupRules
+func (c *EC2) DescribeSecurityGroupRulesRequest(input *DescribeSecurityGroupRulesInput) (req *request.Request, output *DescribeSecurityGroupRulesOutput) {
+ op := &request.Operation{
+ Name: opDescribeSecurityGroupRules,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"NextToken"},
+ OutputTokens: []string{"NextToken"},
+ LimitToken: "MaxResults",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &DescribeSecurityGroupRulesInput{}
+ }
+
+ output = &DescribeSecurityGroupRulesOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DescribeSecurityGroupRules API operation for Amazon Elastic Compute Cloud.
+//
+// Describes one or more of your security group rules.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation DescribeSecurityGroupRules for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSecurityGroupRules
+func (c *EC2) DescribeSecurityGroupRules(input *DescribeSecurityGroupRulesInput) (*DescribeSecurityGroupRulesOutput, error) {
+ req, out := c.DescribeSecurityGroupRulesRequest(input)
+ return out, req.Send()
+}
+
+// DescribeSecurityGroupRulesWithContext is the same as DescribeSecurityGroupRules with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeSecurityGroupRules for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DescribeSecurityGroupRulesWithContext(ctx aws.Context, input *DescribeSecurityGroupRulesInput, opts ...request.Option) (*DescribeSecurityGroupRulesOutput, error) {
+ req, out := c.DescribeSecurityGroupRulesRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// DescribeSecurityGroupRulesPages iterates over the pages of a DescribeSecurityGroupRules operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeSecurityGroupRules method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a DescribeSecurityGroupRules operation.
+// pageNum := 0
+// err := client.DescribeSecurityGroupRulesPages(params,
+// func(page *ec2.DescribeSecurityGroupRulesOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *EC2) DescribeSecurityGroupRulesPages(input *DescribeSecurityGroupRulesInput, fn func(*DescribeSecurityGroupRulesOutput, bool) bool) error {
+ return c.DescribeSecurityGroupRulesPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// DescribeSecurityGroupRulesPagesWithContext same as DescribeSecurityGroupRulesPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DescribeSecurityGroupRulesPagesWithContext(ctx aws.Context, input *DescribeSecurityGroupRulesInput, fn func(*DescribeSecurityGroupRulesOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *DescribeSecurityGroupRulesInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.DescribeSecurityGroupRulesRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*DescribeSecurityGroupRulesOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
const opDescribeSecurityGroups = "DescribeSecurityGroups"
// DescribeSecurityGroupsRequest generates a "aws/request.Request" representing the
@@ -22359,9 +23711,9 @@ func (c *EC2) DescribeSecurityGroupsRequest(input *DescribeSecurityGroupsInput)
// Describes the specified security groups or all of your security groups.
//
// A security group is for use with instances either in the EC2-Classic platform
-// or in a specific VPC. For more information, see Amazon EC2 Security Groups
+// or in a specific VPC. For more information, see Amazon EC2 security groups
// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html)
-// in the Amazon Elastic Compute Cloud User Guide and Security Groups for Your
+// in the Amazon Elastic Compute Cloud User Guide and Security groups for your
// VPC (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html)
// in the Amazon Virtual Private Cloud User Guide.
//
@@ -22577,25 +23929,25 @@ func (c *EC2) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (req *requ
// snapshots available to you.
//
// The snapshots available to you include public snapshots, private snapshots
-// that you own, and private snapshots owned by other AWS accounts for which
-// you have explicit create volume permissions.
+// that you own, and private snapshots owned by other Amazon Web Services accounts
+// for which you have explicit create volume permissions.
//
// The create volume permissions fall into the following categories:
//
// * public: The owner of the snapshot granted create volume permissions
-// for the snapshot to the all group. All AWS accounts have create volume
-// permissions for these snapshots.
+// for the snapshot to the all group. All Amazon Web Services accounts have
+// create volume permissions for these snapshots.
//
// * explicit: The owner of the snapshot granted create volume permissions
-// to a specific AWS account.
+// to a specific Amazon Web Services account.
//
-// * implicit: An AWS account has implicit create volume permissions for
-// all snapshots it owns.
+// * implicit: An Amazon Web Services account has implicit create volume
+// permissions for all snapshots it owns.
//
// The list of snapshots returned can be filtered by specifying snapshot IDs,
-// snapshot owners, or AWS accounts with create volume permissions. If no options
-// are specified, Amazon EC2 returns all snapshots for which you have create
-// volume permissions.
+// snapshot owners, or Amazon Web Services accounts with create volume permissions.
+// If no options are specified, Amazon EC2 returns all snapshots for which you
+// have create volume permissions.
//
// If you specify one or more snapshot IDs, only snapshots that have the specified
// IDs are returned. If you specify an invalid snapshot ID, an error is returned.
@@ -22604,13 +23956,14 @@ func (c *EC2) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (req *requ
//
// If you specify one or more snapshot owners using the OwnerIds option, only
// snapshots from the specified owners and for which you have access are returned.
-// The results can include the AWS account IDs of the specified owners, amazon
-// for snapshots owned by Amazon, or self for snapshots that you own.
+// The results can include the Amazon Web Services account IDs of the specified
+// owners, amazon for snapshots owned by Amazon, or self for snapshots that
+// you own.
//
// If you specify a list of restorable users, only snapshots with create snapshot
-// permissions for those users are returned. You can specify AWS account IDs
-// (if you own the snapshots), self for snapshots for which you own or have
-// explicit permissions, or all for public snapshots.
+// permissions for those users are returned. You can specify Amazon Web Services
+// account IDs (if you own the snapshots), self for snapshots for which you
+// own or have explicit permissions, or all for public snapshots.
//
// If you are describing a long list of snapshots, we recommend that you paginate
// the output to make the list more manageable. The MaxResults parameter sets
@@ -23270,10 +24623,10 @@ func (c *EC2) DescribeSpotPriceHistoryRequest(input *DescribeSpotPriceHistoryInp
// pricing history (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-spot-instances-history.html)
// in the Amazon EC2 User Guide for Linux Instances.
//
-// When you specify a start and end time, this operation returns the prices
-// of the instance types within the time range that you specified and the time
-// when the price changed. The price is valid within the time period that you
-// specified; the response merely indicates the last time that the price changed.
+// When you specify a start and end time, the operation returns the prices of
+// the instance types within that time range. It also returns the last price
+// change before the start time, which is the effective price as of the start
+// time.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -23490,6 +24843,154 @@ func (c *EC2) DescribeStaleSecurityGroupsPagesWithContext(ctx aws.Context, input
return p.Err()
}
+const opDescribeStoreImageTasks = "DescribeStoreImageTasks"
+
+// DescribeStoreImageTasksRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeStoreImageTasks operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DescribeStoreImageTasks for more information on using the DescribeStoreImageTasks
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DescribeStoreImageTasksRequest method.
+// req, resp := client.DescribeStoreImageTasksRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeStoreImageTasks
+func (c *EC2) DescribeStoreImageTasksRequest(input *DescribeStoreImageTasksInput) (req *request.Request, output *DescribeStoreImageTasksOutput) {
+ op := &request.Operation{
+ Name: opDescribeStoreImageTasks,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"NextToken"},
+ OutputTokens: []string{"NextToken"},
+ LimitToken: "MaxResults",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &DescribeStoreImageTasksInput{}
+ }
+
+ output = &DescribeStoreImageTasksOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DescribeStoreImageTasks API operation for Amazon Elastic Compute Cloud.
+//
+// Describes the progress of the AMI store tasks. You can describe the store
+// tasks for specified AMIs. If you don't specify the AMIs, you get a paginated
+// list of store tasks from the last 31 days.
+//
+// For each AMI task, the response indicates if the task is InProgress, Completed,
+// or Failed. For tasks InProgress, the response shows the estimated progress
+// as a percentage.
+//
+// Tasks are listed in reverse chronological order. Currently, only tasks from
+// the past 31 days can be viewed.
+//
+// To use this API, you must have the required permissions. For more information,
+// see Permissions for storing and restoring AMIs using S3 (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-store-restore.html#ami-s3-permissions)
+// in the Amazon Elastic Compute Cloud User Guide.
+//
+// For more information, see Store and restore an AMI using S3 (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-store-restore.html)
+// in the Amazon Elastic Compute Cloud User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation DescribeStoreImageTasks for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeStoreImageTasks
+func (c *EC2) DescribeStoreImageTasks(input *DescribeStoreImageTasksInput) (*DescribeStoreImageTasksOutput, error) {
+ req, out := c.DescribeStoreImageTasksRequest(input)
+ return out, req.Send()
+}
+
+// DescribeStoreImageTasksWithContext is the same as DescribeStoreImageTasks with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeStoreImageTasks for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DescribeStoreImageTasksWithContext(ctx aws.Context, input *DescribeStoreImageTasksInput, opts ...request.Option) (*DescribeStoreImageTasksOutput, error) {
+ req, out := c.DescribeStoreImageTasksRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// DescribeStoreImageTasksPages iterates over the pages of a DescribeStoreImageTasks operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeStoreImageTasks method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a DescribeStoreImageTasks operation.
+// pageNum := 0
+// err := client.DescribeStoreImageTasksPages(params,
+// func(page *ec2.DescribeStoreImageTasksOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *EC2) DescribeStoreImageTasksPages(input *DescribeStoreImageTasksInput, fn func(*DescribeStoreImageTasksOutput, bool) bool) error {
+ return c.DescribeStoreImageTasksPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// DescribeStoreImageTasksPagesWithContext same as DescribeStoreImageTasksPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DescribeStoreImageTasksPagesWithContext(ctx aws.Context, input *DescribeStoreImageTasksInput, fn func(*DescribeStoreImageTasksOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *DescribeStoreImageTasksInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.DescribeStoreImageTasksRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*DescribeStoreImageTasksOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
const opDescribeSubnets = "DescribeSubnets"
// DescribeSubnetsRequest generates a "aws/request.Request" representing the
@@ -23542,7 +25043,7 @@ func (c *EC2) DescribeSubnetsRequest(input *DescribeSubnetsInput) (req *request.
//
// Describes one or more of your subnets.
//
-// For more information, see Your VPC and Subnets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html)
+// For more information, see Your VPC and subnets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html)
// in the Amazon Virtual Private Cloud User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -25219,6 +26720,142 @@ func (c *EC2) DescribeTransitGatewaysPagesWithContext(ctx aws.Context, input *De
return p.Err()
}
+const opDescribeTrunkInterfaceAssociations = "DescribeTrunkInterfaceAssociations"
+
+// DescribeTrunkInterfaceAssociationsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeTrunkInterfaceAssociations operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DescribeTrunkInterfaceAssociations for more information on using the DescribeTrunkInterfaceAssociations
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DescribeTrunkInterfaceAssociationsRequest method.
+// req, resp := client.DescribeTrunkInterfaceAssociationsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTrunkInterfaceAssociations
+func (c *EC2) DescribeTrunkInterfaceAssociationsRequest(input *DescribeTrunkInterfaceAssociationsInput) (req *request.Request, output *DescribeTrunkInterfaceAssociationsOutput) {
+ op := &request.Operation{
+ Name: opDescribeTrunkInterfaceAssociations,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"NextToken"},
+ OutputTokens: []string{"NextToken"},
+ LimitToken: "MaxResults",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &DescribeTrunkInterfaceAssociationsInput{}
+ }
+
+ output = &DescribeTrunkInterfaceAssociationsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DescribeTrunkInterfaceAssociations API operation for Amazon Elastic Compute Cloud.
+//
+//
+// This API action is currently in limited preview only. If you are interested
+// in using this feature, contact your account manager.
+//
+// Describes one or more network interface trunk associations.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation DescribeTrunkInterfaceAssociations for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTrunkInterfaceAssociations
+func (c *EC2) DescribeTrunkInterfaceAssociations(input *DescribeTrunkInterfaceAssociationsInput) (*DescribeTrunkInterfaceAssociationsOutput, error) {
+ req, out := c.DescribeTrunkInterfaceAssociationsRequest(input)
+ return out, req.Send()
+}
+
+// DescribeTrunkInterfaceAssociationsWithContext is the same as DescribeTrunkInterfaceAssociations with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeTrunkInterfaceAssociations for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DescribeTrunkInterfaceAssociationsWithContext(ctx aws.Context, input *DescribeTrunkInterfaceAssociationsInput, opts ...request.Option) (*DescribeTrunkInterfaceAssociationsOutput, error) {
+ req, out := c.DescribeTrunkInterfaceAssociationsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// DescribeTrunkInterfaceAssociationsPages iterates over the pages of a DescribeTrunkInterfaceAssociations operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeTrunkInterfaceAssociations method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a DescribeTrunkInterfaceAssociations operation.
+// pageNum := 0
+// err := client.DescribeTrunkInterfaceAssociationsPages(params,
+// func(page *ec2.DescribeTrunkInterfaceAssociationsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *EC2) DescribeTrunkInterfaceAssociationsPages(input *DescribeTrunkInterfaceAssociationsInput, fn func(*DescribeTrunkInterfaceAssociationsOutput, bool) bool) error {
+ return c.DescribeTrunkInterfaceAssociationsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// DescribeTrunkInterfaceAssociationsPagesWithContext same as DescribeTrunkInterfaceAssociationsPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DescribeTrunkInterfaceAssociationsPagesWithContext(ctx aws.Context, input *DescribeTrunkInterfaceAssociationsInput, fn func(*DescribeTrunkInterfaceAssociationsOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *DescribeTrunkInterfaceAssociationsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.DescribeTrunkInterfaceAssociationsRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*DescribeTrunkInterfaceAssociationsOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
const opDescribeVolumeAttribute = "DescribeVolumeAttribute"
// DescribeVolumeAttributeRequest generates a "aws/request.Request" representing the
@@ -25364,7 +27001,7 @@ func (c *EC2) DescribeVolumeStatusRequest(input *DescribeVolumeStatusInput) (req
// status of the volume is ok. If the check fails, the overall status is impaired.
// If the status is insufficient-data, then the checks might still be taking
// place on your volume at the time. We recommend that you retry the request.
-// For more information about volume status, see Monitoring the status of your
+// For more information about volume status, see Monitor the status of your
// volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-volume-status.html)
// in the Amazon Elastic Compute Cloud User Guide.
//
@@ -25666,7 +27303,7 @@ func (c *EC2) DescribeVolumesModificationsRequest(input *DescribeVolumesModifica
// You can also use CloudWatch Events to check the status of a modification
// to an EBS volume. For information about CloudWatch Events, see the Amazon
// CloudWatch Events User Guide (https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/).
-// For more information, see Monitoring volume modifications (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html#monitoring_mods)
+// For more information, see Monitor the progress of volume modifications (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-volume-modifications.html)
// in the Amazon Elastic Compute Cloud User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -26613,12 +28250,12 @@ func (c *EC2) DescribeVpcEndpointServicesRequest(input *DescribeVpcEndpointServi
//
// Describes available services to which you can create a VPC endpoint.
//
-// When the service provider and the consumer have different accounts multiple
+// When the service provider and the consumer have different accounts in multiple
// Availability Zones, and the consumer views the VPC endpoint service information,
// the response only includes the common Availability Zones. For example, when
// the service provider account uses us-east-1a and us-east-1c and the consumer
-// uses us-east-1a and us-east-1a and us-east-1b, the response includes the
-// VPC endpoint services in the common Availability Zone, us-east-1a.
+// uses us-east-1a and us-east-1b, the response includes the VPC endpoint services
+// in the common Availability Zone, us-east-1a.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -27479,10 +29116,10 @@ func (c *EC2) DetachVolumeRequest(input *DetachVolumeInput) (req *request.Reques
// while the instance is running. To detach the root volume, stop the instance
// first.
//
-// When a volume with an AWS Marketplace product code is detached from an instance,
-// the product code is no longer associated with the instance.
+// When a volume with an Amazon Web Services Marketplace product code is detached
+// from an instance, the product code is no longer associated with the instance.
//
-// For more information, see Detaching an Amazon EBS volume (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-detaching-volume.html)
+// For more information, see Detach an Amazon EBS volume (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-detaching-volume.html)
// in the Amazon Elastic Compute Cloud User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -27753,6 +29390,161 @@ func (c *EC2) DisableFastSnapshotRestoresWithContext(ctx aws.Context, input *Dis
return out, req.Send()
}
+const opDisableImageDeprecation = "DisableImageDeprecation"
+
+// DisableImageDeprecationRequest generates a "aws/request.Request" representing the
+// client's request for the DisableImageDeprecation operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DisableImageDeprecation for more information on using the DisableImageDeprecation
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DisableImageDeprecationRequest method.
+// req, resp := client.DisableImageDeprecationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableImageDeprecation
+func (c *EC2) DisableImageDeprecationRequest(input *DisableImageDeprecationInput) (req *request.Request, output *DisableImageDeprecationOutput) {
+ op := &request.Operation{
+ Name: opDisableImageDeprecation,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DisableImageDeprecationInput{}
+ }
+
+ output = &DisableImageDeprecationOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DisableImageDeprecation API operation for Amazon Elastic Compute Cloud.
+//
+// Cancels the deprecation of the specified AMI.
+//
+// For more information, see Deprecate an AMI (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-deprecate.html)
+// in the Amazon Elastic Compute Cloud User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation DisableImageDeprecation for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableImageDeprecation
+func (c *EC2) DisableImageDeprecation(input *DisableImageDeprecationInput) (*DisableImageDeprecationOutput, error) {
+ req, out := c.DisableImageDeprecationRequest(input)
+ return out, req.Send()
+}
+
+// DisableImageDeprecationWithContext is the same as DisableImageDeprecation with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DisableImageDeprecation for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DisableImageDeprecationWithContext(ctx aws.Context, input *DisableImageDeprecationInput, opts ...request.Option) (*DisableImageDeprecationOutput, error) {
+ req, out := c.DisableImageDeprecationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDisableSerialConsoleAccess = "DisableSerialConsoleAccess"
+
+// DisableSerialConsoleAccessRequest generates a "aws/request.Request" representing the
+// client's request for the DisableSerialConsoleAccess operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DisableSerialConsoleAccess for more information on using the DisableSerialConsoleAccess
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DisableSerialConsoleAccessRequest method.
+// req, resp := client.DisableSerialConsoleAccessRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableSerialConsoleAccess
+func (c *EC2) DisableSerialConsoleAccessRequest(input *DisableSerialConsoleAccessInput) (req *request.Request, output *DisableSerialConsoleAccessOutput) {
+ op := &request.Operation{
+ Name: opDisableSerialConsoleAccess,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DisableSerialConsoleAccessInput{}
+ }
+
+ output = &DisableSerialConsoleAccessOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DisableSerialConsoleAccess API operation for Amazon Elastic Compute Cloud.
+//
+// Disables access to the EC2 serial console of all instances for your account.
+// By default, access to the EC2 serial console is disabled for your account.
+// For more information, see Manage account access to the EC2 serial console
+// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configure-access-to-serial-console.html#serial-console-account-access)
+// in the Amazon EC2 User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation DisableSerialConsoleAccess for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableSerialConsoleAccess
+func (c *EC2) DisableSerialConsoleAccess(input *DisableSerialConsoleAccessInput) (*DisableSerialConsoleAccessOutput, error) {
+ req, out := c.DisableSerialConsoleAccessRequest(input)
+ return out, req.Send()
+}
+
+// DisableSerialConsoleAccessWithContext is the same as DisableSerialConsoleAccess with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DisableSerialConsoleAccess for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DisableSerialConsoleAccessWithContext(ctx aws.Context, input *DisableSerialConsoleAccessInput, opts ...request.Option) (*DisableSerialConsoleAccessOutput, error) {
+ req, out := c.DisableSerialConsoleAccessRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
const opDisableTransitGatewayRouteTablePropagation = "DisableTransitGatewayRouteTablePropagation"
// DisableTransitGatewayRouteTablePropagationRequest generates a "aws/request.Request" representing the
@@ -28270,13 +30062,12 @@ func (c *EC2) DisassociateEnclaveCertificateIamRoleRequest(input *DisassociateEn
// DisassociateEnclaveCertificateIamRole API operation for Amazon Elastic Compute Cloud.
//
-// Disassociates an IAM role from an AWS Certificate Manager (ACM) certificate.
+// Disassociates an IAM role from an Certificate Manager (ACM) certificate.
// Disassociating an IAM role from an ACM certificate removes the Amazon S3
// object that contains the certificate, certificate chain, and encrypted private
// key from the Amazon S3 bucket. It also revokes the IAM role's permission
-// to use the AWS Key Management Service (KMS) customer master key (CMK) used
-// to encrypt the private key. This effectively revokes the role's permission
-// to use the certificate.
+// to use the KMS key used to encrypt the private key. This effectively revokes
+// the role's permission to use the certificate.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -28382,6 +30173,83 @@ func (c *EC2) DisassociateIamInstanceProfileWithContext(ctx aws.Context, input *
return out, req.Send()
}
+const opDisassociateInstanceEventWindow = "DisassociateInstanceEventWindow"
+
+// DisassociateInstanceEventWindowRequest generates a "aws/request.Request" representing the
+// client's request for the DisassociateInstanceEventWindow operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DisassociateInstanceEventWindow for more information on using the DisassociateInstanceEventWindow
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DisassociateInstanceEventWindowRequest method.
+// req, resp := client.DisassociateInstanceEventWindowRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateInstanceEventWindow
+func (c *EC2) DisassociateInstanceEventWindowRequest(input *DisassociateInstanceEventWindowInput) (req *request.Request, output *DisassociateInstanceEventWindowOutput) {
+ op := &request.Operation{
+ Name: opDisassociateInstanceEventWindow,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DisassociateInstanceEventWindowInput{}
+ }
+
+ output = &DisassociateInstanceEventWindowOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DisassociateInstanceEventWindow API operation for Amazon Elastic Compute Cloud.
+//
+// Disassociates one or more targets from an event window.
+//
+// For more information, see Define event windows for scheduled events (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/event-windows.html)
+// in the Amazon EC2 User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation DisassociateInstanceEventWindow for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateInstanceEventWindow
+func (c *EC2) DisassociateInstanceEventWindow(input *DisassociateInstanceEventWindowInput) (*DisassociateInstanceEventWindowOutput, error) {
+ req, out := c.DisassociateInstanceEventWindowRequest(input)
+ return out, req.Send()
+}
+
+// DisassociateInstanceEventWindowWithContext is the same as DisassociateInstanceEventWindow with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DisassociateInstanceEventWindow for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DisassociateInstanceEventWindowWithContext(ctx aws.Context, input *DisassociateInstanceEventWindowInput, opts ...request.Option) (*DisassociateInstanceEventWindowOutput, error) {
+ req, out := c.DisassociateInstanceEventWindowRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
const opDisassociateRouteTable = "DisassociateRouteTable"
// DisassociateRouteTableRequest generates a "aws/request.Request" representing the
@@ -28431,7 +30299,7 @@ func (c *EC2) DisassociateRouteTableRequest(input *DisassociateRouteTableInput)
//
// After you perform this action, the subnet no longer uses the routes in the
// route table. Instead, it uses the routes in the VPC's main route table. For
-// more information about route tables, see Route Tables (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html)
+// more information about route tables, see Route tables (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html)
// in the Amazon Virtual Private Cloud User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -28686,6 +30554,85 @@ func (c *EC2) DisassociateTransitGatewayRouteTableWithContext(ctx aws.Context, i
return out, req.Send()
}
+const opDisassociateTrunkInterface = "DisassociateTrunkInterface"
+
+// DisassociateTrunkInterfaceRequest generates a "aws/request.Request" representing the
+// client's request for the DisassociateTrunkInterface operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DisassociateTrunkInterface for more information on using the DisassociateTrunkInterface
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DisassociateTrunkInterfaceRequest method.
+// req, resp := client.DisassociateTrunkInterfaceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateTrunkInterface
+func (c *EC2) DisassociateTrunkInterfaceRequest(input *DisassociateTrunkInterfaceInput) (req *request.Request, output *DisassociateTrunkInterfaceOutput) {
+ op := &request.Operation{
+ Name: opDisassociateTrunkInterface,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DisassociateTrunkInterfaceInput{}
+ }
+
+ output = &DisassociateTrunkInterfaceOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DisassociateTrunkInterface API operation for Amazon Elastic Compute Cloud.
+//
+//
+// This API action is currently in limited preview only. If you are interested
+// in using this feature, contact your account manager.
+//
+// Removes an association between a branch network interface with a trunk network
+// interface.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation DisassociateTrunkInterface for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateTrunkInterface
+func (c *EC2) DisassociateTrunkInterface(input *DisassociateTrunkInterfaceInput) (*DisassociateTrunkInterfaceOutput, error) {
+ req, out := c.DisassociateTrunkInterfaceRequest(input)
+ return out, req.Send()
+}
+
+// DisassociateTrunkInterfaceWithContext is the same as DisassociateTrunkInterface with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DisassociateTrunkInterface for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DisassociateTrunkInterfaceWithContext(ctx aws.Context, input *DisassociateTrunkInterfaceInput, opts ...request.Option) (*DisassociateTrunkInterfaceOutput, error) {
+ req, out := c.DisassociateTrunkInterfaceRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
const opDisassociateVpcCidrBlock = "DisassociateVpcCidrBlock"
// DisassociateVpcCidrBlockRequest generates a "aws/request.Request" representing the
@@ -28813,12 +30760,12 @@ func (c *EC2) EnableEbsEncryptionByDefaultRequest(input *EnableEbsEncryptionByDe
// Enables EBS encryption by default for your account in the current Region.
//
// After you enable encryption by default, the EBS volumes that you create are
-// are always encrypted, either using the default CMK or the CMK that you specified
-// when you created each volume. For more information, see Amazon EBS encryption
-// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html)
+// always encrypted, either using the default KMS key or the KMS key that you
+// specified when you created each volume. For more information, see Amazon
+// EBS encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html)
// in the Amazon Elastic Compute Cloud User Guide.
//
-// You can specify the default CMK for encryption by default using ModifyEbsDefaultKmsKeyId
+// You can specify the default KMS key for encryption by default using ModifyEbsDefaultKmsKeyId
// or ResetEbsDefaultKmsKeyId.
//
// Enabling encryption by default has no effect on the encryption status of
@@ -28938,6 +30885,161 @@ func (c *EC2) EnableFastSnapshotRestoresWithContext(ctx aws.Context, input *Enab
return out, req.Send()
}
+const opEnableImageDeprecation = "EnableImageDeprecation"
+
+// EnableImageDeprecationRequest generates a "aws/request.Request" representing the
+// client's request for the EnableImageDeprecation operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See EnableImageDeprecation for more information on using the EnableImageDeprecation
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the EnableImageDeprecationRequest method.
+// req, resp := client.EnableImageDeprecationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableImageDeprecation
+func (c *EC2) EnableImageDeprecationRequest(input *EnableImageDeprecationInput) (req *request.Request, output *EnableImageDeprecationOutput) {
+ op := &request.Operation{
+ Name: opEnableImageDeprecation,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &EnableImageDeprecationInput{}
+ }
+
+ output = &EnableImageDeprecationOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// EnableImageDeprecation API operation for Amazon Elastic Compute Cloud.
+//
+// Enables deprecation of the specified AMI at the specified date and time.
+//
+// For more information, see Deprecate an AMI (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-deprecate.html)
+// in the Amazon Elastic Compute Cloud User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation EnableImageDeprecation for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableImageDeprecation
+func (c *EC2) EnableImageDeprecation(input *EnableImageDeprecationInput) (*EnableImageDeprecationOutput, error) {
+ req, out := c.EnableImageDeprecationRequest(input)
+ return out, req.Send()
+}
+
+// EnableImageDeprecationWithContext is the same as EnableImageDeprecation with the addition of
+// the ability to pass a context and additional request options.
+//
+// See EnableImageDeprecation for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) EnableImageDeprecationWithContext(ctx aws.Context, input *EnableImageDeprecationInput, opts ...request.Option) (*EnableImageDeprecationOutput, error) {
+ req, out := c.EnableImageDeprecationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opEnableSerialConsoleAccess = "EnableSerialConsoleAccess"
+
+// EnableSerialConsoleAccessRequest generates a "aws/request.Request" representing the
+// client's request for the EnableSerialConsoleAccess operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See EnableSerialConsoleAccess for more information on using the EnableSerialConsoleAccess
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the EnableSerialConsoleAccessRequest method.
+// req, resp := client.EnableSerialConsoleAccessRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableSerialConsoleAccess
+func (c *EC2) EnableSerialConsoleAccessRequest(input *EnableSerialConsoleAccessInput) (req *request.Request, output *EnableSerialConsoleAccessOutput) {
+ op := &request.Operation{
+ Name: opEnableSerialConsoleAccess,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &EnableSerialConsoleAccessInput{}
+ }
+
+ output = &EnableSerialConsoleAccessOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// EnableSerialConsoleAccess API operation for Amazon Elastic Compute Cloud.
+//
+// Enables access to the EC2 serial console of all instances for your account.
+// By default, access to the EC2 serial console is disabled for your account.
+// For more information, see Manage account access to the EC2 serial console
+// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configure-access-to-serial-console.html#serial-console-account-access)
+// in the Amazon EC2 User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation EnableSerialConsoleAccess for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableSerialConsoleAccess
+func (c *EC2) EnableSerialConsoleAccess(input *EnableSerialConsoleAccessInput) (*EnableSerialConsoleAccessOutput, error) {
+ req, out := c.EnableSerialConsoleAccessRequest(input)
+ return out, req.Send()
+}
+
+// EnableSerialConsoleAccessWithContext is the same as EnableSerialConsoleAccess with the addition of
+// the ability to pass a context and additional request options.
+//
+// See EnableSerialConsoleAccess for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) EnableSerialConsoleAccessWithContext(ctx aws.Context, input *EnableSerialConsoleAccessInput, opts ...request.Option) (*EnableSerialConsoleAccessOutput, error) {
+ req, out := c.EnableSerialConsoleAccessRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
const opEnableTransitGatewayRouteTablePropagation = "EnableTransitGatewayRouteTablePropagation"
// EnableTransitGatewayRouteTablePropagationRequest generates a "aws/request.Request" representing the
@@ -29524,7 +31626,7 @@ func (c *EC2) ExportImageRequest(input *ExportImageInput) (req *request.Request,
// ExportImage API operation for Amazon Elastic Compute Cloud.
//
// Exports an Amazon Machine Image (AMI) to a VM file. For more information,
-// see Exporting a VM Directory from an Amazon Machine Image (AMI) (https://docs.aws.amazon.com/vm-import/latest/userguide/vmexport_image.html)
+// see Exporting a VM directly from an Amazon Machine Image (AMI) (https://docs.aws.amazon.com/vm-import/latest/userguide/vmexport_image.html)
// in the VM Import/Export User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -29679,11 +31781,10 @@ func (c *EC2) GetAssociatedEnclaveCertificateIamRolesRequest(input *GetAssociate
// GetAssociatedEnclaveCertificateIamRoles API operation for Amazon Elastic Compute Cloud.
//
-// Returns the IAM roles that are associated with the specified AWS Certificate
-// Manager (ACM) certificate. It also returns the name of the Amazon S3 bucket
-// and the Amazon S3 object key where the certificate, certificate chain, and
-// encrypted private key bundle are stored, and the ARN of the AWS Key Management
-// Service (KMS) customer master key (CMK) that's used to encrypt the private
+// Returns the IAM roles that are associated with the specified ACM (ACM) certificate.
+// It also returns the name of the Amazon S3 bucket and the Amazon S3 object
+// key where the certificate, certificate chain, and encrypted private key bundle
+// are stored, and the ARN of the KMS key that's used to encrypt the private
// key.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -29893,8 +31994,8 @@ func (c *EC2) GetCapacityReservationUsageRequest(input *GetCapacityReservationUs
//
// Gets usage information about a Capacity Reservation. If the Capacity Reservation
// is shared, it shows usage information for the Capacity Reservation owner
-// and each AWS account that is currently using the shared capacity. If the
-// Capacity Reservation is not shared, it shows only the Capacity Reservation
+// and each Amazon Web Services account that is currently using the shared capacity.
+// If the Capacity Reservation is not shared, it shows only the Capacity Reservation
// owner's usage.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -30058,8 +32159,8 @@ func (c *EC2) GetConsoleOutputRequest(input *GetConsoleOutputInput) (req *reques
// during the instance lifecycle. This option is supported on instance types
// that use the Nitro hypervisor.
//
-// For more information, see Instance Console Output (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-console.html#instance-console-console-output)
-// in the Amazon Elastic Compute Cloud User Guide.
+// For more information, see Instance console output (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-console.html#instance-console-console-output)
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -30213,7 +32314,7 @@ func (c *EC2) GetDefaultCreditSpecificationRequest(input *GetDefaultCreditSpecif
// instance family.
//
// For more information, see Burstable performance instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -30287,9 +32388,9 @@ func (c *EC2) GetEbsDefaultKmsKeyIdRequest(input *GetEbsDefaultKmsKeyIdInput) (r
// GetEbsDefaultKmsKeyId API operation for Amazon Elastic Compute Cloud.
//
-// Describes the default customer master key (CMK) for EBS encryption by default
-// for your account in this Region. You can change the default CMK for encryption
-// by default using ModifyEbsDefaultKmsKeyId or ResetEbsDefaultKmsKeyId.
+// Describes the default KMS key for EBS encryption by default for your account
+// in this Region. You can change the default KMS key for encryption by default
+// using ModifyEbsDefaultKmsKeyId or ResetEbsDefaultKmsKeyId.
//
// For more information, see Amazon EBS encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html)
// in the Amazon Elastic Compute Cloud User Guide.
@@ -30400,6 +32501,93 @@ func (c *EC2) GetEbsEncryptionByDefaultWithContext(ctx aws.Context, input *GetEb
return out, req.Send()
}
+const opGetFlowLogsIntegrationTemplate = "GetFlowLogsIntegrationTemplate"
+
+// GetFlowLogsIntegrationTemplateRequest generates a "aws/request.Request" representing the
+// client's request for the GetFlowLogsIntegrationTemplate operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetFlowLogsIntegrationTemplate for more information on using the GetFlowLogsIntegrationTemplate
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetFlowLogsIntegrationTemplateRequest method.
+// req, resp := client.GetFlowLogsIntegrationTemplateRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetFlowLogsIntegrationTemplate
+func (c *EC2) GetFlowLogsIntegrationTemplateRequest(input *GetFlowLogsIntegrationTemplateInput) (req *request.Request, output *GetFlowLogsIntegrationTemplateOutput) {
+ op := &request.Operation{
+ Name: opGetFlowLogsIntegrationTemplate,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetFlowLogsIntegrationTemplateInput{}
+ }
+
+ output = &GetFlowLogsIntegrationTemplateOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetFlowLogsIntegrationTemplate API operation for Amazon Elastic Compute Cloud.
+//
+// Generates a CloudFormation template that streamlines and automates the integration
+// of VPC flow logs with Amazon Athena. This make it easier for you to query
+// and gain insights from VPC flow logs data. Based on the information that
+// you provide, we configure resources in the template to do the following:
+//
+// * Create a table in Athena that maps fields to a custom log format
+//
+// * Create a Lambda function that updates the table with new partitions
+// on a daily, weekly, or monthly basis
+//
+// * Create a table partitioned between two timestamps in the past
+//
+// * Create a set of named queries in Athena that you can use to get started
+// quickly
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation GetFlowLogsIntegrationTemplate for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetFlowLogsIntegrationTemplate
+func (c *EC2) GetFlowLogsIntegrationTemplate(input *GetFlowLogsIntegrationTemplateInput) (*GetFlowLogsIntegrationTemplateOutput, error) {
+ req, out := c.GetFlowLogsIntegrationTemplateRequest(input)
+ return out, req.Send()
+}
+
+// GetFlowLogsIntegrationTemplateWithContext is the same as GetFlowLogsIntegrationTemplate with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetFlowLogsIntegrationTemplate for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) GetFlowLogsIntegrationTemplateWithContext(ctx aws.Context, input *GetFlowLogsIntegrationTemplateInput, opts ...request.Option) (*GetFlowLogsIntegrationTemplateOutput, error) {
+ req, out := c.GetFlowLogsIntegrationTemplateRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
const opGetGroupsForCapacityReservation = "GetGroupsForCapacityReservation"
// GetGroupsForCapacityReservationRequest generates a "aws/request.Request" representing the
@@ -31007,7 +33195,7 @@ func (c *EC2) GetPasswordDataRequest(input *GetPasswordDataInput) (req *request.
// scripts (Windows Server 2016 and later). This usually only happens the first
// time an instance is launched. For more information, see EC2Config (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/UsingConfig_WinAMI.html)
// and EC2Launch (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2launch.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// For the EC2Config service, the password is not generated for rebundled AMIs
// unless Ec2SetPassword is enabled before bundling.
@@ -31125,6 +33313,158 @@ func (c *EC2) GetReservedInstancesExchangeQuoteWithContext(ctx aws.Context, inpu
return out, req.Send()
}
+const opGetSerialConsoleAccessStatus = "GetSerialConsoleAccessStatus"
+
+// GetSerialConsoleAccessStatusRequest generates a "aws/request.Request" representing the
+// client's request for the GetSerialConsoleAccessStatus operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetSerialConsoleAccessStatus for more information on using the GetSerialConsoleAccessStatus
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetSerialConsoleAccessStatusRequest method.
+// req, resp := client.GetSerialConsoleAccessStatusRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetSerialConsoleAccessStatus
+func (c *EC2) GetSerialConsoleAccessStatusRequest(input *GetSerialConsoleAccessStatusInput) (req *request.Request, output *GetSerialConsoleAccessStatusOutput) {
+ op := &request.Operation{
+ Name: opGetSerialConsoleAccessStatus,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetSerialConsoleAccessStatusInput{}
+ }
+
+ output = &GetSerialConsoleAccessStatusOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetSerialConsoleAccessStatus API operation for Amazon Elastic Compute Cloud.
+//
+// Retrieves the access status of your account to the EC2 serial console of
+// all instances. By default, access to the EC2 serial console is disabled for
+// your account. For more information, see Manage account access to the EC2
+// serial console (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configure-access-to-serial-console.html#serial-console-account-access)
+// in the Amazon EC2 User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation GetSerialConsoleAccessStatus for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetSerialConsoleAccessStatus
+func (c *EC2) GetSerialConsoleAccessStatus(input *GetSerialConsoleAccessStatusInput) (*GetSerialConsoleAccessStatusOutput, error) {
+ req, out := c.GetSerialConsoleAccessStatusRequest(input)
+ return out, req.Send()
+}
+
+// GetSerialConsoleAccessStatusWithContext is the same as GetSerialConsoleAccessStatus with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetSerialConsoleAccessStatus for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) GetSerialConsoleAccessStatusWithContext(ctx aws.Context, input *GetSerialConsoleAccessStatusInput, opts ...request.Option) (*GetSerialConsoleAccessStatusOutput, error) {
+ req, out := c.GetSerialConsoleAccessStatusRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetSubnetCidrReservations = "GetSubnetCidrReservations"
+
+// GetSubnetCidrReservationsRequest generates a "aws/request.Request" representing the
+// client's request for the GetSubnetCidrReservations operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetSubnetCidrReservations for more information on using the GetSubnetCidrReservations
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetSubnetCidrReservationsRequest method.
+// req, resp := client.GetSubnetCidrReservationsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetSubnetCidrReservations
+func (c *EC2) GetSubnetCidrReservationsRequest(input *GetSubnetCidrReservationsInput) (req *request.Request, output *GetSubnetCidrReservationsOutput) {
+ op := &request.Operation{
+ Name: opGetSubnetCidrReservations,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetSubnetCidrReservationsInput{}
+ }
+
+ output = &GetSubnetCidrReservationsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetSubnetCidrReservations API operation for Amazon Elastic Compute Cloud.
+//
+// Gets information about the subnet CIDR reservations.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation GetSubnetCidrReservations for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetSubnetCidrReservations
+func (c *EC2) GetSubnetCidrReservations(input *GetSubnetCidrReservationsInput) (*GetSubnetCidrReservationsOutput, error) {
+ req, out := c.GetSubnetCidrReservationsRequest(input)
+ return out, req.Send()
+}
+
+// GetSubnetCidrReservationsWithContext is the same as GetSubnetCidrReservations with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetSubnetCidrReservations for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) GetSubnetCidrReservationsWithContext(ctx aws.Context, input *GetSubnetCidrReservationsInput, opts ...request.Option) (*GetSubnetCidrReservationsOutput, error) {
+ req, out := c.GetSubnetCidrReservationsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
const opGetTransitGatewayAttachmentPropagations = "GetTransitGatewayAttachmentPropagations"
// GetTransitGatewayAttachmentPropagationsRequest generates a "aws/request.Request" representing the
@@ -31913,8 +34253,10 @@ func (c *EC2) ImportImageRequest(input *ImportImageInput) (req *request.Request,
// ImportImage API operation for Amazon Elastic Compute Cloud.
//
// Import single or multi-volume disk images or EBS snapshots into an Amazon
-// Machine Image (AMI). For more information, see Importing a VM as an Image
-// Using VM Import/Export (https://docs.aws.amazon.com/vm-import/latest/userguide/vmimport-image-import.html)
+// Machine Image (AMI).
+//
+// For more information, see Importing a VM as an image using VM Import/Export
+// (https://docs.aws.amazon.com/vm-import/latest/userguide/vmimport-image-import.html)
// in the VM Import/Export User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -31990,9 +34332,14 @@ func (c *EC2) ImportInstanceRequest(input *ImportInstanceInput) (req *request.Re
// ImportInstance API operation for Amazon Elastic Compute Cloud.
//
// Creates an import instance task using metadata from the specified disk image.
-// ImportInstance only supports single-volume VMs. To import multi-volume VMs,
-// use ImportImage. For more information, see Importing a Virtual Machine Using
-// the Amazon EC2 CLI (https://docs.aws.amazon.com/AWSEC2/latest/CommandLineReference/ec2-cli-vmimport-export.html).
+//
+// This API action supports only single-volume VMs. To import multi-volume VMs,
+// use ImportImage instead.
+//
+// This API action is not supported by the Command Line Interface (CLI). For
+// information about using the Amazon EC2 CLI, which is deprecated, see Importing
+// a VM to Amazon EC2 (https://awsdocs.s3.amazonaws.com/EC2/ec2-clt.pdf#UsingVirtualMachinesinAmazonEC2)
+// in the Amazon EC2 CLI Reference PDF file.
//
// For information about the import manifest referenced by this API action,
// see VM Import Manifest (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/manifest.html).
@@ -32069,13 +34416,14 @@ func (c *EC2) ImportKeyPairRequest(input *ImportKeyPairInput) (req *request.Requ
// ImportKeyPair API operation for Amazon Elastic Compute Cloud.
//
-// Imports the public key from an RSA key pair that you created with a third-party
-// tool. Compare this with CreateKeyPair, in which AWS creates the key pair
-// and gives the keys to you (AWS keeps a copy of the public key). With ImportKeyPair,
-// you create the key pair and give AWS just the public key. The private key
-// is never transferred between you and AWS.
+// Imports the public key from an RSA or ED25519 key pair that you created with
+// a third-party tool. Compare this with CreateKeyPair, in which Amazon Web
+// Services creates the key pair and gives the keys to you (Amazon Web Services
+// keeps a copy of the public key). With ImportKeyPair, you create the key pair
+// and give Amazon Web Services just the public key. The private key is never
+// transferred between you and Amazon Web Services.
//
-// For more information about key pairs, see Key Pairs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html)
+// For more information about key pairs, see Amazon EC2 key pairs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html)
// in the Amazon Elastic Compute Cloud User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -32152,6 +34500,10 @@ func (c *EC2) ImportSnapshotRequest(input *ImportSnapshotInput) (req *request.Re
//
// Imports a disk into an EBS snapshot.
//
+// For more information, see Importing a disk as a snapshot using VM Import/Export
+// (https://docs.aws.amazon.com/vm-import/latest/userguide/vmimport-import-snapshot.html)
+// in the VM Import/Export User Guide.
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -32224,8 +34576,16 @@ func (c *EC2) ImportVolumeRequest(input *ImportVolumeInput) (req *request.Reques
// ImportVolume API operation for Amazon Elastic Compute Cloud.
//
-// Creates an import volume task using metadata from the specified disk image.For
-// more information, see Importing Disks to Amazon EBS (https://docs.aws.amazon.com/AWSEC2/latest/CommandLineReference/importing-your-volumes-into-amazon-ebs.html).
+// Creates an import volume task using metadata from the specified disk image.
+//
+// This API action supports only single-volume VMs. To import multi-volume VMs,
+// use ImportImage instead. To import a disk to a snapshot, use ImportSnapshot
+// instead.
+//
+// This API action is not supported by the Command Line Interface (CLI). For
+// information about using the Amazon EC2 CLI, which is deprecated, see Importing
+// Disks to Amazon EBS (https://awsdocs.s3.amazonaws.com/EC2/ec2-clt.pdf#importing-your-volumes-into-amazon-ebs)
+// in the Amazon EC2 CLI Reference PDF file.
//
// For information about the import manifest referenced by this API action,
// see VM Import Manifest (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/manifest.html).
@@ -32258,6 +34618,81 @@ func (c *EC2) ImportVolumeWithContext(ctx aws.Context, input *ImportVolumeInput,
return out, req.Send()
}
+const opModifyAddressAttribute = "ModifyAddressAttribute"
+
+// ModifyAddressAttributeRequest generates a "aws/request.Request" representing the
+// client's request for the ModifyAddressAttribute operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ModifyAddressAttribute for more information on using the ModifyAddressAttribute
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the ModifyAddressAttributeRequest method.
+// req, resp := client.ModifyAddressAttributeRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyAddressAttribute
+func (c *EC2) ModifyAddressAttributeRequest(input *ModifyAddressAttributeInput) (req *request.Request, output *ModifyAddressAttributeOutput) {
+ op := &request.Operation{
+ Name: opModifyAddressAttribute,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ModifyAddressAttributeInput{}
+ }
+
+ output = &ModifyAddressAttributeOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ModifyAddressAttribute API operation for Amazon Elastic Compute Cloud.
+//
+// Modifies an attribute of the specified Elastic IP address. For requirements,
+// see Using reverse DNS for email applications (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html#Using_Elastic_Addressing_Reverse_DNS).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation ModifyAddressAttribute for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyAddressAttribute
+func (c *EC2) ModifyAddressAttribute(input *ModifyAddressAttributeInput) (*ModifyAddressAttributeOutput, error) {
+ req, out := c.ModifyAddressAttributeRequest(input)
+ return out, req.Send()
+}
+
+// ModifyAddressAttributeWithContext is the same as ModifyAddressAttribute with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ModifyAddressAttribute for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) ModifyAddressAttributeWithContext(ctx aws.Context, input *ModifyAddressAttributeInput, opts ...request.Option) (*ModifyAddressAttributeOutput, error) {
+ req, out := c.ModifyAddressAttributeRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
const opModifyAvailabilityZoneGroup = "ModifyAvailabilityZoneGroup"
// ModifyAvailabilityZoneGroupRequest generates a "aws/request.Request" representing the
@@ -32535,20 +34970,20 @@ func (c *EC2) ModifyDefaultCreditSpecificationRequest(input *ModifyDefaultCredit
// ModifyDefaultCreditSpecification API operation for Amazon Elastic Compute Cloud.
//
// Modifies the default credit option for CPU usage of burstable performance
-// instances. The default credit option is set at the account level per AWS
-// Region, and is specified per instance family. All new burstable performance
-// instances in the account launch using the default credit option.
+// instances. The default credit option is set at the account level per Amazon
+// Web Services Region, and is specified per instance family. All new burstable
+// performance instances in the account launch using the default credit option.
//
// ModifyDefaultCreditSpecification is an asynchronous operation, which works
-// at an AWS Region level and modifies the credit option for each Availability
-// Zone. All zones in a Region are updated within five minutes. But if instances
-// are launched during this operation, they might not get the new credit option
-// until the zone is updated. To verify whether the update has occurred, you
-// can call GetDefaultCreditSpecification and check DefaultCreditSpecification
+// at an Amazon Web Services Region level and modifies the credit option for
+// each Availability Zone. All zones in a Region are updated within five minutes.
+// But if instances are launched during this operation, they might not get the
+// new credit option until the zone is updated. To verify whether the update
+// has occurred, you can call GetDefaultCreditSpecification and check DefaultCreditSpecification
// for updates.
//
// For more information, see Burstable performance instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -32622,17 +35057,18 @@ func (c *EC2) ModifyEbsDefaultKmsKeyIdRequest(input *ModifyEbsDefaultKmsKeyIdInp
// ModifyEbsDefaultKmsKeyId API operation for Amazon Elastic Compute Cloud.
//
-// Changes the default customer master key (CMK) for EBS encryption by default
-// for your account in this Region.
+// Changes the default KMS key for EBS encryption by default for your account
+// in this Region.
//
-// AWS creates a unique AWS managed CMK in each Region for use with encryption
-// by default. If you change the default CMK to a symmetric customer managed
-// CMK, it is used instead of the AWS managed CMK. To reset the default CMK
-// to the AWS managed CMK for EBS, use ResetEbsDefaultKmsKeyId. Amazon EBS does
-// not support asymmetric CMKs.
+// Amazon Web Services creates a unique Amazon Web Services managed KMS key
+// in each Region for use with encryption by default. If you change the default
+// KMS key to a symmetric customer managed KMS key, it is used instead of the
+// Amazon Web Services managed KMS key. To reset the default KMS key to the
+// Amazon Web Services managed KMS key for EBS, use ResetEbsDefaultKmsKeyId.
+// Amazon EBS does not support asymmetric KMS keys.
//
-// If you delete or disable the customer managed CMK that you specified for
-// use with encryption by default, your instances will fail to launch.
+// If you delete or disable the customer managed KMS key that you specified
+// for use with encryption by default, your instances will fail to launch.
//
// For more information, see Amazon EBS encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html)
// in the Amazon Elastic Compute Cloud User Guide.
@@ -33260,7 +35696,7 @@ func (c *EC2) ModifyInstanceAttributeRequest(input *ModifyInstanceAttributeInput
//
// To modify some attributes, the instance must be stopped. For more information,
// see Modifying attributes of a stopped instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_ChangingAttributesWhileInstanceStopped.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -33415,7 +35851,7 @@ func (c *EC2) ModifyInstanceCreditSpecificationRequest(input *ModifyInstanceCred
// performance instance. The credit options are standard and unlimited.
//
// For more information, see Burstable performance instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -33519,6 +35955,92 @@ func (c *EC2) ModifyInstanceEventStartTimeWithContext(ctx aws.Context, input *Mo
return out, req.Send()
}
+const opModifyInstanceEventWindow = "ModifyInstanceEventWindow"
+
+// ModifyInstanceEventWindowRequest generates a "aws/request.Request" representing the
+// client's request for the ModifyInstanceEventWindow operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ModifyInstanceEventWindow for more information on using the ModifyInstanceEventWindow
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the ModifyInstanceEventWindowRequest method.
+// req, resp := client.ModifyInstanceEventWindowRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyInstanceEventWindow
+func (c *EC2) ModifyInstanceEventWindowRequest(input *ModifyInstanceEventWindowInput) (req *request.Request, output *ModifyInstanceEventWindowOutput) {
+ op := &request.Operation{
+ Name: opModifyInstanceEventWindow,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ModifyInstanceEventWindowInput{}
+ }
+
+ output = &ModifyInstanceEventWindowOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ModifyInstanceEventWindow API operation for Amazon Elastic Compute Cloud.
+//
+// Modifies the specified event window.
+//
+// You can define either a set of time ranges or a cron expression when modifying
+// the event window, but not both.
+//
+// To modify the targets associated with the event window, use the AssociateInstanceEventWindow
+// and DisassociateInstanceEventWindow API.
+//
+// If Amazon Web Services has already scheduled an event, modifying an event
+// window won't change the time of the scheduled event.
+//
+// For more information, see Define event windows for scheduled events (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/event-windows.html)
+// in the Amazon EC2 User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation ModifyInstanceEventWindow for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyInstanceEventWindow
+func (c *EC2) ModifyInstanceEventWindow(input *ModifyInstanceEventWindowInput) (*ModifyInstanceEventWindowOutput, error) {
+ req, out := c.ModifyInstanceEventWindowRequest(input)
+ return out, req.Send()
+}
+
+// ModifyInstanceEventWindowWithContext is the same as ModifyInstanceEventWindow with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ModifyInstanceEventWindow for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) ModifyInstanceEventWindowWithContext(ctx aws.Context, input *ModifyInstanceEventWindowInput, opts ...request.Option) (*ModifyInstanceEventWindowOutput, error) {
+ req, out := c.ModifyInstanceEventWindowRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
const opModifyInstanceMetadataOptions = "ModifyInstanceMetadataOptions"
// ModifyInstanceMetadataOptionsRequest generates a "aws/request.Request" representing the
@@ -33569,7 +36091,8 @@ func (c *EC2) ModifyInstanceMetadataOptionsRequest(input *ModifyInstanceMetadata
// the API responds with a state of “pending”. After the parameter modifications
// are successfully applied to the instance, the state of the modifications
// changes from “pending” to “applied” in subsequent describe-instances
-// API calls. For more information, see Instance metadata and user data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html).
+// API calls. For more information, see Instance metadata and user data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html)
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -33977,7 +36500,7 @@ func (c *EC2) ModifyReservedInstancesRequest(input *ModifyReservedInstancesInput
// network platform, and instance type.
//
// For more information, see Modifying Reserved Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-modifying.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -34007,6 +36530,80 @@ func (c *EC2) ModifyReservedInstancesWithContext(ctx aws.Context, input *ModifyR
return out, req.Send()
}
+const opModifySecurityGroupRules = "ModifySecurityGroupRules"
+
+// ModifySecurityGroupRulesRequest generates a "aws/request.Request" representing the
+// client's request for the ModifySecurityGroupRules operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ModifySecurityGroupRules for more information on using the ModifySecurityGroupRules
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the ModifySecurityGroupRulesRequest method.
+// req, resp := client.ModifySecurityGroupRulesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifySecurityGroupRules
+func (c *EC2) ModifySecurityGroupRulesRequest(input *ModifySecurityGroupRulesInput) (req *request.Request, output *ModifySecurityGroupRulesOutput) {
+ op := &request.Operation{
+ Name: opModifySecurityGroupRules,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ModifySecurityGroupRulesInput{}
+ }
+
+ output = &ModifySecurityGroupRulesOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ModifySecurityGroupRules API operation for Amazon Elastic Compute Cloud.
+//
+// Modifies the rules of a security group.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation ModifySecurityGroupRules for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifySecurityGroupRules
+func (c *EC2) ModifySecurityGroupRules(input *ModifySecurityGroupRulesInput) (*ModifySecurityGroupRulesOutput, error) {
+ req, out := c.ModifySecurityGroupRulesRequest(input)
+ return out, req.Send()
+}
+
+// ModifySecurityGroupRulesWithContext is the same as ModifySecurityGroupRules with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ModifySecurityGroupRules for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) ModifySecurityGroupRulesWithContext(ctx aws.Context, input *ModifySecurityGroupRulesInput, opts ...request.Option) (*ModifySecurityGroupRulesOutput, error) {
+ req, out := c.ModifySecurityGroupRulesRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
const opModifySnapshotAttribute = "ModifySnapshotAttribute"
// ModifySnapshotAttributeRequest generates a "aws/request.Request" representing the
@@ -34053,16 +36650,17 @@ func (c *EC2) ModifySnapshotAttributeRequest(input *ModifySnapshotAttributeInput
// ModifySnapshotAttribute API operation for Amazon Elastic Compute Cloud.
//
// Adds or removes permission settings for the specified snapshot. You may add
-// or remove specified AWS account IDs from a snapshot's list of create volume
-// permissions, but you cannot do both in a single operation. If you need to
-// both add and remove account IDs for a snapshot, you must use multiple operations.
-// You can make up to 500 modifications to a snapshot in a single operation.
+// or remove specified Amazon Web Services account IDs from a snapshot's list
+// of create volume permissions, but you cannot do both in a single operation.
+// If you need to both add and remove account IDs for a snapshot, you must use
+// multiple operations. You can make up to 500 modifications to a snapshot in
+// a single operation.
//
-// Encrypted snapshots and snapshots with AWS Marketplace product codes cannot
-// be made public. Snapshots encrypted with your default CMK cannot be shared
-// with other accounts.
+// Encrypted snapshots and snapshots with Amazon Web Services Marketplace product
+// codes cannot be made public. Snapshots encrypted with your default KMS key
+// cannot be shared with other accounts.
//
-// For more information about modifying snapshot permissions, see Sharing snapshots
+// For more information about modifying snapshot permissions, see Share a snapshot
// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modifying-snapshot-permissions.html)
// in the Amazon Elastic Compute Cloud User Guide.
//
@@ -34780,34 +37378,28 @@ func (c *EC2) ModifyVolumeRequest(input *ModifyVolumeInput) (req *request.Reques
// size, volume type, and IOPS capacity. If your EBS volume is attached to a
// current-generation EC2 instance type, you might be able to apply these changes
// without stopping the instance or detaching the volume from it. For more information
-// about modifying an EBS volume running Linux, see Modifying the size, IOPS,
-// or type of an EBS volume on Linux (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html).
-// For more information about modifying an EBS volume running Windows, see Modifying
-// the size, IOPS, or type of an EBS volume on Windows (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ebs-expand-volume.html).
+// about modifying EBS volumes, see Amazon EBS Elastic Volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modify-volume.html)
+// (Linux instances) or Amazon EBS Elastic Volumes (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ebs-modify-volume.html)
+// (Windows instances).
//
// When you complete a resize operation on your volume, you need to extend the
// volume's file-system size to take advantage of the new storage capacity.
-// For information about extending a Linux file system, see Extending a Linux
-// file system (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html#recognize-expanded-volume-linux).
-// For information about extending a Windows file system, see Extending a Windows
-// file system (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ebs-expand-volume.html#recognize-expanded-volume-windows).
+// For more information, see Extend a Linux file system (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html#recognize-expanded-volume-linux)
+// or Extend a Windows file system (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ebs-expand-volume.html#recognize-expanded-volume-windows).
//
// You can use CloudWatch Events to check the status of a modification to an
// EBS volume. For information about CloudWatch Events, see the Amazon CloudWatch
// Events User Guide (https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/).
// You can also track the status of a modification using DescribeVolumesModifications.
-// For information about tracking status changes using either method, see Monitoring
-// volume modifications (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html#monitoring_mods).
+// For information about tracking status changes using either method, see Monitor
+// the progress of volume modifications (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-volume-modifications.html).
//
// With previous-generation instance types, resizing an EBS volume might require
// detaching and reattaching the volume or stopping and restarting the instance.
-// For more information, see Amazon EBS Elastic Volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modify-volume.html)
-// (Linux) or Amazon EBS Elastic Volumes (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ebs-modify-volume.html)
-// (Windows).
//
-// If you reach the maximum volume modification rate per volume limit, you will
-// need to wait at least six hours before applying further modifications to
-// the affected EBS volume.
+// If you reach the maximum volume modification rate per volume limit, you must
+// wait at least six hours before applying further modifications to the affected
+// EBS volume.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -35369,16 +37961,17 @@ func (c *EC2) ModifyVpcPeeringConnectionOptionsRequest(input *ModifyVpcPeeringCo
// * Enable/disable the ability to resolve public DNS hostnames to private
// IP addresses when queried from instances in the peer VPC.
//
-// If the peered VPCs are in the same AWS account, you can enable DNS resolution
-// for queries from the local VPC. This ensures that queries from the local
-// VPC resolve to private IP addresses in the peer VPC. This option is not available
-// if the peered VPCs are in different AWS accounts or different Regions. For
-// peered VPCs in different AWS accounts, each AWS account owner must initiate
-// a separate request to modify the peering connection options. For inter-region
-// peering connections, you must use the Region for the requester VPC to modify
-// the requester VPC peering options and the Region for the accepter VPC to
-// modify the accepter VPC peering options. To verify which VPCs are the accepter
-// and the requester for a VPC peering connection, use the DescribeVpcPeeringConnections
+// If the peered VPCs are in the same Amazon Web Services account, you can enable
+// DNS resolution for queries from the local VPC. This ensures that queries
+// from the local VPC resolve to private IP addresses in the peer VPC. This
+// option is not available if the peered VPCs are in different Amazon
+// Web Services accounts or different Regions. For peered VPCs in different
+// Amazon Web Services accounts, each Amazon Web Services account owner must
+// initiate a separate request to modify the peering connection options. For
+// inter-region peering connections, you must use the Region for the requester
+// VPC to modify the requester VPC peering options and the Region for the accepter
+// VPC to modify the accepter VPC peering options. To verify which VPCs are
+// the accepter and the requester for a VPC peering connection, use the DescribeVpcPeeringConnections
// command.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -35884,7 +38477,7 @@ func (c *EC2) MonitorInstancesRequest(input *MonitorInstancesInput) (req *reques
// Enables detailed monitoring for a running instance. Otherwise, basic monitoring
// is enabled. For more information, see Monitoring your instances and volumes
// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// To disable detailed monitoring, see .
//
@@ -36040,16 +38633,16 @@ func (c *EC2) ProvisionByoipCidrRequest(input *ProvisionByoipCidrInput) (req *re
// ProvisionByoipCidr API operation for Amazon Elastic Compute Cloud.
//
-// Provisions an IPv4 or IPv6 address range for use with your AWS resources
-// through bring your own IP addresses (BYOIP) and creates a corresponding address
-// pool. After the address range is provisioned, it is ready to be advertised
+// Provisions an IPv4 or IPv6 address range for use with your Amazon Web Services
+// resources through bring your own IP addresses (BYOIP) and creates a corresponding
+// address pool. After the address range is provisioned, it is ready to be advertised
// using AdvertiseByoipCidr.
//
-// AWS verifies that you own the address range and are authorized to advertise
-// it. You must ensure that the address range is registered to you and that
-// you created an RPKI ROA to authorize Amazon ASNs 16509 and 14618 to advertise
-// the address range. For more information, see Bring Your Own IP Addresses
-// (BYOIP) (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html)
+// Amazon Web Services verifies that you own the address range and are authorized
+// to advertise it. You must ensure that the address range is registered to
+// you and that you created an RPKI ROA to authorize Amazon ASNs 16509 and 14618
+// to advertise the address range. For more information, see Bring your own
+// IP addresses (BYOIP) (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html)
// in the Amazon Elastic Compute Cloud User Guide.
//
// Provisioning an address range is an asynchronous operation, so the call returns
@@ -36220,7 +38813,7 @@ func (c *EC2) PurchaseReservedInstancesOfferingRequest(input *PurchaseReservedIn
//
// For more information, see Reserved Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/concepts-on-demand-reserved-instances.html)
// and Reserved Instance Marketplace (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -36388,7 +38981,7 @@ func (c *EC2) RebootInstancesRequest(input *RebootInstancesInput) (req *request.
//
// For more information about troubleshooting, see Getting console output and
// rebooting instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-console.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -36470,12 +39063,25 @@ func (c *EC2) RegisterImageRequest(input *RegisterImageInput) (req *request.Requ
// For Amazon EBS-backed instances, CreateImage creates and registers the AMI
// in a single request, so you don't have to register the AMI yourself.
//
-// You can also use RegisterImage to create an Amazon EBS-backed Linux AMI from
-// a snapshot of a root device volume. You specify the snapshot using the block
-// device mapping. For more information, see Launching a Linux instance from
-// a backup (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-launch-snapshot.html)
+// If needed, you can deregister an AMI at any time. Any modifications you make
+// to an AMI backed by an instance store volume invalidates its registration.
+// If you make changes to an image, deregister the previous image and register
+// the new image.
+//
+// Register a snapshot of a root device volume
+//
+// You can use RegisterImage to create an Amazon EBS-backed Linux AMI from a
+// snapshot of a root device volume. You specify the snapshot using a block
+// device mapping. You can't set the encryption state of the volume using the
+// block device mapping. If the snapshot is encrypted, or encryption by default
+// is enabled, the root volume of an instance launched from the AMI is encrypted.
+//
+// For more information, see Create a Linux AMI from a snapshot (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami-ebs.html#creating-launching-ami-from-snapshot)
+// and Use encryption with EBS-backed AMIs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIEncryption.html)
// in the Amazon Elastic Compute Cloud User Guide.
//
+// AWS Marketplace product codes
+//
// If any snapshots have AWS Marketplace product codes, they are copied to the
// new AMI.
//
@@ -36501,11 +39107,6 @@ func (c *EC2) RegisterImageRequest(input *RegisterImageInput) (req *request.Requ
// Obtaining billing information (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-billing-info.html)
// in the Amazon Elastic Compute Cloud User Guide.
//
-// If needed, you can deregister an AMI at any time. Any modifications you make
-// to an AMI backed by an instance store volume invalidates its registration.
-// If you make changes to an image, deregister the previous image and register
-// the new image.
-//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -37214,7 +39815,7 @@ func (c *EC2) ReleaseAddressRequest(input *ReleaseAddressInput) (req *request.Re
// Be sure to update your DNS records and any servers or devices that communicate
// with the address. If you attempt to release an Elastic IP address that you
// already released, you'll get an AuthFailure error if the address is already
-// allocated to another AWS account.
+// allocated to another Amazon Web Services account.
//
// [EC2-VPC] After you release an Elastic IP address for use in a VPC, you might
// be able to recover it. For more information, see AllocateAddress.
@@ -37616,7 +40217,7 @@ func (c *EC2) ReplaceRouteRequest(input *ReplaceRouteInput) (req *request.Reques
// instance, NAT gateway, VPC peering connection, network interface, egress-only
// internet gateway, or transit gateway.
//
-// For more information, see Route Tables (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html)
+// For more information, see Route tables (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html)
// in the Amazon Virtual Private Cloud User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -37694,7 +40295,7 @@ func (c *EC2) ReplaceRouteTableAssociationRequest(input *ReplaceRouteTableAssoci
// Changes the route table associated with a given subnet, internet gateway,
// or virtual private gateway in a VPC. After the operation completes, the subnet
// or gateway uses the routes in the new route table. For more information about
-// route tables, see Route Tables (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html)
+// route tables, see Route tables (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html)
// in the Amazon Virtual Private Cloud User Guide.
//
// You can also use this operation to change which table is the main route table
@@ -38059,6 +40660,81 @@ func (c *EC2) RequestSpotInstancesWithContext(ctx aws.Context, input *RequestSpo
return out, req.Send()
}
+const opResetAddressAttribute = "ResetAddressAttribute"
+
+// ResetAddressAttributeRequest generates a "aws/request.Request" representing the
+// client's request for the ResetAddressAttribute operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ResetAddressAttribute for more information on using the ResetAddressAttribute
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the ResetAddressAttributeRequest method.
+// req, resp := client.ResetAddressAttributeRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetAddressAttribute
+func (c *EC2) ResetAddressAttributeRequest(input *ResetAddressAttributeInput) (req *request.Request, output *ResetAddressAttributeOutput) {
+ op := &request.Operation{
+ Name: opResetAddressAttribute,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ResetAddressAttributeInput{}
+ }
+
+ output = &ResetAddressAttributeOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ResetAddressAttribute API operation for Amazon Elastic Compute Cloud.
+//
+// Resets the attribute of the specified IP address. For requirements, see Using
+// reverse DNS for email applications (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html#Using_Elastic_Addressing_Reverse_DNS).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation ResetAddressAttribute for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetAddressAttribute
+func (c *EC2) ResetAddressAttribute(input *ResetAddressAttributeInput) (*ResetAddressAttributeOutput, error) {
+ req, out := c.ResetAddressAttributeRequest(input)
+ return out, req.Send()
+}
+
+// ResetAddressAttributeWithContext is the same as ResetAddressAttribute with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ResetAddressAttribute for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) ResetAddressAttributeWithContext(ctx aws.Context, input *ResetAddressAttributeInput, opts ...request.Option) (*ResetAddressAttributeOutput, error) {
+ req, out := c.ResetAddressAttributeRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
const opResetEbsDefaultKmsKeyId = "ResetEbsDefaultKmsKeyId"
// ResetEbsDefaultKmsKeyIdRequest generates a "aws/request.Request" representing the
@@ -38103,12 +40779,13 @@ func (c *EC2) ResetEbsDefaultKmsKeyIdRequest(input *ResetEbsDefaultKmsKeyIdInput
// ResetEbsDefaultKmsKeyId API operation for Amazon Elastic Compute Cloud.
//
-// Resets the default customer master key (CMK) for EBS encryption for your
-// account in this Region to the AWS managed CMK for EBS.
+// Resets the default KMS key for EBS encryption for your account in this Region
+// to the Amazon Web Services managed KMS key for EBS.
//
-// After resetting the default CMK to the AWS managed CMK, you can continue
-// to encrypt by a customer managed CMK by specifying it when you create the
-// volume. For more information, see Amazon EBS encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html)
+// After resetting the default KMS key to the Amazon Web Services managed KMS
+// key, you can continue to encrypt by a customer managed KMS key by specifying
+// it when you create the volume. For more information, see Amazon EBS encryption
+// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html)
// in the Amazon Elastic Compute Cloud User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -38344,7 +41021,7 @@ func (c *EC2) ResetInstanceAttributeRequest(input *ResetInstanceAttributeInput)
// is enabled. The default value is true, which means checking is enabled. This
// value must be false for a NAT instance to perform NAT. For more information,
// see NAT Instances (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html)
-// in the Amazon Virtual Private Cloud User Guide.
+// in the Amazon VPC User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -38497,7 +41174,7 @@ func (c *EC2) ResetSnapshotAttributeRequest(input *ResetSnapshotAttributeInput)
//
// Resets permission settings for the specified snapshot.
//
-// For more information about modifying snapshot permissions, see Sharing snapshots
+// For more information about modifying snapshot permissions, see Share a snapshot
// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modifying-snapshot-permissions.html)
// in the Amazon Elastic Compute Cloud User Guide.
//
@@ -38799,23 +41476,25 @@ func (c *EC2) RevokeSecurityGroupEgressRequest(input *RevokeSecurityGroupEgressI
// RevokeSecurityGroupEgress API operation for Amazon Elastic Compute Cloud.
//
-// [VPC only] Removes the specified egress rules from a security group for EC2-VPC.
-// This action does not apply to security groups for use in EC2-Classic. To
-// remove a rule, the values that you specify (for example, ports) must match
-// the existing rule's values exactly.
+// [VPC only] Removes the specified outbound (egress) rules from a security
+// group for EC2-VPC. This action does not apply to security groups for use
+// in EC2-Classic.
+//
+// You can specify rules using either rule IDs or security group rule properties.
+// If you use rule properties, the values that you specify (for example, ports)
+// must match the existing rule's values exactly. Each rule has a protocol,
+// from and to ports, and destination (CIDR range, security group, or prefix
+// list). For the TCP and UDP protocols, you must also specify the destination
+// port or range of ports. For the ICMP protocol, you must also specify the
+// ICMP type and code. If the security group rule has a description, you do
+// not need to specify the description to revoke the rule.
//
// [Default VPC] If the values you specify do not match the existing rule's
// values, no error is returned, and the output describes the security group
// rules that were not revoked.
//
-// AWS recommends that you use DescribeSecurityGroups to verify that the rule
-// has been removed.
-//
-// Each rule consists of the protocol and the IPv4 or IPv6 CIDR range or source
-// security group. For the TCP and UDP protocols, you must also specify the
-// destination port or range of ports. For the ICMP protocol, you must also
-// specify the ICMP type and code. If the security group rule has a description,
-// you do not have to specify the description to revoke the rule.
+// Amazon Web Services recommends that you describe the security group to verify
+// that the rules were removed.
//
// Rule changes are propagated to instances within the security group as quickly
// as possible. However, a small delay might occur.
@@ -38892,22 +41571,23 @@ func (c *EC2) RevokeSecurityGroupIngressRequest(input *RevokeSecurityGroupIngres
// RevokeSecurityGroupIngress API operation for Amazon Elastic Compute Cloud.
//
-// Removes the specified ingress rules from a security group. To remove a rule,
-// the values that you specify (for example, ports) must match the existing
-// rule's values exactly.
+// Removes the specified inbound (ingress) rules from a security group.
//
-// [EC2-Classic , default VPC] If the values you specify do not match the existing
+// You can specify rules using either rule IDs or security group rule properties.
+// If you use rule properties, the values that you specify (for example, ports)
+// must match the existing rule's values exactly. Each rule has a protocol,
+// from and to ports, and source (CIDR range, security group, or prefix list).
+// For the TCP and UDP protocols, you must also specify the destination port
+// or range of ports. For the ICMP protocol, you must also specify the ICMP
+// type and code. If the security group rule has a description, you do not need
+// to specify the description to revoke the rule.
+//
+// [EC2-Classic, default VPC] If the values you specify do not match the existing
// rule's values, no error is returned, and the output describes the security
// group rules that were not revoked.
//
-// AWS recommends that you use DescribeSecurityGroups to verify that the rule
-// has been removed.
-//
-// Each rule consists of the protocol and the CIDR range or source security
-// group. For the TCP and UDP protocols, you must also specify the destination
-// port or range of ports. For the ICMP protocol, you must also specify the
-// ICMP type and code. If the security group rule has a description, you do
-// not have to specify the description to revoke the rule.
+// Amazon Web Services recommends that you describe the security group to verify
+// that the rules were removed.
//
// Rule changes are propagated to instances within the security group as quickly
// as possible. However, a small delay might occur.
@@ -39031,13 +41711,11 @@ func (c *EC2) RunInstancesRequest(input *RunInstancesInput) (req *request.Reques
// Linux instances have access to the public key of the key pair at boot. You
// can use this key to provide secure access to the instance. Amazon EC2 public
// images use this feature to provide secure access without passwords. For more
-// information, see Key pairs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// information, see Key pairs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html).
//
// For troubleshooting, see What to do if an instance immediately terminates
// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_InstanceStraightToTerminated.html),
-// and Troubleshooting connecting to your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesConnecting.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// and Troubleshooting connecting to your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesConnecting.html).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -39121,7 +41799,7 @@ func (c *EC2) RunScheduledInstancesRequest(input *RunScheduledInstancesInput) (r
// If you terminate a Scheduled Instance before the current scheduled time period
// ends, you can launch it again after a few minutes. For more information,
// see Scheduled Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-scheduled-instances.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -39633,11 +42311,7 @@ func (c *EC2) StartInstancesRequest(input *StartInstancesInput) (req *request.Re
// released and you are not billed for instance usage. However, your root partition
// Amazon EBS volume remains and continues to persist your data, and you are
// charged for Amazon EBS volume usage. You can restart your instance at any
-// time. Every time you start your Windows instance, Amazon EC2 charges you
-// for a full instance hour. If you stop and restart your Windows instance,
-// a new instance hour begins and Amazon EC2 charges you for another full instance
-// hour even if you are still within the same 60-minute period when it was stopped.
-// Every time you start your Linux instance, Amazon EC2 charges a one-minute
+// time. Every time you start your instance, Amazon EC2 charges a one-minute
// minimum for instance usage, and thereafter charges per second for instance
// usage.
//
@@ -39648,7 +42322,7 @@ func (c *EC2) StartInstancesRequest(input *StartInstancesInput) (req *request.Re
// root device returns an error.
//
// For more information, see Stopping instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Stop_Start.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -39886,23 +42560,19 @@ func (c *EC2) StopInstancesRequest(input *StopInstancesInput) (req *request.Requ
// for hibernation (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html#enabling-hibernation)
// and it meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html#hibernating-prerequisites).
// For more information, see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// We don't charge usage for a stopped instance, or data transfer fees; however,
// your root partition Amazon EBS volume remains and continues to persist your
// data, and you are charged for Amazon EBS volume usage. Every time you start
-// your Windows instance, Amazon EC2 charges you for a full instance hour. If
-// you stop and restart your Windows instance, a new instance hour begins and
-// Amazon EC2 charges you for another full instance hour even if you are still
-// within the same 60-minute period when it was stopped. Every time you start
-// your Linux instance, Amazon EC2 charges a one-minute minimum for instance
-// usage, and thereafter charges per second for instance usage.
+// your instance, Amazon EC2 charges a one-minute minimum for instance usage,
+// and thereafter charges per second for instance usage.
//
// You can't stop or hibernate instance store-backed instances. You can't use
// the Stop action to hibernate Spot Instances, but you can specify that Amazon
// EC2 should hibernate Spot Instances when they are interrupted. For more information,
// see Hibernating interrupted Spot Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-interruptions.html#hibernate-spot-instances)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// When you stop or hibernate an instance, we shut it down. You can restart
// your instance at any time. Before stopping or hibernating an instance, make
@@ -39918,13 +42588,13 @@ func (c *EC2) StopInstancesRequest(input *StopInstancesInput) (req *request.Requ
// launch are automatically deleted. For more information about the differences
// between rebooting, stopping, hibernating, and terminating instances, see
// Instance lifecycle (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// When you stop an instance, we attempt to shut it down forcibly after a short
// while. If your instance appears stuck in the stopping state after a period
// of time, there may be an issue with the underlying host computer. For more
// information, see Troubleshooting stopping your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesStopping.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -40080,6 +42750,36 @@ func (c *EC2) TerminateInstancesRequest(input *TerminateInstancesInput) (req *re
// If you specify multiple instances and the request fails (for example, because
// of a single incorrect instance ID), none of the instances are terminated.
//
+// If you terminate multiple instances across multiple Availability Zones, and
+// one or more of the specified instances are enabled for termination protection,
+// the request fails with the following results:
+//
+// * The specified instances that are in the same Availability Zone as the
+// protected instance are not terminated.
+//
+// * The specified instances that are in different Availability Zones, where
+// no other specified instances are protected, are successfully terminated.
+//
+// For example, say you have the following instances:
+//
+// * Instance A: us-east-1a; Not protected
+//
+// * Instance B: us-east-1a; Not protected
+//
+// * Instance C: us-east-1b; Protected
+//
+// * Instance D: us-east-1b; Not protected
+//
+// If you attempt to terminate all of these instances in the same request, the
+// request reports failure with the following results:
+//
+// * Instance A and Instance B are successfully terminated because none of
+// the specified instances in us-east-1a are enabled for termination protection.
+//
+// * Instance C and Instance D fail to terminate because at least one of
+// the specified instances in us-east-1b (Instance C) is enabled for termination
+// protection.
+//
// Terminated instances remain visible after termination (for approximately
// one hour).
//
@@ -40094,11 +42794,11 @@ func (c *EC2) TerminateInstancesRequest(input *TerminateInstancesInput) (req *re
// device mapping parameter set to true are automatically deleted. For more
// information about the differences between stopping and terminating instances,
// see Instance lifecycle (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// For more information about troubleshooting, see Troubleshooting terminating
// your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesShuttingDown.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -40172,7 +42872,8 @@ func (c *EC2) UnassignIpv6AddressesRequest(input *UnassignIpv6AddressesInput) (r
// UnassignIpv6Addresses API operation for Amazon Elastic Compute Cloud.
//
-// Unassigns one or more IPv6 addresses from a network interface.
+// Unassigns one or more IPv6 addresses or IPv6 Prefix Delegation prefixes
+// from a network interface.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -40247,7 +42948,8 @@ func (c *EC2) UnassignPrivateIpAddressesRequest(input *UnassignPrivateIpAddresse
// UnassignPrivateIpAddresses API operation for Amazon Elastic Compute Cloud.
//
-// Unassigns one or more secondary private IP addresses from a network interface.
+// Unassigns one or more secondary private IP addresses, or IPv4 Prefix Delegation
+// prefixes from a network interface.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -40323,7 +43025,7 @@ func (c *EC2) UnmonitorInstancesRequest(input *UnmonitorInstancesInput) (req *re
//
// Disables detailed monitoring for a running instance. For more information,
// see Monitoring your instances and volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -40399,11 +43101,8 @@ func (c *EC2) UpdateSecurityGroupRuleDescriptionsEgressRequest(input *UpdateSecu
//
// [VPC only] Updates the description of an egress (outbound) security group
// rule. You can replace an existing description, or add a description to a
-// rule that did not have one previously.
-//
-// You specify the description as part of the IP permissions structure. You
-// can remove a description for a security group rule by omitting the description
-// parameter in the request.
+// rule that did not have one previously. You can remove a description for a
+// security group rule by omitting the description parameter in the request.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -40479,11 +43178,8 @@ func (c *EC2) UpdateSecurityGroupRuleDescriptionsIngressRequest(input *UpdateSec
//
// Updates the description of an ingress (inbound) security group rule. You
// can replace an existing description, or add a description to a rule that
-// did not have one previously.
-//
-// You specify the description as part of the IP permissions structure. You
-// can remove a description for a security group rule by omitting the description
-// parameter in the request.
+// did not have one previously. You can remove a description for a security
+// group rule by omitting the description parameter in the request.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -40563,7 +43259,7 @@ func (c *EC2) WithdrawByoipCidrRequest(input *WithdrawByoipCidrInput) (req *requ
// specify different address ranges each time.
//
// It can take a few minutes before traffic to the specified addresses stops
-// routing to AWS because of BGP propagation delays.
+// routing to Amazon Web Services because of BGP propagation delays.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -41253,7 +43949,7 @@ type Address struct {
// The ID of the network interface.
NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"`
- // The ID of the AWS account that owns the network interface.
+ // The ID of the Amazon Web Services account that owns the network interface.
NetworkInterfaceOwnerId *string `locationName:"networkInterfaceOwnerId" type:"string"`
// The private IP address associated with the Elastic IP address.
@@ -41363,6 +44059,57 @@ func (s *Address) SetTags(v []*Tag) *Address {
return s
}
+// The attributes associated with an Elastic IP address.
+type AddressAttribute struct {
+ _ struct{} `type:"structure"`
+
+ // [EC2-VPC] The allocation ID.
+ AllocationId *string `locationName:"allocationId" type:"string"`
+
+ // The pointer (PTR) record for the IP address.
+ PtrRecord *string `locationName:"ptrRecord" type:"string"`
+
+ // The updated PTR record for the IP address.
+ PtrRecordUpdate *PtrUpdateStatus `locationName:"ptrRecordUpdate" type:"structure"`
+
+ // The public IP address.
+ PublicIp *string `locationName:"publicIp" type:"string"`
+}
+
+// String returns the string representation
+func (s AddressAttribute) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddressAttribute) GoString() string {
+ return s.String()
+}
+
+// SetAllocationId sets the AllocationId field's value.
+func (s *AddressAttribute) SetAllocationId(v string) *AddressAttribute {
+ s.AllocationId = &v
+ return s
+}
+
+// SetPtrRecord sets the PtrRecord field's value.
+func (s *AddressAttribute) SetPtrRecord(v string) *AddressAttribute {
+ s.PtrRecord = &v
+ return s
+}
+
+// SetPtrRecordUpdate sets the PtrRecordUpdate field's value.
+func (s *AddressAttribute) SetPtrRecordUpdate(v *PtrUpdateStatus) *AddressAttribute {
+ s.PtrRecordUpdate = v
+ return s
+}
+
+// SetPublicIp sets the PublicIp field's value.
+func (s *AddressAttribute) SetPublicIp(v string) *AddressAttribute {
+ s.PublicIp = &v
+ return s
+}
+
type AdvertiseByoipCidrInput struct {
_ struct{} `type:"structure"`
@@ -41538,8 +44285,8 @@ func (s *AllocateAddressInput) SetTagSpecifications(v []*TagSpecification) *Allo
type AllocateAddressOutput struct {
_ struct{} `type:"structure"`
- // [EC2-VPC] The ID that AWS assigns to represent the allocation of the Elastic
- // IP address for use with instances in a VPC.
+ // [EC2-VPC] The ID that Amazon Web Services assigns to represent the allocation
+ // of the Elastic IP address for use with instances in a VPC.
AllocationId *string `locationName:"allocationId" type:"string"`
// The carrier IP address. This option is only available for network interfaces
@@ -41631,8 +44378,8 @@ type AllocateHostsInput struct {
// Indicates whether the host accepts any untargeted instance launches that
// match its instance type configuration, or if it only accepts Host tenancy
// instance launches that specify its unique host ID. For more information,
- // see Understanding Instance Placement and Host Affinity (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/how-dedicated-hosts-work.html#dedicated-hosts-understanding)
- // in the Amazon EC2 User Guide for Linux Instances.
+ // see Understanding auto-placement and affinity (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/how-dedicated-hosts-work.html#dedicated-hosts-understanding)
+ // in the Amazon EC2 User Guide.
//
// Default: on
AutoPlacement *string `locationName:"autoPlacement" type:"string" enum:"AutoPlacement"`
@@ -41643,13 +44390,13 @@ type AllocateHostsInput struct {
AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"`
// Unique, case-sensitive identifier that you provide to ensure the idempotency
- // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
+ // of the request. For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
ClientToken *string `locationName:"clientToken" type:"string"`
// Indicates whether to enable or disable host recovery for the Dedicated Host.
- // Host recovery is disabled by default. For more information, see Host Recovery
+ // Host recovery is disabled by default. For more information, see Host recovery
// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/dedicated-hosts-recovery.html)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // in the Amazon EC2 User Guide.
//
// Default: off
HostRecovery *string `type:"string" enum:"HostRecovery"`
@@ -42101,7 +44848,7 @@ type AnalysisRouteTableRoute struct {
// The destination IPv4 address, in CIDR notation.
DestinationCidr *string `locationName:"destinationCidr" type:"string"`
- // The prefix of the AWS service.
+ // The prefix of the Amazon Web Services service.
DestinationPrefixListId *string `locationName:"destinationPrefixListId" type:"string"`
// The ID of an egress-only internet gateway.
@@ -42384,8 +45131,10 @@ func (s *ApplySecurityGroupsToClientVpnTargetNetworkOutput) SetSecurityGroupIds(
type AssignIpv6AddressesInput struct {
_ struct{} `type:"structure"`
- // The number of IPv6 addresses to assign to the network interface. Amazon EC2
- // automatically selects the IPv6 addresses from the subnet range. You can't
+ // The number of additional IPv6 addresses to assign to the network interface.
+ // The specified number of IPv6 addresses are assigned in addition to the existing
+ // IPv6 addresses that are already assigned to the network interface. Amazon
+ // EC2 automatically selects the IPv6 addresses from the subnet range. You can't
// use this option if specifying specific IPv6 addresses.
Ipv6AddressCount *int64 `locationName:"ipv6AddressCount" type:"integer"`
@@ -42393,6 +45142,15 @@ type AssignIpv6AddressesInput struct {
// You can't use this option if you're specifying a number of IPv6 addresses.
Ipv6Addresses []*string `locationName:"ipv6Addresses" locationNameList:"item" type:"list"`
+ // The number of IPv6 prefixes that Amazon Web Services automatically assigns
+ // to the network interface. You cannot use this option if you use the Ipv6Prefixes
+ // option.
+ Ipv6PrefixCount *int64 `type:"integer"`
+
+ // One or more IPv6 prefixes assigned to the network interface. You cannot use
+ // this option if you use the Ipv6PrefixCount option.
+ Ipv6Prefixes []*string `locationName:"Ipv6Prefix" locationNameList:"item" type:"list"`
+
// The ID of the network interface.
//
// NetworkInterfaceId is a required field
@@ -42434,6 +45192,18 @@ func (s *AssignIpv6AddressesInput) SetIpv6Addresses(v []*string) *AssignIpv6Addr
return s
}
+// SetIpv6PrefixCount sets the Ipv6PrefixCount field's value.
+func (s *AssignIpv6AddressesInput) SetIpv6PrefixCount(v int64) *AssignIpv6AddressesInput {
+ s.Ipv6PrefixCount = &v
+ return s
+}
+
+// SetIpv6Prefixes sets the Ipv6Prefixes field's value.
+func (s *AssignIpv6AddressesInput) SetIpv6Prefixes(v []*string) *AssignIpv6AddressesInput {
+ s.Ipv6Prefixes = v
+ return s
+}
+
// SetNetworkInterfaceId sets the NetworkInterfaceId field's value.
func (s *AssignIpv6AddressesInput) SetNetworkInterfaceId(v string) *AssignIpv6AddressesInput {
s.NetworkInterfaceId = &v
@@ -42443,9 +45213,13 @@ func (s *AssignIpv6AddressesInput) SetNetworkInterfaceId(v string) *AssignIpv6Ad
type AssignIpv6AddressesOutput struct {
_ struct{} `type:"structure"`
- // The IPv6 addresses assigned to the network interface.
+ // The new IPv6 addresses assigned to the network interface. Existing IPv6 addresses
+ // that were assigned to the network interface before the request are not included.
AssignedIpv6Addresses []*string `locationName:"assignedIpv6Addresses" locationNameList:"item" type:"list"`
+ // The IPv6 prefixes that are assigned to the network interface.
+ AssignedIpv6Prefixes []*string `locationName:"assignedIpv6PrefixSet" locationNameList:"item" type:"list"`
+
// The ID of the network interface.
NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"`
}
@@ -42466,6 +45240,12 @@ func (s *AssignIpv6AddressesOutput) SetAssignedIpv6Addresses(v []*string) *Assig
return s
}
+// SetAssignedIpv6Prefixes sets the AssignedIpv6Prefixes field's value.
+func (s *AssignIpv6AddressesOutput) SetAssignedIpv6Prefixes(v []*string) *AssignIpv6AddressesOutput {
+ s.AssignedIpv6Prefixes = v
+ return s
+}
+
// SetNetworkInterfaceId sets the NetworkInterfaceId field's value.
func (s *AssignIpv6AddressesOutput) SetNetworkInterfaceId(v string) *AssignIpv6AddressesOutput {
s.NetworkInterfaceId = &v
@@ -42480,6 +45260,15 @@ type AssignPrivateIpAddressesInput struct {
// network interface or instance to be reassigned to the specified network interface.
AllowReassignment *bool `locationName:"allowReassignment" type:"boolean"`
+ // The number of IPv4 prefixes that Amazon Web Services automatically assigns
+ // to the network interface. You cannot use this option if you use the Ipv4
+ // Prefixes option.
+ Ipv4PrefixCount *int64 `type:"integer"`
+
+ // One or more IPv4 prefixes assigned to the network interface. You cannot use
+ // this option if you use the Ipv4PrefixCount option.
+ Ipv4Prefixes []*string `locationName:"Ipv4Prefix" locationNameList:"item" type:"list"`
+
// The ID of the network interface.
//
// NetworkInterfaceId is a required field
@@ -42527,6 +45316,18 @@ func (s *AssignPrivateIpAddressesInput) SetAllowReassignment(v bool) *AssignPriv
return s
}
+// SetIpv4PrefixCount sets the Ipv4PrefixCount field's value.
+func (s *AssignPrivateIpAddressesInput) SetIpv4PrefixCount(v int64) *AssignPrivateIpAddressesInput {
+ s.Ipv4PrefixCount = &v
+ return s
+}
+
+// SetIpv4Prefixes sets the Ipv4Prefixes field's value.
+func (s *AssignPrivateIpAddressesInput) SetIpv4Prefixes(v []*string) *AssignPrivateIpAddressesInput {
+ s.Ipv4Prefixes = v
+ return s
+}
+
// SetNetworkInterfaceId sets the NetworkInterfaceId field's value.
func (s *AssignPrivateIpAddressesInput) SetNetworkInterfaceId(v string) *AssignPrivateIpAddressesInput {
s.NetworkInterfaceId = &v
@@ -42548,6 +45349,9 @@ func (s *AssignPrivateIpAddressesInput) SetSecondaryPrivateIpAddressCount(v int6
type AssignPrivateIpAddressesOutput struct {
_ struct{} `type:"structure"`
+ // The IPv4 prefixes that are assigned to the network interface.
+ AssignedIpv4Prefixes []*Ipv4PrefixSpecification `locationName:"assignedIpv4PrefixSet" locationNameList:"item" type:"list"`
+
// The private IP addresses assigned to the network interface.
AssignedPrivateIpAddresses []*AssignedPrivateIpAddress `locationName:"assignedPrivateIpAddressesSet" locationNameList:"item" type:"list"`
@@ -42565,6 +45369,12 @@ func (s AssignPrivateIpAddressesOutput) GoString() string {
return s.String()
}
+// SetAssignedIpv4Prefixes sets the AssignedIpv4Prefixes field's value.
+func (s *AssignPrivateIpAddressesOutput) SetAssignedIpv4Prefixes(v []*Ipv4PrefixSpecification) *AssignPrivateIpAddressesOutput {
+ s.AssignedIpv4Prefixes = v
+ return s
+}
+
// SetAssignedPrivateIpAddresses sets the AssignedPrivateIpAddresses field's value.
func (s *AssignPrivateIpAddressesOutput) SetAssignedPrivateIpAddresses(v []*AssignedPrivateIpAddress) *AssignPrivateIpAddressesOutput {
s.AssignedPrivateIpAddresses = v
@@ -42621,10 +45431,10 @@ type AssociateAddressInput struct {
// it is UnauthorizedOperation.
DryRun *bool `locationName:"dryRun" type:"boolean"`
- // The ID of the instance. This is required for EC2-Classic. For EC2-VPC, you
- // can specify either the instance ID or the network interface ID, but not both.
- // The operation fails if you specify an instance ID unless exactly one network
- // interface is attached.
+ // The ID of the instance. The instance must have exactly one attached network
+ // interface. For EC2-VPC, you can specify either the instance ID or the network
+ // interface ID, but not both. For EC2-Classic, you must specify an instance
+ // ID and the instance must be in the running state.
InstanceId *string `type:"string"`
// [EC2-VPC] The ID of the network interface. If the instance has more than
@@ -42639,8 +45449,8 @@ type AssociateAddressInput struct {
// address is associated with the primary private IP address.
PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"`
- // The Elastic IP address to associate with the instance. This is required for
- // EC2-Classic.
+ // [EC2-Classic] The Elastic IP address to associate with the instance. This
+ // is required for EC2-Classic.
PublicIp *string `type:"string"`
}
@@ -42973,10 +45783,10 @@ type AssociateEnclaveCertificateIamRoleOutput struct {
CertificateS3BucketName *string `locationName:"certificateS3BucketName" type:"string"`
// The Amazon S3 object key where the certificate, certificate chain, and encrypted
- // private key bundle are stored. The object key is formatted as follows: certificate_arn/role_arn.
+ // private key bundle are stored. The object key is formatted as follows: role_arn/certificate_arn.
CertificateS3ObjectKey *string `locationName:"certificateS3ObjectKey" type:"string"`
- // The ID of the AWS KMS CMK used to encrypt the private key of the certificate.
+ // The ID of the KMS key used to encrypt the private key of the certificate.
EncryptionKmsKeyId *string `locationName:"encryptionKmsKeyId" type:"string"`
}
@@ -43083,6 +45893,93 @@ func (s *AssociateIamInstanceProfileOutput) SetIamInstanceProfileAssociation(v *
return s
}
+type AssociateInstanceEventWindowInput struct {
+ _ struct{} `type:"structure"`
+
+ // One or more targets associated with the specified event window.
+ //
+ // AssociationTarget is a required field
+ AssociationTarget *InstanceEventWindowAssociationRequest `type:"structure" required:"true"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The ID of the event window.
+ //
+ // InstanceEventWindowId is a required field
+ InstanceEventWindowId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AssociateInstanceEventWindowInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssociateInstanceEventWindowInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssociateInstanceEventWindowInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AssociateInstanceEventWindowInput"}
+ if s.AssociationTarget == nil {
+ invalidParams.Add(request.NewErrParamRequired("AssociationTarget"))
+ }
+ if s.InstanceEventWindowId == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceEventWindowId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAssociationTarget sets the AssociationTarget field's value.
+func (s *AssociateInstanceEventWindowInput) SetAssociationTarget(v *InstanceEventWindowAssociationRequest) *AssociateInstanceEventWindowInput {
+ s.AssociationTarget = v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *AssociateInstanceEventWindowInput) SetDryRun(v bool) *AssociateInstanceEventWindowInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetInstanceEventWindowId sets the InstanceEventWindowId field's value.
+func (s *AssociateInstanceEventWindowInput) SetInstanceEventWindowId(v string) *AssociateInstanceEventWindowInput {
+ s.InstanceEventWindowId = &v
+ return s
+}
+
+type AssociateInstanceEventWindowOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the event window.
+ InstanceEventWindow *InstanceEventWindow `locationName:"instanceEventWindow" type:"structure"`
+}
+
+// String returns the string representation
+func (s AssociateInstanceEventWindowOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssociateInstanceEventWindowOutput) GoString() string {
+ return s.String()
+}
+
+// SetInstanceEventWindow sets the InstanceEventWindow field's value.
+func (s *AssociateInstanceEventWindowOutput) SetInstanceEventWindow(v *InstanceEventWindow) *AssociateInstanceEventWindowOutput {
+ s.InstanceEventWindow = v
+ return s
+}
+
type AssociateRouteTableInput struct {
_ struct{} `type:"structure"`
@@ -43432,6 +46329,132 @@ func (s *AssociateTransitGatewayRouteTableOutput) SetAssociation(v *TransitGatew
return s
}
+type AssociateTrunkInterfaceInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the branch network interface.
+ //
+ // BranchInterfaceId is a required field
+ BranchInterfaceId *string `type:"string" required:"true"`
+
+ // Unique, case-sensitive identifier that you provide to ensure the idempotency
+ // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html).
+ ClientToken *string `type:"string" idempotencyToken:"true"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The application key. This applies to the GRE protocol.
+ GreKey *int64 `type:"integer"`
+
+ // The ID of the trunk network interface.
+ //
+ // TrunkInterfaceId is a required field
+ TrunkInterfaceId *string `type:"string" required:"true"`
+
+ // The ID of the VLAN. This applies to the VLAN protocol.
+ VlanId *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s AssociateTrunkInterfaceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssociateTrunkInterfaceInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssociateTrunkInterfaceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AssociateTrunkInterfaceInput"}
+ if s.BranchInterfaceId == nil {
+ invalidParams.Add(request.NewErrParamRequired("BranchInterfaceId"))
+ }
+ if s.TrunkInterfaceId == nil {
+ invalidParams.Add(request.NewErrParamRequired("TrunkInterfaceId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBranchInterfaceId sets the BranchInterfaceId field's value.
+func (s *AssociateTrunkInterfaceInput) SetBranchInterfaceId(v string) *AssociateTrunkInterfaceInput {
+ s.BranchInterfaceId = &v
+ return s
+}
+
+// SetClientToken sets the ClientToken field's value.
+func (s *AssociateTrunkInterfaceInput) SetClientToken(v string) *AssociateTrunkInterfaceInput {
+ s.ClientToken = &v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *AssociateTrunkInterfaceInput) SetDryRun(v bool) *AssociateTrunkInterfaceInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetGreKey sets the GreKey field's value.
+func (s *AssociateTrunkInterfaceInput) SetGreKey(v int64) *AssociateTrunkInterfaceInput {
+ s.GreKey = &v
+ return s
+}
+
+// SetTrunkInterfaceId sets the TrunkInterfaceId field's value.
+func (s *AssociateTrunkInterfaceInput) SetTrunkInterfaceId(v string) *AssociateTrunkInterfaceInput {
+ s.TrunkInterfaceId = &v
+ return s
+}
+
+// SetVlanId sets the VlanId field's value.
+func (s *AssociateTrunkInterfaceInput) SetVlanId(v int64) *AssociateTrunkInterfaceInput {
+ s.VlanId = &v
+ return s
+}
+
+type AssociateTrunkInterfaceOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Unique, case-sensitive identifier that you provide to ensure the idempotency
+ // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html).
+ ClientToken *string `locationName:"clientToken" type:"string"`
+
+ // Information about the association between the trunk network interface and
+ // branch network interface.
+ InterfaceAssociation *TrunkInterfaceAssociation `locationName:"interfaceAssociation" type:"structure"`
+}
+
+// String returns the string representation
+func (s AssociateTrunkInterfaceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssociateTrunkInterfaceOutput) GoString() string {
+ return s.String()
+}
+
+// SetClientToken sets the ClientToken field's value.
+func (s *AssociateTrunkInterfaceOutput) SetClientToken(v string) *AssociateTrunkInterfaceOutput {
+ s.ClientToken = &v
+ return s
+}
+
+// SetInterfaceAssociation sets the InterfaceAssociation field's value.
+func (s *AssociateTrunkInterfaceOutput) SetInterfaceAssociation(v *TrunkInterfaceAssociation) *AssociateTrunkInterfaceOutput {
+ s.InterfaceAssociation = v
+ return s
+}
+
type AssociateVpcCidrBlockInput struct {
_ struct{} `type:"structure"`
@@ -43578,7 +46601,7 @@ type AssociatedRole struct {
// The key of the Amazon S3 object ey where the certificate, certificate chain,
// and encrypted private key bundle is stored. The object key is formated as
- // follows: certificate_arn/role_arn.
+ // follows: role_arn/certificate_arn.
CertificateS3ObjectKey *string `locationName:"certificateS3ObjectKey" type:"string"`
// The ID of the KMS customer master key (CMK) used to encrypt the private key.
@@ -43686,6 +46709,77 @@ func (s *AssociationStatus) SetMessage(v string) *AssociationStatus {
return s
}
+// Describes integration options for Amazon Athena.
+type AthenaIntegration struct {
+ _ struct{} `type:"structure"`
+
+ // The location in Amazon S3 to store the generated CloudFormation template.
+ //
+ // IntegrationResultS3DestinationArn is a required field
+ IntegrationResultS3DestinationArn *string `type:"string" required:"true"`
+
+ // The end date for the partition.
+ PartitionEndDate *time.Time `type:"timestamp"`
+
+ // The schedule for adding new partitions to the table.
+ //
+ // PartitionLoadFrequency is a required field
+ PartitionLoadFrequency *string `type:"string" required:"true" enum:"PartitionLoadFrequency"`
+
+ // The start date for the partition.
+ PartitionStartDate *time.Time `type:"timestamp"`
+}
+
+// String returns the string representation
+func (s AthenaIntegration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AthenaIntegration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AthenaIntegration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AthenaIntegration"}
+ if s.IntegrationResultS3DestinationArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("IntegrationResultS3DestinationArn"))
+ }
+ if s.PartitionLoadFrequency == nil {
+ invalidParams.Add(request.NewErrParamRequired("PartitionLoadFrequency"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetIntegrationResultS3DestinationArn sets the IntegrationResultS3DestinationArn field's value.
+func (s *AthenaIntegration) SetIntegrationResultS3DestinationArn(v string) *AthenaIntegration {
+ s.IntegrationResultS3DestinationArn = &v
+ return s
+}
+
+// SetPartitionEndDate sets the PartitionEndDate field's value.
+func (s *AthenaIntegration) SetPartitionEndDate(v time.Time) *AthenaIntegration {
+ s.PartitionEndDate = &v
+ return s
+}
+
+// SetPartitionLoadFrequency sets the PartitionLoadFrequency field's value.
+func (s *AthenaIntegration) SetPartitionLoadFrequency(v string) *AthenaIntegration {
+ s.PartitionLoadFrequency = &v
+ return s
+}
+
+// SetPartitionStartDate sets the PartitionStartDate field's value.
+func (s *AthenaIntegration) SetPartitionStartDate(v time.Time) *AthenaIntegration {
+ s.PartitionStartDate = &v
+ return s
+}
+
type AttachClassicLinkVpcInput struct {
_ struct{} `type:"structure"`
@@ -44441,6 +47535,9 @@ type AuthorizeSecurityGroupEgressInput struct {
// group.
SourceSecurityGroupOwnerId *string `locationName:"sourceSecurityGroupOwnerId" type:"string"`
+ // The tags applied to the security group rule.
+ TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"`
+
// Not supported. Use a set of IP permissions to specify the port.
ToPort *int64 `locationName:"toPort" type:"integer"`
}
@@ -44516,6 +47613,12 @@ func (s *AuthorizeSecurityGroupEgressInput) SetSourceSecurityGroupOwnerId(v stri
return s
}
+// SetTagSpecifications sets the TagSpecifications field's value.
+func (s *AuthorizeSecurityGroupEgressInput) SetTagSpecifications(v []*TagSpecification) *AuthorizeSecurityGroupEgressInput {
+ s.TagSpecifications = v
+ return s
+}
+
// SetToPort sets the ToPort field's value.
func (s *AuthorizeSecurityGroupEgressInput) SetToPort(v int64) *AuthorizeSecurityGroupEgressInput {
s.ToPort = &v
@@ -44524,6 +47627,12 @@ func (s *AuthorizeSecurityGroupEgressInput) SetToPort(v int64) *AuthorizeSecurit
type AuthorizeSecurityGroupEgressOutput struct {
_ struct{} `type:"structure"`
+
+ // Returns true if the request succeeds; otherwise, returns an error.
+ Return *bool `locationName:"return" type:"boolean"`
+
+ // Information about the outbound (egress) security group rules that were added.
+ SecurityGroupRules []*SecurityGroupRule `locationName:"securityGroupRuleSet" locationNameList:"item" type:"list"`
}
// String returns the string representation
@@ -44536,6 +47645,18 @@ func (s AuthorizeSecurityGroupEgressOutput) GoString() string {
return s.String()
}
+// SetReturn sets the Return field's value.
+func (s *AuthorizeSecurityGroupEgressOutput) SetReturn(v bool) *AuthorizeSecurityGroupEgressOutput {
+ s.Return = &v
+ return s
+}
+
+// SetSecurityGroupRules sets the SecurityGroupRules field's value.
+func (s *AuthorizeSecurityGroupEgressOutput) SetSecurityGroupRules(v []*SecurityGroupRule) *AuthorizeSecurityGroupEgressOutput {
+ s.SecurityGroupRules = v
+ return s
+}
+
type AuthorizeSecurityGroupIngressInput struct {
_ struct{} `type:"structure"`
@@ -44593,14 +47714,18 @@ type AuthorizeSecurityGroupIngressInput struct {
// be in the same VPC.
SourceSecurityGroupName *string `type:"string"`
- // [nondefault VPC] The AWS account ID for the source security group, if the
- // source security group is in a different account. You can't specify this parameter
- // in combination with the following parameters: the CIDR IP address range,
- // the IP protocol, the start of the port range, and the end of the port range.
- // Creates rules that grant full ICMP, UDP, and TCP access. To create a rule
- // with a specific IP protocol and port range, use a set of IP permissions instead.
+ // [nondefault VPC] The Amazon Web Services account ID for the source security
+ // group, if the source security group is in a different account. You can't
+ // specify this parameter in combination with the following parameters: the
+ // CIDR IP address range, the IP protocol, the start of the port range, and
+ // the end of the port range. Creates rules that grant full ICMP, UDP, and TCP
+ // access. To create a rule with a specific IP protocol and port range, use
+ // a set of IP permissions instead.
SourceSecurityGroupOwnerId *string `type:"string"`
+ // [VPC Only] The tags applied to the security group rule.
+ TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"`
+
// The end of port range for the TCP and UDP protocols, or an ICMP code number.
// For the ICMP code number, use -1 to specify all codes. If you specify all
// ICMP types, you must specify all codes.
@@ -44674,6 +47799,12 @@ func (s *AuthorizeSecurityGroupIngressInput) SetSourceSecurityGroupOwnerId(v str
return s
}
+// SetTagSpecifications sets the TagSpecifications field's value.
+func (s *AuthorizeSecurityGroupIngressInput) SetTagSpecifications(v []*TagSpecification) *AuthorizeSecurityGroupIngressInput {
+ s.TagSpecifications = v
+ return s
+}
+
// SetToPort sets the ToPort field's value.
func (s *AuthorizeSecurityGroupIngressInput) SetToPort(v int64) *AuthorizeSecurityGroupIngressInput {
s.ToPort = &v
@@ -44682,6 +47813,12 @@ func (s *AuthorizeSecurityGroupIngressInput) SetToPort(v int64) *AuthorizeSecuri
type AuthorizeSecurityGroupIngressOutput struct {
_ struct{} `type:"structure"`
+
+ // Returns true if the request succeeds; otherwise, returns an error.
+ Return *bool `locationName:"return" type:"boolean"`
+
+ // Information about the inbound (ingress) security group rules that were added.
+ SecurityGroupRules []*SecurityGroupRule `locationName:"securityGroupRuleSet" locationNameList:"item" type:"list"`
}
// String returns the string representation
@@ -44694,6 +47831,18 @@ func (s AuthorizeSecurityGroupIngressOutput) GoString() string {
return s.String()
}
+// SetReturn sets the Return field's value.
+func (s *AuthorizeSecurityGroupIngressOutput) SetReturn(v bool) *AuthorizeSecurityGroupIngressOutput {
+ s.Return = &v
+ return s
+}
+
+// SetSecurityGroupRules sets the SecurityGroupRules field's value.
+func (s *AuthorizeSecurityGroupIngressOutput) SetSecurityGroupRules(v []*SecurityGroupRule) *AuthorizeSecurityGroupIngressOutput {
+ s.SecurityGroupRules = v
+ return s
+}
+
// Describes Availability Zones, Local Zones, and Wavelength Zones.
type AvailabilityZone struct {
_ struct{} `type:"structure"`
@@ -44904,7 +48053,8 @@ func (s *BlobAttributeValue) SetValue(v []byte) *BlobAttributeValue {
return s
}
-// Describes a block device mapping.
+// Describes a block device mapping, which defines the EBS volumes and instance
+// store volumes to attach to an instance at launch.
type BlockDeviceMapping struct {
_ struct{} `type:"structure"`
@@ -44915,8 +48065,9 @@ type BlockDeviceMapping struct {
// launched.
Ebs *EbsBlockDevice `locationName:"ebs" type:"structure"`
- // Suppresses the specified device included in the block device mapping of the
- // AMI.
+ // To omit the device from the block device mapping, specify an empty string.
+ // When this property is specified, the device is removed from the block device
+ // mapping regardless of the assigned value.
NoDevice *string `locationName:"noDevice" type:"string"`
// The virtual device name (ephemeralN). Instance store volumes are numbered
@@ -45187,7 +48338,7 @@ func (s *BundleTaskError) SetMessage(v string) *BundleTaskError {
}
// Information about an address range that is provisioned for use with your
-// AWS resources through bring your own IP addresses (BYOIP).
+// Amazon Web Services resources through bring your own IP addresses (BYOIP).
type ByoipCidr struct {
_ struct{} `type:"structure"`
@@ -46046,9 +49197,16 @@ type CapacityReservation struct {
// The type of instance for which the Capacity Reservation reserves capacity.
InstanceType *string `locationName:"instanceType" type:"string"`
- // The ID of the AWS account that owns the Capacity Reservation.
+ // The Amazon Resource Name (ARN) of the Outpost on which the Capacity Reservation
+ // was created.
+ OutpostArn *string `locationName:"outpostArn" type:"string"`
+
+ // The ID of the Amazon Web Services account that owns the Capacity Reservation.
OwnerId *string `locationName:"ownerId" type:"string"`
+ // The date and time at which the Capacity Reservation was started.
+ StartDate *time.Time `locationName:"startDate" type:"timestamp"`
+
// The current state of the Capacity Reservation. A Capacity Reservation can
// be in one of the following states:
//
@@ -46059,8 +49217,8 @@ type CapacityReservation struct {
// and time specified in your request. The reserved capacity is no longer
// available for your use.
//
- // * cancelled - The Capacity Reservation was manually cancelled. The reserved
- // capacity is no longer available for your use.
+ // * cancelled - The Capacity Reservation was cancelled. The reserved capacity
+ // is no longer available for your use.
//
// * pending - The Capacity Reservation request was successful but the capacity
// provisioning is still pending.
@@ -46077,10 +49235,10 @@ type CapacityReservation struct {
// can have one of the following tenancy settings:
//
// * default - The Capacity Reservation is created on hardware that is shared
- // with other AWS accounts.
+ // with other Amazon Web Services accounts.
//
// * dedicated - The Capacity Reservation is created on single-tenant hardware
- // that is dedicated to a single AWS account.
+ // that is dedicated to a single Amazon Web Services account.
Tenancy *string `locationName:"tenancy" type:"string" enum:"CapacityReservationTenancy"`
// The total number of instances for which the Capacity Reservation reserves
@@ -46176,12 +49334,24 @@ func (s *CapacityReservation) SetInstanceType(v string) *CapacityReservation {
return s
}
+// SetOutpostArn sets the OutpostArn field's value.
+func (s *CapacityReservation) SetOutpostArn(v string) *CapacityReservation {
+ s.OutpostArn = &v
+ return s
+}
+
// SetOwnerId sets the OwnerId field's value.
func (s *CapacityReservation) SetOwnerId(v string) *CapacityReservation {
s.OwnerId = &v
return s
}
+// SetStartDate sets the StartDate field's value.
+func (s *CapacityReservation) SetStartDate(v time.Time) *CapacityReservation {
+ s.StartDate = &v
+ return s
+}
+
// SetState sets the State field's value.
func (s *CapacityReservation) SetState(v string) *CapacityReservation {
s.State = &v
@@ -46213,7 +49383,7 @@ type CapacityReservationGroup struct {
// The ARN of the resource group.
GroupArn *string `locationName:"groupArn" type:"string"`
- // The ID of the AWS account that owns the resource group.
+ // The ID of the Amazon Web Services account that owns the resource group.
OwnerId *string `locationName:"ownerId" type:"string"`
}
@@ -46246,9 +49416,9 @@ func (s *CapacityReservationGroup) SetOwnerId(v string) *CapacityReservationGrou
//
// For more information about Capacity Reservations, see On-Demand Capacity
// Reservations (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-capacity-reservations.html)
-// in the Amazon Elastic Compute Cloud User Guide. For examples of using Capacity
-// Reservations in an EC2 Fleet, see EC2 Fleet example configurations (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-examples.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide. For examples of using Capacity Reservations
+// in an EC2 Fleet, see EC2 Fleet example configurations (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-examples.html)
+// in the Amazon EC2 User Guide.
type CapacityReservationOptions struct {
_ struct{} `type:"structure"`
@@ -46291,9 +49461,9 @@ func (s *CapacityReservationOptions) SetUsageStrategy(v string) *CapacityReserva
//
// For more information about Capacity Reservations, see On-Demand Capacity
// Reservations (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-capacity-reservations.html)
-// in the Amazon Elastic Compute Cloud User Guide. For examples of using Capacity
-// Reservations in an EC2 Fleet, see EC2 Fleet example configurations (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-examples.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide. For examples of using Capacity Reservations
+// in an EC2 Fleet, see EC2 Fleet example configurations (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-examples.html)
+// in the Amazon EC2 User Guide.
type CapacityReservationOptionsRequest struct {
_ struct{} `type:"structure"`
@@ -46498,7 +49668,7 @@ type CarrierGateway struct {
// The ID of the carrier gateway.
CarrierGatewayId *string `locationName:"carrierGatewayId" type:"string"`
- // The AWS account ID of the owner of the carrier gateway.
+ // The Amazon Web Services account ID of the owner of the carrier gateway.
OwnerId *string `locationName:"ownerId" type:"string"`
// The state of the carrier gateway.
@@ -46601,9 +49771,8 @@ func (s *CertificateAuthenticationRequest) SetClientRootCertificateChainArn(v st
}
// Provides authorization for Amazon to bring a specific IP address range to
-// a specific AWS account using bring your own IP addresses (BYOIP). For more
-// information, see Prepare to Bring Your Address Range to Your AWS Account
-// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html#prepare-for-byoip)
+// a specific Amazon Web Services account using bring your own IP addresses
+// (BYOIP). For more information, see Configuring your BYOIP address range (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html#prepare-for-byoip)
// in the Amazon Elastic Compute Cloud User Guide.
type CidrAuthorizationContext struct {
_ struct{} `type:"structure"`
@@ -47893,8 +51062,8 @@ func (s *ConfirmProductInstanceInput) SetProductCode(v string) *ConfirmProductIn
type ConfirmProductInstanceOutput struct {
_ struct{} `type:"structure"`
- // The AWS account ID of the instance owner. This is only present if the product
- // code is attached to the instance.
+ // The Amazon Web Services account ID of the instance owner. This is only present
+ // if the product code is attached to the instance.
OwnerId *string `locationName:"ownerId" type:"string"`
// The return value of the request. Returns true if the specified product code
@@ -48294,13 +51463,23 @@ type CopyImageInput struct {
_ struct{} `type:"structure"`
// Unique, case-sensitive identifier you provide to ensure idempotency of the
- // request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html)
+ // in the Amazon EC2 API Reference.
ClientToken *string `type:"string"`
// A description for the new AMI in the destination Region.
Description *string `type:"string"`
+ // The Amazon Resource Name (ARN) of the Outpost to which to copy the AMI. Only
+ // specify this parameter when copying an AMI from an AWS Region to an Outpost.
+ // The AMI must be in the Region of the destination Outpost. You cannot copy
+ // an AMI from an Outpost to a Region, from one Outpost to another, or within
+ // the same Outpost.
+ //
+ // For more information, see Copying AMIs from an AWS Region to an Outpost (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snapshots-outposts.html#copy-amis)
+ // in the Amazon Elastic Compute Cloud User Guide.
+ DestinationOutpostArn *string `type:"string"`
+
// Checks whether you have the required permissions for the action, without
// actually making the request, and provides an error response. If you have
// the required permissions, the error response is DryRunOperation. Otherwise,
@@ -48396,6 +51575,12 @@ func (s *CopyImageInput) SetDescription(v string) *CopyImageInput {
return s
}
+// SetDestinationOutpostArn sets the DestinationOutpostArn field's value.
+func (s *CopyImageInput) SetDestinationOutpostArn(v string) *CopyImageInput {
+ s.DestinationOutpostArn = &v
+ return s
+}
+
// SetDryRun sets the DryRun field's value.
func (s *CopyImageInput) SetDryRun(v bool) *CopyImageInput {
s.DryRun = &v
@@ -48462,14 +51647,25 @@ type CopySnapshotInput struct {
// A description for the EBS snapshot.
Description *string `type:"string"`
+ // The Amazon Resource Name (ARN) of the Outpost to which to copy the snapshot.
+ // Only specify this parameter when copying a snapshot from an Amazon Web Services
+ // Region to an Outpost. The snapshot must be in the Region for the destination
+ // Outpost. You cannot copy a snapshot from an Outpost to a Region, from one
+ // Outpost to another, or within the same Outpost.
+ //
+ // For more information, see Copy snapshots from an Amazon Web Services Region
+ // to an Outpost (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snapshots-outposts.html#copy-snapshots)
+ // in the Amazon Elastic Compute Cloud User Guide.
+ DestinationOutpostArn *string `type:"string"`
+
// The destination Region to use in the PresignedUrl parameter of a snapshot
// copy operation. This parameter is only valid for specifying the destination
// Region in a PresignedUrl parameter, where it is required.
//
// The snapshot copy is sent to the regional endpoint that you sent the HTTP
- // request to (for example, ec2.us-east-1.amazonaws.com). With the AWS CLI,
- // this is specified using the --region parameter or the default Region in your
- // AWS configuration file.
+ // request to (for example, ec2.us-east-1.amazonaws.com). With the CLI, this
+ // is specified using the --region parameter or the default Region in your Amazon
+ // Web Services configuration file.
DestinationRegion *string `locationName:"destinationRegion" type:"string"`
// Checks whether you have the required permissions for the action, without
@@ -48486,12 +51682,11 @@ type CopySnapshotInput struct {
// in the Amazon Elastic Compute Cloud User Guide.
Encrypted *bool `locationName:"encrypted" type:"boolean"`
- // The identifier of the AWS Key Management Service (AWS KMS) customer master
- // key (CMK) to use for Amazon EBS encryption. If this parameter is not specified,
- // your AWS managed CMK for EBS is used. If KmsKeyId is specified, the encrypted
- // state must be true.
+ // The identifier of the Key Management Service (KMS) KMS key to use for Amazon
+ // EBS encryption. If this parameter is not specified, your KMS key for Amazon
+ // EBS is used. If KmsKeyId is specified, the encrypted state must be true.
//
- // You can specify the CMK using any of the following:
+ // You can specify the KMS key using any of the following:
//
// * Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.
//
@@ -48501,9 +51696,9 @@ type CopySnapshotInput struct {
//
// * Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
//
- // AWS authenticates the CMK asynchronously. Therefore, if you specify an ID,
- // alias, or ARN that is not valid, the action can appear to complete, but eventually
- // fails.
+ // Amazon Web Services authenticates the KMS key asynchronously. Therefore,
+ // if you specify an ID, alias, or ARN that is not valid, the action can appear
+ // to complete, but eventually fails.
KmsKeyId *string `locationName:"kmsKeyId" type:"string"`
// When you copy an encrypted source snapshot using the Amazon EC2 Query API,
@@ -48512,10 +51707,11 @@ type CopySnapshotInput struct {
//
// The PresignedUrl should use the snapshot source endpoint, the CopySnapshot
// action, and include the SourceRegion, SourceSnapshotId, and DestinationRegion
- // parameters. The PresignedUrl must be signed using AWS Signature Version 4.
- // Because EBS snapshots are stored in Amazon S3, the signing algorithm for
- // this parameter uses the same logic that is described in Authenticating Requests:
- // Using Query Parameters (AWS Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html)
+ // parameters. The PresignedUrl must be signed using Amazon Web Services Signature
+ // Version 4. Because EBS snapshots are stored in Amazon S3, the signing algorithm
+ // for this parameter uses the same logic that is described in Authenticating
+ // Requests: Using Query Parameters (Amazon Web Services Signature Version 4)
+ // (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html)
// in the Amazon Simple Storage Service API Reference. An invalid or improperly
// signed PresignedUrl will cause the copy operation to fail asynchronously,
// and the snapshot will move to an error state.
@@ -48567,6 +51763,12 @@ func (s *CopySnapshotInput) SetDescription(v string) *CopySnapshotInput {
return s
}
+// SetDestinationOutpostArn sets the DestinationOutpostArn field's value.
+func (s *CopySnapshotInput) SetDestinationOutpostArn(v string) *CopySnapshotInput {
+ s.DestinationOutpostArn = &v
+ return s
+}
+
// SetDestinationRegion sets the DestinationRegion field's value.
func (s *CopySnapshotInput) SetDestinationRegion(v string) *CopySnapshotInput {
s.DestinationRegion = &v
@@ -48725,7 +51927,7 @@ type CreateCapacityReservationInput struct {
AvailabilityZoneId *string `type:"string"`
// Unique, case-sensitive identifier that you provide to ensure the idempotency
- // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
+ // of the request. For more information, see Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
ClientToken *string `type:"string"`
// Checks whether you have the required permissions for the action, without
@@ -48772,6 +51974,8 @@ type CreateCapacityReservationInput struct {
// The number of instances for which to reserve capacity.
//
+ // Valid range: 1 - 1000
+ //
// InstanceCount is a required field
InstanceCount *int64 `type:"integer" required:"true"`
@@ -48797,12 +52001,16 @@ type CreateCapacityReservationInput struct {
InstancePlatform *string `type:"string" required:"true" enum:"CapacityReservationInstancePlatform"`
// The instance type for which to reserve capacity. For more information, see
- // Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html)
+ // in the Amazon EC2 User Guide.
//
// InstanceType is a required field
InstanceType *string `type:"string" required:"true"`
+ // The Amazon Resource Name (ARN) of the Outpost on which to create the Capacity
+ // Reservation.
+ OutpostArn *string `type:"string"`
+
// The tags to apply to the Capacity Reservation during launch.
TagSpecifications []*TagSpecification `locationNameList:"item" type:"list"`
@@ -48810,10 +52018,10 @@ type CreateCapacityReservationInput struct {
// can have one of the following tenancy settings:
//
// * default - The Capacity Reservation is created on hardware that is shared
- // with other AWS accounts.
+ // with other Amazon Web Services accounts.
//
// * dedicated - The Capacity Reservation is created on single-tenant hardware
- // that is dedicated to a single AWS account.
+ // that is dedicated to a single Amazon Web Services account.
Tenancy *string `type:"string" enum:"CapacityReservationTenancy"`
}
@@ -48918,6 +52126,12 @@ func (s *CreateCapacityReservationInput) SetInstanceType(v string) *CreateCapaci
return s
}
+// SetOutpostArn sets the OutpostArn field's value.
+func (s *CreateCapacityReservationInput) SetOutpostArn(v string) *CreateCapacityReservationInput {
+ s.OutpostArn = &v
+ return s
+}
+
// SetTagSpecifications sets the TagSpecifications field's value.
func (s *CreateCapacityReservationInput) SetTagSpecifications(v []*TagSpecification) *CreateCapacityReservationInput {
s.TagSpecifications = v
@@ -48957,7 +52171,7 @@ type CreateCarrierGatewayInput struct {
_ struct{} `type:"structure"`
// Unique, case-sensitive identifier that you provide to ensure the idempotency
- // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html).
+ // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html).
ClientToken *string `type:"string" idempotencyToken:"true"`
// Checks whether you have the required permissions for the action, without
@@ -49783,7 +52997,7 @@ type CreateEgressOnlyInternetGatewayInput struct {
_ struct{} `type:"structure"`
// Unique, case-sensitive identifier that you provide to ensure the idempotency
- // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html).
+ // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html).
ClientToken *string `type:"string"`
// Checks whether you have the required permissions for the action, without
@@ -49944,6 +53158,9 @@ type CreateFleetInput struct {
// of the request. For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
ClientToken *string `type:"string"`
+ // Reserved.
+ Context *string `type:"string"`
+
// Checks whether you have the required permissions for the action, without
// actually making the request, and provides an error response. If you have
// the required permissions, the error response is DryRunOperation. Otherwise,
@@ -49963,16 +53180,24 @@ type CreateFleetInput struct {
// Describes the configuration of On-Demand Instances in an EC2 Fleet.
OnDemandOptions *OnDemandOptionsRequest `type:"structure"`
- // Indicates whether EC2 Fleet should replace unhealthy instances.
+ // Indicates whether EC2 Fleet should replace unhealthy Spot Instances. Supported
+ // only for fleets of type maintain. For more information, see EC2 Fleet health
+ // checks (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/manage-ec2-fleet.html#ec2-fleet-health-checks)
+ // in the Amazon EC2 User Guide.
ReplaceUnhealthyInstances *bool `type:"boolean"`
// Describes the configuration of Spot Instances in an EC2 Fleet.
SpotOptions *SpotOptionsRequest `type:"structure"`
- // The key-value pair for tagging the EC2 Fleet request on creation. The value
- // for ResourceType must be fleet, otherwise the fleet request fails. To tag
- // instances at launch, specify the tags in the launch template (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html#create-launch-template).
- // For information about tagging after launch, see Tagging your resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-resources).
+ // The key-value pair for tagging the EC2 Fleet request on creation. For more
+ // information, see Tagging your resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-resources).
+ //
+ // If the fleet type is instant, specify a resource type of fleet to tag the
+ // fleet or instance to tag the instances at launch.
+ //
+ // If the fleet type is maintain or request, specify a resource type of fleet
+ // to tag the fleet. You cannot specify a resource type of instance. To tag
+ // instances at launch, specify the tags in a launch template (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html#create-launch-template).
TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"`
// The number of units to request.
@@ -49984,9 +53209,9 @@ type CreateFleetInput struct {
// expires.
TerminateInstancesWithExpiration *bool `type:"boolean"`
- // The type of request. The default value is maintain.
+ // The fleet type. The default value is maintain.
//
- // * maintain - The EC2 Fleet plaees an asynchronous request for your desired
+ // * maintain - The EC2 Fleet places an asynchronous request for your desired
// capacity, and continues to maintain your desired Spot capacity by replenishing
// interrupted Spot Instances.
//
@@ -50000,7 +53225,7 @@ type CreateFleetInput struct {
// be launched.
//
// For more information, see EC2 Fleet request types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-configuration-strategies.html#ec2-fleet-request-type)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // in the Amazon EC2 User Guide.
Type *string `type:"string" enum:"FleetType"`
// The start date and time of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).
@@ -50060,6 +53285,12 @@ func (s *CreateFleetInput) SetClientToken(v string) *CreateFleetInput {
return s
}
+// SetContext sets the Context field's value.
+func (s *CreateFleetInput) SetContext(v string) *CreateFleetInput {
+ s.Context = &v
+ return s
+}
+
// SetDryRun sets the DryRun field's value.
func (s *CreateFleetInput) SetDryRun(v bool) *CreateFleetInput {
s.DryRun = &v
@@ -50199,14 +53430,14 @@ type CreateFleetOutput struct {
_ struct{} `type:"structure"`
// Information about the instances that could not be launched by the fleet.
- // Valid only when Type is set to instant.
+ // Supported only for fleets of type instant.
Errors []*CreateFleetError `locationName:"errorSet" locationNameList:"item" type:"list"`
// The ID of the EC2 Fleet.
FleetId *string `locationName:"fleetId" type:"string"`
- // Information about the instances that were launched by the fleet. Valid only
- // when Type is set to instant.
+ // Information about the instances that were launched by the fleet. Supported
+ // only for fleets of type instant.
Instances []*CreateFleetInstance `locationName:"fleetInstanceSet" locationNameList:"item" type:"list"`
}
@@ -50242,7 +53473,7 @@ type CreateFlowLogsInput struct {
_ struct{} `type:"structure"`
// Unique, case-sensitive identifier that you provide to ensure the idempotency
- // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html).
+ // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html).
ClientToken *string `type:"string"`
// The ARN for the IAM role that permits Amazon EC2 to publish flow logs to
@@ -50288,12 +53519,12 @@ type CreateFlowLogsInput struct {
LogDestinationType *string `type:"string" enum:"LogDestinationType"`
// The fields to include in the flow log record, in the order in which they
- // should appear. For a list of available fields, see Flow Log Records (https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs.html#flow-log-records).
+ // should appear. For a list of available fields, see Flow log records (https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs.html#flow-log-records).
// If you omit this parameter, the flow log is created using the default format.
// If you specify this parameter, you must specify at least one field.
//
// Specify the fields using the ${field-id} format, separated by spaces. For
- // the AWS CLI, use single quotation marks (' ') to surround the parameter value.
+ // the CLI, use single quotation marks (' ') to surround the parameter value.
LogFormat *string `type:"string"`
// The name of a new or existing CloudWatch Logs log group where Amazon EC2
@@ -50754,6 +53985,113 @@ func (s *CreateImageOutput) SetImageId(v string) *CreateImageOutput {
return s
}
+type CreateInstanceEventWindowInput struct {
+ _ struct{} `type:"structure"`
+
+ // The cron expression for the event window, for example, * 0-4,20-23 * * 1,5.
+ // If you specify a cron expression, you can't specify a time range.
+ //
+ // Constraints:
+ //
+ // * Only hour and day of the week values are supported.
+ //
+ // * For day of the week values, you can specify either integers 0 through
+ // 6, or alternative single values SUN through SAT.
+ //
+ // * The minute, month, and year must be specified by *.
+ //
+ // * The hour value must be one or a multiple range, for example, 0-4 or
+ // 0-4,20-23.
+ //
+ // * Each hour range must be >= 2 hours, for example, 0-2 or 20-23.
+ //
+ // * The event window must be >= 4 hours. The combined total time ranges
+ // in the event window must be >= 4 hours.
+ //
+ // For more information about cron expressions, see cron (https://en.wikipedia.org/wiki/Cron)
+ // on the Wikipedia website.
+ CronExpression *string `type:"string"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The name of the event window.
+ Name *string `type:"string"`
+
+ // The tags to apply to the event window.
+ TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"`
+
+ // The time range for the event window. If you specify a time range, you can't
+ // specify a cron expression.
+ TimeRanges []*InstanceEventWindowTimeRangeRequest `locationName:"TimeRange" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateInstanceEventWindowInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateInstanceEventWindowInput) GoString() string {
+ return s.String()
+}
+
+// SetCronExpression sets the CronExpression field's value.
+func (s *CreateInstanceEventWindowInput) SetCronExpression(v string) *CreateInstanceEventWindowInput {
+ s.CronExpression = &v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *CreateInstanceEventWindowInput) SetDryRun(v bool) *CreateInstanceEventWindowInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *CreateInstanceEventWindowInput) SetName(v string) *CreateInstanceEventWindowInput {
+ s.Name = &v
+ return s
+}
+
+// SetTagSpecifications sets the TagSpecifications field's value.
+func (s *CreateInstanceEventWindowInput) SetTagSpecifications(v []*TagSpecification) *CreateInstanceEventWindowInput {
+ s.TagSpecifications = v
+ return s
+}
+
+// SetTimeRanges sets the TimeRanges field's value.
+func (s *CreateInstanceEventWindowInput) SetTimeRanges(v []*InstanceEventWindowTimeRangeRequest) *CreateInstanceEventWindowInput {
+ s.TimeRanges = v
+ return s
+}
+
+type CreateInstanceEventWindowOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the event window.
+ InstanceEventWindow *InstanceEventWindow `locationName:"instanceEventWindow" type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateInstanceEventWindowOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateInstanceEventWindowOutput) GoString() string {
+ return s.String()
+}
+
+// SetInstanceEventWindow sets the InstanceEventWindow field's value.
+func (s *CreateInstanceEventWindowOutput) SetInstanceEventWindow(v *InstanceEventWindow) *CreateInstanceEventWindowOutput {
+ s.InstanceEventWindow = v
+ return s
+}
+
type CreateInstanceExportTaskInput struct {
_ struct{} `type:"structure"`
@@ -50761,7 +54099,7 @@ type CreateInstanceExportTaskInput struct {
// maximum length is 255 characters.
Description *string `locationName:"description" type:"string"`
- // The format and location for an instance export task.
+ // The format and location for an export instance task.
//
// ExportToS3Task is a required field
ExportToS3Task *ExportToS3TaskSpecification `locationName:"exportToS3" type:"structure" required:"true"`
@@ -50771,7 +54109,7 @@ type CreateInstanceExportTaskInput struct {
// InstanceId is a required field
InstanceId *string `locationName:"instanceId" type:"string" required:"true"`
- // The tags to apply to the instance export task during creation.
+ // The tags to apply to the export instance task during creation.
TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"`
// The target virtualization environment.
@@ -50842,7 +54180,7 @@ func (s *CreateInstanceExportTaskInput) SetTargetEnvironment(v string) *CreateIn
type CreateInstanceExportTaskOutput struct {
_ struct{} `type:"structure"`
- // Information about the instance export task.
+ // Information about the export instance task.
ExportTask *ExportTask `locationName:"exportTask" type:"structure"`
}
@@ -50936,6 +54274,12 @@ type CreateKeyPairInput struct {
// KeyName is a required field
KeyName *string `type:"string" required:"true"`
+ // The type of key pair. Note that ED25519 keys are not supported for Windows
+ // instances, EC2 Instance Connect, and EC2 Serial Console.
+ //
+ // Default: rsa
+ KeyType *string `type:"string" enum:"KeyType"`
+
// The tags to apply to the new key pair.
TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"`
}
@@ -50975,6 +54319,12 @@ func (s *CreateKeyPairInput) SetKeyName(v string) *CreateKeyPairInput {
return s
}
+// SetKeyType sets the KeyType field's value.
+func (s *CreateKeyPairInput) SetKeyType(v string) *CreateKeyPairInput {
+ s.KeyType = &v
+ return s
+}
+
// SetTagSpecifications sets the TagSpecifications field's value.
func (s *CreateKeyPairInput) SetTagSpecifications(v []*TagSpecification) *CreateKeyPairInput {
s.TagSpecifications = v
@@ -50988,7 +54338,7 @@ type CreateKeyPairOutput struct {
// The SHA-1 digest of the DER encoded private key.
KeyFingerprint *string `locationName:"keyFingerprint" type:"string"`
- // An unencrypted PEM encoded RSA private key.
+ // An unencrypted PEM encoded RSA or ED25519 private key.
KeyMaterial *string `locationName:"keyMaterial" type:"string" sensitive:"true"`
// The name of the key pair.
@@ -51669,19 +55019,22 @@ func (s *CreateManagedPrefixListOutput) SetPrefixList(v *ManagedPrefixList) *Cre
type CreateNatGatewayInput struct {
_ struct{} `type:"structure"`
- // The allocation ID of an Elastic IP address to associate with the NAT gateway.
- // If the Elastic IP address is associated with another resource, you must first
- // disassociate it.
- //
- // AllocationId is a required field
- AllocationId *string `type:"string" required:"true"`
+ // [Public NAT gateways only] The allocation ID of an Elastic IP address to
+ // associate with the NAT gateway. You cannot specify an Elastic IP address
+ // with a private NAT gateway. If the Elastic IP address is associated with
+ // another resource, you must first disassociate it.
+ AllocationId *string `type:"string"`
// Unique, case-sensitive identifier that you provide to ensure the idempotency
- // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
+ // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
//
// Constraint: Maximum 64 ASCII characters.
ClientToken *string `type:"string" idempotencyToken:"true"`
+ // Indicates whether the NAT gateway supports public or private connectivity.
+ // The default is public connectivity.
+ ConnectivityType *string `type:"string" enum:"ConnectivityType"`
+
// Checks whether you have the required permissions for the action, without
// actually making the request, and provides an error response. If you have
// the required permissions, the error response is DryRunOperation. Otherwise,
@@ -51710,9 +55063,6 @@ func (s CreateNatGatewayInput) GoString() string {
// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateNatGatewayInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "CreateNatGatewayInput"}
- if s.AllocationId == nil {
- invalidParams.Add(request.NewErrParamRequired("AllocationId"))
- }
if s.SubnetId == nil {
invalidParams.Add(request.NewErrParamRequired("SubnetId"))
}
@@ -51735,6 +55085,12 @@ func (s *CreateNatGatewayInput) SetClientToken(v string) *CreateNatGatewayInput
return s
}
+// SetConnectivityType sets the ConnectivityType field's value.
+func (s *CreateNatGatewayInput) SetConnectivityType(v string) *CreateNatGatewayInput {
+ s.ConnectivityType = &v
+ return s
+}
+
// SetDryRun sets the DryRun field's value.
func (s *CreateNatGatewayInput) SetDryRun(v bool) *CreateNatGatewayInput {
s.DryRun = &v
@@ -52043,15 +55399,16 @@ type CreateNetworkInsightsPathInput struct {
_ struct{} `type:"structure"`
// Unique, case-sensitive identifier that you provide to ensure the idempotency
- // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
+ // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
ClientToken *string `type:"string" idempotencyToken:"true"`
- // The AWS resource that is the destination of the path.
+ // The Amazon Web Services resource that is the destination of the path.
//
// Destination is a required field
Destination *string `type:"string" required:"true"`
- // The IP address of the AWS resource that is the destination of the path.
+ // The IP address of the Amazon Web Services resource that is the destination
+ // of the path.
DestinationIp *string `type:"string"`
// The destination port.
@@ -52068,12 +55425,13 @@ type CreateNetworkInsightsPathInput struct {
// Protocol is a required field
Protocol *string `type:"string" required:"true" enum:"Protocol"`
- // The AWS resource that is the source of the path.
+ // The Amazon Web Services resource that is the source of the path.
//
// Source is a required field
Source *string `type:"string" required:"true"`
- // The IP address of the AWS resource that is the source of the path.
+ // The IP address of the Amazon Web Services resource that is the source of
+ // the path.
SourceIp *string `type:"string"`
// The tags to add to the path.
@@ -52193,6 +55551,10 @@ func (s *CreateNetworkInsightsPathOutput) SetNetworkInsightsPath(v *NetworkInsig
type CreateNetworkInterfaceInput struct {
_ struct{} `type:"structure"`
+ // Unique, case-sensitive identifier that you provide to ensure the idempotency
+ // of the request. For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
+ ClientToken *string `type:"string" idempotencyToken:"true"`
+
// A description for the network interface.
Description *string `locationName:"description" type:"string"`
@@ -52207,9 +55569,21 @@ type CreateNetworkInterfaceInput struct {
// Indicates the type of network interface. To create an Elastic Fabric Adapter
// (EFA), specify efa. For more information, see Elastic Fabric Adapter (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // in the Amazon Elastic Compute Cloud User Guide. To create a trunk network
+ // interface, specify efa. For more information, see Network interface trunking
+ // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/eni-trunking.html) in
+ // the Amazon Elastic Compute Cloud User Guide.
InterfaceType *string `type:"string" enum:"NetworkInterfaceCreationType"`
+ // The number of IPv4 prefixes that Amazon Web Services automatically assigns
+ // to the network interface. You cannot use this option if you use the Ipv4
+ // Prefixes option.
+ Ipv4PrefixCount *int64 `type:"integer"`
+
+ // One or more IPv4 prefixes assigned to the network interface. You cannot use
+ // this option if you use the Ipv4PrefixCount option.
+ Ipv4Prefixes []*Ipv4PrefixSpecificationRequest `locationName:"Ipv4Prefix" locationNameList:"item" type:"list"`
+
// The number of IPv6 addresses to assign to a network interface. Amazon EC2
// automatically selects the IPv6 addresses from the subnet range. You can't
// use this option if specifying specific IPv6 addresses. If your subnet has
@@ -52221,6 +55595,15 @@ type CreateNetworkInterfaceInput struct {
// subnet. You can't use this option if you're specifying a number of IPv6 addresses.
Ipv6Addresses []*InstanceIpv6Address `locationName:"ipv6Addresses" locationNameList:"item" type:"list"`
+ // The number of IPv6 prefixes that Amazon Web Services automatically assigns
+ // to the network interface. You cannot use this option if you use the Ipv6Prefixes
+ // option.
+ Ipv6PrefixCount *int64 `type:"integer"`
+
+ // One or more IPv6 prefixes assigned to the network interface. You cannot use
+ // this option if you use the Ipv6PrefixCount option.
+ Ipv6Prefixes []*Ipv6PrefixSpecificationRequest `locationName:"Ipv6Prefix" locationNameList:"item" type:"list"`
+
// The primary private IPv4 address of the network interface. If you don't specify
// an IPv4 address, Amazon EC2 selects one for you from the subnet's IPv4 CIDR
// range. If you specify an IP address, you cannot indicate any IP addresses
@@ -52274,6 +55657,12 @@ func (s *CreateNetworkInterfaceInput) Validate() error {
return nil
}
+// SetClientToken sets the ClientToken field's value.
+func (s *CreateNetworkInterfaceInput) SetClientToken(v string) *CreateNetworkInterfaceInput {
+ s.ClientToken = &v
+ return s
+}
+
// SetDescription sets the Description field's value.
func (s *CreateNetworkInterfaceInput) SetDescription(v string) *CreateNetworkInterfaceInput {
s.Description = &v
@@ -52298,6 +55687,18 @@ func (s *CreateNetworkInterfaceInput) SetInterfaceType(v string) *CreateNetworkI
return s
}
+// SetIpv4PrefixCount sets the Ipv4PrefixCount field's value.
+func (s *CreateNetworkInterfaceInput) SetIpv4PrefixCount(v int64) *CreateNetworkInterfaceInput {
+ s.Ipv4PrefixCount = &v
+ return s
+}
+
+// SetIpv4Prefixes sets the Ipv4Prefixes field's value.
+func (s *CreateNetworkInterfaceInput) SetIpv4Prefixes(v []*Ipv4PrefixSpecificationRequest) *CreateNetworkInterfaceInput {
+ s.Ipv4Prefixes = v
+ return s
+}
+
// SetIpv6AddressCount sets the Ipv6AddressCount field's value.
func (s *CreateNetworkInterfaceInput) SetIpv6AddressCount(v int64) *CreateNetworkInterfaceInput {
s.Ipv6AddressCount = &v
@@ -52310,6 +55711,18 @@ func (s *CreateNetworkInterfaceInput) SetIpv6Addresses(v []*InstanceIpv6Address)
return s
}
+// SetIpv6PrefixCount sets the Ipv6PrefixCount field's value.
+func (s *CreateNetworkInterfaceInput) SetIpv6PrefixCount(v int64) *CreateNetworkInterfaceInput {
+ s.Ipv6PrefixCount = &v
+ return s
+}
+
+// SetIpv6Prefixes sets the Ipv6Prefixes field's value.
+func (s *CreateNetworkInterfaceInput) SetIpv6Prefixes(v []*Ipv6PrefixSpecificationRequest) *CreateNetworkInterfaceInput {
+ s.Ipv6Prefixes = v
+ return s
+}
+
// SetPrivateIpAddress sets the PrivateIpAddress field's value.
func (s *CreateNetworkInterfaceInput) SetPrivateIpAddress(v string) *CreateNetworkInterfaceInput {
s.PrivateIpAddress = &v
@@ -52344,6 +55757,10 @@ func (s *CreateNetworkInterfaceInput) SetTagSpecifications(v []*TagSpecification
type CreateNetworkInterfaceOutput struct {
_ struct{} `type:"structure"`
+ // The token to use to retrieve the next page of results. This value is null
+ // when there are no more results to return.
+ ClientToken *string `locationName:"clientToken" type:"string"`
+
// Information about the network interface.
NetworkInterface *NetworkInterface `locationName:"networkInterface" type:"structure"`
}
@@ -52358,6 +55775,12 @@ func (s CreateNetworkInterfaceOutput) GoString() string {
return s.String()
}
+// SetClientToken sets the ClientToken field's value.
+func (s *CreateNetworkInterfaceOutput) SetClientToken(v string) *CreateNetworkInterfaceOutput {
+ s.ClientToken = &v
+ return s
+}
+
// SetNetworkInterface sets the NetworkInterface field's value.
func (s *CreateNetworkInterfaceOutput) SetNetworkInterface(v *NetworkInterface) *CreateNetworkInterfaceOutput {
s.NetworkInterface = v
@@ -52368,10 +55791,10 @@ func (s *CreateNetworkInterfaceOutput) SetNetworkInterface(v *NetworkInterface)
type CreateNetworkInterfacePermissionInput struct {
_ struct{} `type:"structure"`
- // The AWS account ID.
+ // The Amazon Web Services account ID.
AwsAccountId *string `type:"string"`
- // The AWS service. Currently not supported.
+ // The Amazon Web Service. Currently not supported.
AwsService *string `type:"string"`
// Checks whether you have the required permissions for the action, without
@@ -52559,6 +55982,111 @@ func (s *CreatePlacementGroupOutput) SetPlacementGroup(v *PlacementGroup) *Creat
return s
}
+type CreateReplaceRootVolumeTaskInput struct {
+ _ struct{} `type:"structure"`
+
+ // Unique, case-sensitive identifier you provide to ensure the idempotency of
+ // the request. If you do not specify a client token, a randomly generated token
+ // is used for the request to ensure idempotency. For more information, see
+ // Ensuring idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
+ ClientToken *string `type:"string" idempotencyToken:"true"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The ID of the instance for which to replace the root volume.
+ //
+ // InstanceId is a required field
+ InstanceId *string `type:"string" required:"true"`
+
+ // The ID of the snapshot from which to restore the replacement root volume.
+ // If you want to restore the volume to the initial launch state, omit this
+ // parameter.
+ SnapshotId *string `type:"string"`
+
+ // The tags to apply to the root volume replacement task.
+ TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateReplaceRootVolumeTaskInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateReplaceRootVolumeTaskInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateReplaceRootVolumeTaskInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateReplaceRootVolumeTaskInput"}
+ if s.InstanceId == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetClientToken sets the ClientToken field's value.
+func (s *CreateReplaceRootVolumeTaskInput) SetClientToken(v string) *CreateReplaceRootVolumeTaskInput {
+ s.ClientToken = &v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *CreateReplaceRootVolumeTaskInput) SetDryRun(v bool) *CreateReplaceRootVolumeTaskInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetInstanceId sets the InstanceId field's value.
+func (s *CreateReplaceRootVolumeTaskInput) SetInstanceId(v string) *CreateReplaceRootVolumeTaskInput {
+ s.InstanceId = &v
+ return s
+}
+
+// SetSnapshotId sets the SnapshotId field's value.
+func (s *CreateReplaceRootVolumeTaskInput) SetSnapshotId(v string) *CreateReplaceRootVolumeTaskInput {
+ s.SnapshotId = &v
+ return s
+}
+
+// SetTagSpecifications sets the TagSpecifications field's value.
+func (s *CreateReplaceRootVolumeTaskInput) SetTagSpecifications(v []*TagSpecification) *CreateReplaceRootVolumeTaskInput {
+ s.TagSpecifications = v
+ return s
+}
+
+type CreateReplaceRootVolumeTaskOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the root volume replacement task.
+ ReplaceRootVolumeTask *ReplaceRootVolumeTask `locationName:"replaceRootVolumeTask" type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateReplaceRootVolumeTaskOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateReplaceRootVolumeTaskOutput) GoString() string {
+ return s.String()
+}
+
+// SetReplaceRootVolumeTask sets the ReplaceRootVolumeTask field's value.
+func (s *CreateReplaceRootVolumeTaskOutput) SetReplaceRootVolumeTask(v *ReplaceRootVolumeTask) *CreateReplaceRootVolumeTaskOutput {
+ s.ReplaceRootVolumeTask = v
+ return s
+}
+
// Contains the parameters for CreateReservedInstancesListing.
type CreateReservedInstancesListingInput struct {
_ struct{} `type:"structure"`
@@ -52670,6 +56198,119 @@ func (s *CreateReservedInstancesListingOutput) SetReservedInstancesListings(v []
return s
}
+type CreateRestoreImageTaskInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the S3 bucket that contains the stored AMI object.
+ //
+ // Bucket is a required field
+ Bucket *string `type:"string" required:"true"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The name for the restored AMI. The name must be unique for AMIs in the Region
+ // for this account. If you do not provide a name, the new AMI gets the same
+ // name as the original AMI.
+ Name *string `type:"string"`
+
+ // The name of the stored AMI object in the bucket.
+ //
+ // ObjectKey is a required field
+ ObjectKey *string `type:"string" required:"true"`
+
+ // The tags to apply to the AMI and snapshots on restoration. You can tag the
+ // AMI, the snapshots, or both.
+ //
+ // * To tag the AMI, the value for ResourceType must be image.
+ //
+ // * To tag the snapshots, the value for ResourceType must be snapshot. The
+ // same tag is applied to all of the snapshots that are created.
+ TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateRestoreImageTaskInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateRestoreImageTaskInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateRestoreImageTaskInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateRestoreImageTaskInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.ObjectKey == nil {
+ invalidParams.Add(request.NewErrParamRequired("ObjectKey"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CreateRestoreImageTaskInput) SetBucket(v string) *CreateRestoreImageTaskInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *CreateRestoreImageTaskInput) SetDryRun(v bool) *CreateRestoreImageTaskInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *CreateRestoreImageTaskInput) SetName(v string) *CreateRestoreImageTaskInput {
+ s.Name = &v
+ return s
+}
+
+// SetObjectKey sets the ObjectKey field's value.
+func (s *CreateRestoreImageTaskInput) SetObjectKey(v string) *CreateRestoreImageTaskInput {
+ s.ObjectKey = &v
+ return s
+}
+
+// SetTagSpecifications sets the TagSpecifications field's value.
+func (s *CreateRestoreImageTaskInput) SetTagSpecifications(v []*TagSpecification) *CreateRestoreImageTaskInput {
+ s.TagSpecifications = v
+ return s
+}
+
+type CreateRestoreImageTaskOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The AMI ID.
+ ImageId *string `locationName:"imageId" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateRestoreImageTaskOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateRestoreImageTaskOutput) GoString() string {
+ return s.String()
+}
+
+// SetImageId sets the ImageId field's value.
+func (s *CreateRestoreImageTaskOutput) SetImageId(v string) *CreateRestoreImageTaskOutput {
+ s.ImageId = &v
+ return s
+}
+
type CreateRouteInput struct {
_ struct{} `type:"structure"`
@@ -53089,10 +56730,29 @@ type CreateSnapshotInput struct {
// it is UnauthorizedOperation.
DryRun *bool `locationName:"dryRun" type:"boolean"`
+ // The Amazon Resource Name (ARN) of the Outpost on which to create a local
+ // snapshot.
+ //
+ // * To create a snapshot of a volume in a Region, omit this parameter. The
+ // snapshot is created in the same Region as the volume.
+ //
+ // * To create a snapshot of a volume on an Outpost and store the snapshot
+ // in the Region, omit this parameter. The snapshot is created in the Region
+ // for the Outpost.
+ //
+ // * To create a snapshot of a volume on an Outpost and store the snapshot
+ // on an Outpost, specify the ARN of the destination Outpost. The snapshot
+ // must be created on the same Outpost as the volume.
+ //
+ // For more information, see Create local snapshots from volumes on an Outpost
+ // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snapshots-outposts.html#create-snapshot)
+ // in the Amazon Elastic Compute Cloud User Guide.
+ OutpostArn *string `type:"string"`
+
// The tags to apply to the snapshot during creation.
TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"`
- // The ID of the EBS volume.
+ // The ID of the Amazon EBS volume.
//
// VolumeId is a required field
VolumeId *string `type:"string" required:"true"`
@@ -53133,6 +56793,12 @@ func (s *CreateSnapshotInput) SetDryRun(v bool) *CreateSnapshotInput {
return s
}
+// SetOutpostArn sets the OutpostArn field's value.
+func (s *CreateSnapshotInput) SetOutpostArn(v string) *CreateSnapshotInput {
+ s.OutpostArn = &v
+ return s
+}
+
// SetTagSpecifications sets the TagSpecifications field's value.
func (s *CreateSnapshotInput) SetTagSpecifications(v []*TagSpecification) *CreateSnapshotInput {
s.TagSpecifications = v
@@ -53165,6 +56831,25 @@ type CreateSnapshotsInput struct {
// InstanceSpecification is a required field
InstanceSpecification *InstanceSpecification `type:"structure" required:"true"`
+ // The Amazon Resource Name (ARN) of the Outpost on which to create the local
+ // snapshots.
+ //
+ // * To create snapshots from an instance in a Region, omit this parameter.
+ // The snapshots are created in the same Region as the instance.
+ //
+ // * To create snapshots from an instance on an Outpost and store the snapshots
+ // in the Region, omit this parameter. The snapshots are created in the Region
+ // for the Outpost.
+ //
+ // * To create snapshots from an instance on an Outpost and store the snapshots
+ // on an Outpost, specify the ARN of the destination Outpost. The snapshots
+ // must be created on the same Outpost as the instance.
+ //
+ // For more information, see Create multi-volume local snapshots from instances
+ // on an Outpost (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snapshots-outposts.html#create-multivol-snapshot)
+ // in the Amazon Elastic Compute Cloud User Guide.
+ OutpostArn *string `type:"string"`
+
// Tags to apply to every snapshot specified by the instance.
TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"`
}
@@ -53216,6 +56901,12 @@ func (s *CreateSnapshotsInput) SetInstanceSpecification(v *InstanceSpecification
return s
}
+// SetOutpostArn sets the OutpostArn field's value.
+func (s *CreateSnapshotsInput) SetOutpostArn(v string) *CreateSnapshotsInput {
+ s.OutpostArn = &v
+ return s
+}
+
// SetTagSpecifications sets the TagSpecifications field's value.
func (s *CreateSnapshotsInput) SetTagSpecifications(v []*TagSpecification) *CreateSnapshotsInput {
s.TagSpecifications = v
@@ -53332,13 +57023,242 @@ func (s *CreateSpotDatafeedSubscriptionOutput) SetSpotDatafeedSubscription(v *Sp
return s
}
+type CreateStoreImageTaskInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the S3 bucket in which the AMI object will be stored. The bucket
+ // must be in the Region in which the request is being made. The AMI object
+ // appears in the bucket only after the upload task has completed.
+ //
+ // Bucket is a required field
+ Bucket *string `type:"string" required:"true"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The ID of the AMI.
+ //
+ // ImageId is a required field
+ ImageId *string `type:"string" required:"true"`
+
+ // The tags to apply to the AMI object that will be stored in the S3 bucket.
+ S3ObjectTags []*S3ObjectTag `locationName:"S3ObjectTag" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateStoreImageTaskInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateStoreImageTaskInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateStoreImageTaskInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateStoreImageTaskInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.ImageId == nil {
+ invalidParams.Add(request.NewErrParamRequired("ImageId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CreateStoreImageTaskInput) SetBucket(v string) *CreateStoreImageTaskInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *CreateStoreImageTaskInput) SetDryRun(v bool) *CreateStoreImageTaskInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetImageId sets the ImageId field's value.
+func (s *CreateStoreImageTaskInput) SetImageId(v string) *CreateStoreImageTaskInput {
+ s.ImageId = &v
+ return s
+}
+
+// SetS3ObjectTags sets the S3ObjectTags field's value.
+func (s *CreateStoreImageTaskInput) SetS3ObjectTags(v []*S3ObjectTag) *CreateStoreImageTaskInput {
+ s.S3ObjectTags = v
+ return s
+}
+
+type CreateStoreImageTaskOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the stored AMI object in the S3 bucket.
+ ObjectKey *string `locationName:"objectKey" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateStoreImageTaskOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateStoreImageTaskOutput) GoString() string {
+ return s.String()
+}
+
+// SetObjectKey sets the ObjectKey field's value.
+func (s *CreateStoreImageTaskOutput) SetObjectKey(v string) *CreateStoreImageTaskOutput {
+ s.ObjectKey = &v
+ return s
+}
+
+type CreateSubnetCidrReservationInput struct {
+ _ struct{} `type:"structure"`
+
+ // The IPv4 or IPV6 CIDR range to reserve.
+ //
+ // Cidr is a required field
+ Cidr *string `type:"string" required:"true"`
+
+ // The description to assign to the subnet CIDR reservation.
+ Description *string `type:"string"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The type of reservation.
+ //
+ // The following are valid values:
+ //
+ // * prefix: The Amazon EC2 Prefix Delegation feature assigns the IP addresses
+ // to network interfaces that are associated with an instance. For information
+ // about Prefix Delegation, see Prefix Delegation for Amazon EC2 network
+ // interfaces (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-prefix-delegation.html)
+ // in the Amazon Elastic Compute Cloud User Guide.
+ //
+ // * explicit: You manually assign the IP addresses to resources that reside
+ // in your subnet.
+ //
+ // ReservationType is a required field
+ ReservationType *string `type:"string" required:"true" enum:"SubnetCidrReservationType"`
+
+ // The ID of the subnet.
+ //
+ // SubnetId is a required field
+ SubnetId *string `type:"string" required:"true"`
+
+ // The tags to assign to the subnet CIDR reservation.
+ TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateSubnetCidrReservationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateSubnetCidrReservationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateSubnetCidrReservationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateSubnetCidrReservationInput"}
+ if s.Cidr == nil {
+ invalidParams.Add(request.NewErrParamRequired("Cidr"))
+ }
+ if s.ReservationType == nil {
+ invalidParams.Add(request.NewErrParamRequired("ReservationType"))
+ }
+ if s.SubnetId == nil {
+ invalidParams.Add(request.NewErrParamRequired("SubnetId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetCidr sets the Cidr field's value.
+func (s *CreateSubnetCidrReservationInput) SetCidr(v string) *CreateSubnetCidrReservationInput {
+ s.Cidr = &v
+ return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *CreateSubnetCidrReservationInput) SetDescription(v string) *CreateSubnetCidrReservationInput {
+ s.Description = &v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *CreateSubnetCidrReservationInput) SetDryRun(v bool) *CreateSubnetCidrReservationInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetReservationType sets the ReservationType field's value.
+func (s *CreateSubnetCidrReservationInput) SetReservationType(v string) *CreateSubnetCidrReservationInput {
+ s.ReservationType = &v
+ return s
+}
+
+// SetSubnetId sets the SubnetId field's value.
+func (s *CreateSubnetCidrReservationInput) SetSubnetId(v string) *CreateSubnetCidrReservationInput {
+ s.SubnetId = &v
+ return s
+}
+
+// SetTagSpecifications sets the TagSpecifications field's value.
+func (s *CreateSubnetCidrReservationInput) SetTagSpecifications(v []*TagSpecification) *CreateSubnetCidrReservationInput {
+ s.TagSpecifications = v
+ return s
+}
+
+type CreateSubnetCidrReservationOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the created subnet CIDR reservation.
+ SubnetCidrReservation *SubnetCidrReservation `locationName:"subnetCidrReservation" type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateSubnetCidrReservationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateSubnetCidrReservationOutput) GoString() string {
+ return s.String()
+}
+
+// SetSubnetCidrReservation sets the SubnetCidrReservation field's value.
+func (s *CreateSubnetCidrReservationOutput) SetSubnetCidrReservation(v *SubnetCidrReservation) *CreateSubnetCidrReservationOutput {
+ s.SubnetCidrReservation = v
+ return s
+}
+
type CreateSubnetInput struct {
_ struct{} `type:"structure"`
// The Availability Zone or Local Zone for the subnet.
//
- // Default: AWS selects one for you. If you create more than one subnet in your
- // VPC, we do not necessarily select a different zone for each subnet.
+ // Default: Amazon Web Services selects one for you. If you create more than
+ // one subnet in your VPC, we do not necessarily select a different zone for
+ // each subnet.
//
// To create a subnet in a Local Zone, set this value to the Local Zone ID,
// for example us-west-2-lax-1a. For information about the Regions that support
@@ -54162,7 +58082,7 @@ type CreateTransitGatewayConnectInput struct {
TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"`
// The ID of the transit gateway attachment. You can specify a VPC attachment
- // or a AWS Direct Connect attachment.
+ // or Amazon Web Services Direct Connect attachment.
//
// TransportTransitGatewayAttachmentId is a required field
TransportTransitGatewayAttachmentId *string `type:"string" required:"true"`
@@ -54642,7 +58562,7 @@ type CreateTransitGatewayPeeringAttachmentInput struct {
// it is UnauthorizedOperation.
DryRun *bool `type:"boolean"`
- // The AWS account ID of the owner of the peer transit gateway.
+ // The ID of the Amazon Web Services account that owns the peer transit gateway.
//
// PeerAccountId is a required field
PeerAccountId *string `type:"string" required:"true"`
@@ -55224,6 +59144,10 @@ type CreateVolumeInput struct {
// AvailabilityZone is a required field
AvailabilityZone *string `type:"string" required:"true"`
+ // Unique, case-sensitive identifier that you provide to ensure the idempotency
+ // of the request. For more information, see Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
+ ClientToken *string `type:"string" idempotencyToken:"true"`
+
// Checks whether you have the required permissions for the action, without
// actually making the request, and provides an error response. If you have
// the required permissions, the error response is DryRunOperation. Otherwise,
@@ -55253,21 +59177,20 @@ type CreateVolumeInput struct {
//
// * io2: 100-64,000 IOPS
//
- // For io1 and io2 volumes, we guarantee 64,000 IOPS only for Instances built
- // on the Nitro System (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances).
- // Other instance families guarantee performance up to 32,000 IOPS.
+ // io1 and io2 volumes support up to 64,000 IOPS only on Instances built on
+ // the Nitro System (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances).
+ // Other instance families support performance up to 32,000 IOPS.
//
// This parameter is required for io1 and io2 volumes. The default for gp3 volumes
// is 3,000 IOPS. This parameter is not supported for gp2, st1, sc1, or standard
// volumes.
Iops *int64 `type:"integer"`
- // The identifier of the AWS Key Management Service (AWS KMS) customer master
- // key (CMK) to use for Amazon EBS encryption. If this parameter is not specified,
- // your AWS managed CMK for EBS is used. If KmsKeyId is specified, the encrypted
- // state must be true.
+ // The identifier of the Key Management Service (KMS) KMS key to use for Amazon
+ // EBS encryption. If this parameter is not specified, your KMS key for Amazon
+ // EBS is used. If KmsKeyId is specified, the encrypted state must be true.
//
- // You can specify the CMK using any of the following:
+ // You can specify the KMS key using any of the following:
//
// * Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.
//
@@ -55277,9 +59200,9 @@ type CreateVolumeInput struct {
//
// * Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
//
- // AWS authenticates the CMK asynchronously. Therefore, if you specify an ID,
- // alias, or ARN that is not valid, the action can appear to complete, but eventually
- // fails.
+ // Amazon Web Services authenticates the KMS key asynchronously. Therefore,
+ // if you specify an ID, alias, or ARN that is not valid, the action can appear
+ // to complete, but eventually fails.
KmsKeyId *string `type:"string"`
// Indicates whether to enable Amazon EBS Multi-Attach. If you enable Multi-Attach,
@@ -55371,6 +59294,12 @@ func (s *CreateVolumeInput) SetAvailabilityZone(v string) *CreateVolumeInput {
return s
}
+// SetClientToken sets the ClientToken field's value.
+func (s *CreateVolumeInput) SetClientToken(v string) *CreateVolumeInput {
+ s.ClientToken = &v
+ return s
+}
+
// SetDryRun sets the DryRun field's value.
func (s *CreateVolumeInput) SetDryRun(v bool) *CreateVolumeInput {
s.DryRun = &v
@@ -55445,7 +59374,7 @@ type CreateVolumePermission struct {
// The group to be added or removed. The possible value is all.
Group *string `locationName:"group" type:"string" enum:"PermissionGroup"`
- // The AWS account ID to be added or removed.
+ // The ID of the Amazon Web Services account to be added or removed.
UserId *string `locationName:"userId" type:"string"`
}
@@ -55475,10 +59404,10 @@ func (s *CreateVolumePermission) SetUserId(v string) *CreateVolumePermission {
type CreateVolumePermissionModifications struct {
_ struct{} `type:"structure"`
- // Adds the specified AWS account ID or group to the list.
+ // Adds the specified Amazon Web Services account ID or group to the list.
Add []*CreateVolumePermission `locationNameList:"item" type:"list"`
- // Removes the specified AWS account ID or group from the list.
+ // Removes the specified Amazon Web Services account ID or group from the list.
Remove []*CreateVolumePermission `locationNameList:"item" type:"list"`
}
@@ -56095,9 +60024,9 @@ type CreateVpcPeeringConnectionInput struct {
// it is UnauthorizedOperation.
DryRun *bool `locationName:"dryRun" type:"boolean"`
- // The AWS account ID of the owner of the accepter VPC.
+ // The Amazon Web Services account ID of the owner of the accepter VPC.
//
- // Default: Your AWS account ID
+ // Default: Your Amazon Web Services account ID
PeerOwnerId *string `locationName:"peerOwnerId" type:"string"`
// The Region code for the accepter VPC, if the accepter VPC is located in a
@@ -57443,6 +61372,89 @@ func (s *DeleteFpgaImageOutput) SetReturn(v bool) *DeleteFpgaImageOutput {
return s
}
+type DeleteInstanceEventWindowInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // Specify true to force delete the event window. Use the force delete parameter
+ // if the event window is currently associated with targets.
+ ForceDelete *bool `type:"boolean"`
+
+ // The ID of the event window.
+ //
+ // InstanceEventWindowId is a required field
+ InstanceEventWindowId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteInstanceEventWindowInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteInstanceEventWindowInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteInstanceEventWindowInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteInstanceEventWindowInput"}
+ if s.InstanceEventWindowId == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceEventWindowId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DeleteInstanceEventWindowInput) SetDryRun(v bool) *DeleteInstanceEventWindowInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetForceDelete sets the ForceDelete field's value.
+func (s *DeleteInstanceEventWindowInput) SetForceDelete(v bool) *DeleteInstanceEventWindowInput {
+ s.ForceDelete = &v
+ return s
+}
+
+// SetInstanceEventWindowId sets the InstanceEventWindowId field's value.
+func (s *DeleteInstanceEventWindowInput) SetInstanceEventWindowId(v string) *DeleteInstanceEventWindowInput {
+ s.InstanceEventWindowId = &v
+ return s
+}
+
+type DeleteInstanceEventWindowOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The state of the event window.
+ InstanceEventWindowState *InstanceEventWindowStateChange `locationName:"instanceEventWindowState" type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteInstanceEventWindowOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteInstanceEventWindowOutput) GoString() string {
+ return s.String()
+}
+
+// SetInstanceEventWindowState sets the InstanceEventWindowState field's value.
+func (s *DeleteInstanceEventWindowOutput) SetInstanceEventWindowState(v *InstanceEventWindowStateChange) *DeleteInstanceEventWindowOutput {
+ s.InstanceEventWindowState = v
+ return s
+}
+
type DeleteInternetGatewayInput struct {
_ struct{} `type:"structure"`
@@ -59107,6 +63119,79 @@ func (s DeleteSpotDatafeedSubscriptionOutput) GoString() string {
return s.String()
}
+type DeleteSubnetCidrReservationInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The ID of the subnet CIDR reservation.
+ //
+ // SubnetCidrReservationId is a required field
+ SubnetCidrReservationId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteSubnetCidrReservationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteSubnetCidrReservationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteSubnetCidrReservationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteSubnetCidrReservationInput"}
+ if s.SubnetCidrReservationId == nil {
+ invalidParams.Add(request.NewErrParamRequired("SubnetCidrReservationId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DeleteSubnetCidrReservationInput) SetDryRun(v bool) *DeleteSubnetCidrReservationInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetSubnetCidrReservationId sets the SubnetCidrReservationId field's value.
+func (s *DeleteSubnetCidrReservationInput) SetSubnetCidrReservationId(v string) *DeleteSubnetCidrReservationInput {
+ s.SubnetCidrReservationId = &v
+ return s
+}
+
+type DeleteSubnetCidrReservationOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the deleted subnet CIDR reservation.
+ DeletedSubnetCidrReservation *SubnetCidrReservation `locationName:"deletedSubnetCidrReservation" type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteSubnetCidrReservationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteSubnetCidrReservationOutput) GoString() string {
+ return s.String()
+}
+
+// SetDeletedSubnetCidrReservation sets the DeletedSubnetCidrReservation field's value.
+func (s *DeleteSubnetCidrReservationOutput) SetDeletedSubnetCidrReservation(v *SubnetCidrReservation) *DeleteSubnetCidrReservationOutput {
+ s.DeletedSubnetCidrReservation = v
+ return s
+}
+
type DeleteSubnetInput struct {
_ struct{} `type:"structure"`
@@ -59195,7 +63280,8 @@ type DeleteTagsInput struct {
// an empty string.
//
// If you omit this parameter, we delete all user-defined tags for the specified
- // resources. We do not delete AWS-generated tags (tags that have the aws: prefix).
+ // resources. We do not delete Amazon Web Services-generated tags (tags that
+ // have the aws: prefix).
Tags []*Tag `locationName:"tag" locationNameList:"item" type:"list"`
}
@@ -61294,6 +65380,115 @@ func (s *DescribeAccountAttributesOutput) SetAccountAttributes(v []*AccountAttri
return s
}
+type DescribeAddressesAttributeInput struct {
+ _ struct{} `type:"structure"`
+
+ // [EC2-VPC] The allocation IDs.
+ AllocationIds []*string `locationName:"AllocationId" locationNameList:"item" type:"list"`
+
+ // The attribute of the IP address.
+ Attribute *string `type:"string" enum:"AddressAttributeName"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The maximum number of results to return with a single call. To retrieve the
+ // remaining results, make another call with the returned nextToken value.
+ MaxResults *int64 `min:"1" type:"integer"`
+
+ // The token for the next page of results.
+ NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeAddressesAttributeInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeAddressesAttributeInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeAddressesAttributeInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeAddressesAttributeInput"}
+ if s.MaxResults != nil && *s.MaxResults < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAllocationIds sets the AllocationIds field's value.
+func (s *DescribeAddressesAttributeInput) SetAllocationIds(v []*string) *DescribeAddressesAttributeInput {
+ s.AllocationIds = v
+ return s
+}
+
+// SetAttribute sets the Attribute field's value.
+func (s *DescribeAddressesAttributeInput) SetAttribute(v string) *DescribeAddressesAttributeInput {
+ s.Attribute = &v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DescribeAddressesAttributeInput) SetDryRun(v bool) *DescribeAddressesAttributeInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *DescribeAddressesAttributeInput) SetMaxResults(v int64) *DescribeAddressesAttributeInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeAddressesAttributeInput) SetNextToken(v string) *DescribeAddressesAttributeInput {
+ s.NextToken = &v
+ return s
+}
+
+type DescribeAddressesAttributeOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the IP addresses.
+ Addresses []*AddressAttribute `locationName:"addressSet" locationNameList:"item" type:"list"`
+
+ // The token to use to retrieve the next page of results. This value is null
+ // when there are no more results to return.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeAddressesAttributeOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeAddressesAttributeOutput) GoString() string {
+ return s.String()
+}
+
+// SetAddresses sets the Addresses field's value.
+func (s *DescribeAddressesAttributeOutput) SetAddresses(v []*AddressAttribute) *DescribeAddressesAttributeOutput {
+ s.Addresses = v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeAddressesAttributeOutput) SetNextToken(v string) *DescribeAddressesAttributeOutput {
+ s.NextToken = &v
+ return s
+}
+
type DescribeAddressesInput struct {
_ struct{} `type:"structure"`
@@ -61319,12 +65514,13 @@ type DescribeAddressesInput struct {
// if any.
//
// * network-border-group - A unique set of Availability Zones, Local Zones,
- // or Wavelength Zones from where AWS advertises IP addresses.
+ // or Wavelength Zones from where Amazon Web Services advertises IP addresses.
//
// * network-interface-id - [EC2-VPC] The ID of the network interface that
// the address is associated with, if any.
//
- // * network-interface-owner-id - The AWS account ID of the owner.
+ // * network-interface-owner-id - The Amazon Web Services account ID of the
+ // owner.
//
// * private-ip-address - [EC2-VPC] The private IP address associated with
// the Elastic IP address.
@@ -61790,7 +65986,8 @@ type DescribeCapacityReservationsInput struct {
// * instance-type - The type of instance for which the Capacity Reservation
// reserves capacity.
//
- // * owner-id - The ID of the AWS account that owns the Capacity Reservation.
+ // * owner-id - The ID of the Amazon Web Services account that owns the Capacity
+ // Reservation.
//
// * availability-zone-id - The Availability Zone ID of the Capacity Reservation.
//
@@ -61802,20 +65999,27 @@ type DescribeCapacityReservationsInput struct {
// * tenancy - Indicates the tenancy of the Capacity Reservation. A Capacity
// Reservation can have one of the following tenancy settings: default -
// The Capacity Reservation is created on hardware that is shared with other
- // AWS accounts. dedicated - The Capacity Reservation is created on single-tenant
- // hardware that is dedicated to a single AWS account.
+ // Amazon Web Services accounts. dedicated - The Capacity Reservation is
+ // created on single-tenant hardware that is dedicated to a single Amazon
+ // Web Services account.
+ //
+ // * outpost-arn - The Amazon Resource Name (ARN) of the Outpost on which
+ // the Capacity Reservation was created.
//
// * state - The current state of the Capacity Reservation. A Capacity Reservation
// can be in one of the following states: active- The Capacity Reservation
// is active and the capacity is available for your use. expired - The Capacity
// Reservation expired automatically at the date and time specified in your
// request. The reserved capacity is no longer available for your use. cancelled
- // - The Capacity Reservation was manually cancelled. The reserved capacity
- // is no longer available for your use. pending - The Capacity Reservation
- // request was successful but the capacity provisioning is still pending.
- // failed - The Capacity Reservation request has failed. A request might
- // fail due to invalid request parameters, capacity constraints, or instance
- // limit constraints. Failed requests are retained for 60 minutes.
+ // - The Capacity Reservation was cancelled. The reserved capacity is no
+ // longer available for your use. pending - The Capacity Reservation request
+ // was successful but the capacity provisioning is still pending. failed
+ // - The Capacity Reservation request has failed. A request might fail due
+ // to invalid request parameters, capacity constraints, or instance limit
+ // constraints. Failed requests are retained for 60 minutes.
+ //
+ // * start-date - The date and time at which the Capacity Reservation was
+ // started.
//
// * end-date - The date and time at which the Capacity Reservation expires.
// When a Capacity Reservation expires, the reserved capacity is released
@@ -61954,7 +66158,8 @@ type DescribeCarrierGatewaysInput struct {
// * state - The state of the carrier gateway (pending | failed | available
// | deleting | deleted).
//
- // * owner-id - The AWS account ID of the owner of the carrier gateway.
+ // * owner-id - The Amazon Web Services account ID of the owner of the carrier
+ // gateway.
//
// * tag: - The key/value combination of a tag assigned to the resource.
// Use the tag key in the filter name and the tag value as the filter value.
@@ -63089,7 +67294,8 @@ type DescribeDhcpOptionsInput struct {
//
// * value - The value for one of the options.
//
- // * owner-id - The ID of the AWS account that owns the DHCP options set.
+ // * owner-id - The ID of the Amazon Web Services account that owns the DHCP
+ // options set.
//
// * tag: - The key/value combination of a tag assigned to the resource.
// Use the tag key in the filter name and the tag value as the filter value.
@@ -63636,11 +67842,12 @@ type DescribeFastSnapshotRestoreSuccessItem struct {
// The time at which fast snapshot restores entered the optimizing state.
OptimizingTime *time.Time `locationName:"optimizingTime" type:"timestamp"`
- // The AWS owner alias that enabled fast snapshot restores on the snapshot.
- // This is intended for future use.
+ // The Amazon Web Services owner alias that enabled fast snapshot restores on
+ // the snapshot. This is intended for future use.
OwnerAlias *string `locationName:"ownerAlias" type:"string"`
- // The ID of the AWS account that enabled fast snapshot restores on the snapshot.
+ // The ID of the Amazon Web Services account that enabled fast snapshot restores
+ // on the snapshot.
OwnerId *string `locationName:"ownerId" type:"string"`
// The ID of the snapshot.
@@ -63748,8 +67955,8 @@ type DescribeFastSnapshotRestoresInput struct {
//
// * availability-zone: The Availability Zone of the snapshot.
//
- // * owner-id: The ID of the AWS account that enabled fast snapshot restore
- // on the snapshot.
+ // * owner-id: The ID of the Amazon Web Services account that enabled fast
+ // snapshot restore on the snapshot.
//
// * snapshot-id: The ID of the snapshot.
//
@@ -65274,9 +69481,9 @@ type DescribeImageAttributeInput struct {
// The AMI attribute.
//
- // Note: Depending on your account privileges, the blockDeviceMapping attribute
- // may return a Client.AuthFailure error. If this happens, use DescribeImages
- // to get information about the block device mapping for the AMI.
+ // Note: The blockDeviceMapping attribute is deprecated. Using this attribute
+ // returns the Client.AuthFailure error. To get information about the block
+ // device mappings for an AMI, use the DescribeImages action.
//
// Attribute is a required field
Attribute *string `type:"string" required:"true" enum:"ImageAttributeName"`
@@ -65344,6 +69551,9 @@ type DescribeImageAttributeOutput struct {
// The block device mapping entries.
BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"`
+ // Describes a value for a resource attribute that is a String.
+ BootMode *AttributeValue `locationName:"bootMode" type:"structure"`
+
// A description for the AMI.
Description *AttributeValue `locationName:"description" type:"structure"`
@@ -65383,6 +69593,12 @@ func (s *DescribeImageAttributeOutput) SetBlockDeviceMappings(v []*BlockDeviceMa
return s
}
+// SetBootMode sets the BootMode field's value.
+func (s *DescribeImageAttributeOutput) SetBootMode(v *AttributeValue) *DescribeImageAttributeOutput {
+ s.BootMode = v
+ return s
+}
+
// SetDescription sets the Description field's value.
func (s *DescribeImageAttributeOutput) SetDescription(v *AttributeValue) *DescribeImageAttributeOutput {
s.Description = v
@@ -65479,13 +69695,13 @@ type DescribeImagesInput struct {
//
// * name - The name of the AMI (provided during image creation).
//
- // * owner-alias - The owner alias, from an Amazon-maintained list (amazon
- // | aws-marketplace). This is not the user-configured AWS account alias
- // set using the IAM console. We recommend that you use the related parameter
- // instead of this filter.
+ // * owner-alias - The owner alias (amazon | aws-marketplace). The valid
+ // aliases are defined in an Amazon-maintained list. This is not the AWS
+ // account alias that can be set using the IAM console. We recommend that
+ // you use the Owner request parameter instead of this filter.
//
// * owner-id - The AWS account ID of the owner. We recommend that you use
- // the related parameter instead of this filter.
+ // the Owner request parameter instead of this filter.
//
// * platform - The platform. To only list Windows-based AMIs, use windows.
//
@@ -65527,6 +69743,14 @@ type DescribeImagesInput struct {
// Default: Describes all images available to you.
ImageIds []*string `locationName:"ImageId" locationNameList:"ImageId" type:"list"`
+ // If true, all deprecated AMIs are included in the response. If false, no deprecated
+ // AMIs are included in the response. If no value is specified, the default
+ // value is false.
+ //
+ // If you are the AMI owner, all deprecated AMIs appear in the response regardless
+ // of the value (true or false) that you set for this parameter.
+ IncludeDeprecated *bool `type:"boolean"`
+
// Scopes the results to images with the specified owners. You can specify a
// combination of AWS account IDs, self, amazon, and aws-marketplace. If you
// omit this parameter, the results include all images for which you have launch
@@ -65568,6 +69792,12 @@ func (s *DescribeImagesInput) SetImageIds(v []*string) *DescribeImagesInput {
return s
}
+// SetIncludeDeprecated sets the IncludeDeprecated field's value.
+func (s *DescribeImagesInput) SetIncludeDeprecated(v bool) *DescribeImagesInput {
+ s.IncludeDeprecated = &v
+ return s
+}
+
// SetOwners sets the Owners field's value.
func (s *DescribeImagesInput) SetOwners(v []*string) *DescribeImagesInput {
s.Owners = v
@@ -65874,8 +70104,8 @@ type DescribeInstanceAttributeOutput struct {
// Indicates whether enhanced networking with ENA is enabled.
EnaSupport *AttributeBooleanValue `locationName:"enaSupport" type:"structure"`
- // To enable the instance for AWS Nitro Enclaves, set this parameter to true;
- // otherwise, set it to false.
+ // To enable the instance for Amazon Web Services Nitro Enclaves, set this parameter
+ // to true; otherwise, set it to false.
EnclaveOptions *EnclaveOptions `locationName:"enclaveOptions" type:"structure"`
// The security groups associated with the instance.
@@ -65903,9 +70133,12 @@ type DescribeInstanceAttributeOutput struct {
// The device name of the root device volume (for example, /dev/sda1).
RootDeviceName *AttributeValue `locationName:"rootDeviceName" type:"structure"`
- // Indicates whether source/destination checking is enabled. A value of true
- // means that checking is enabled, and false means that checking is disabled.
- // This value must be false for a NAT instance to perform NAT.
+ // Enable or disable source/destination checks, which ensure that the instance
+ // is either the source or the destination of any traffic that it receives.
+ // If the value is true, source/destination checks are enabled; otherwise, they
+ // are disabled. The default value is true. You must disable source/destination
+ // checks if the instance runs services such as network address translation,
+ // routing, or firewalls.
SourceDestCheck *AttributeBooleanValue `locationName:"sourceDestCheck" type:"structure"`
// Indicates whether enhanced networking with the Intel 82599 Virtual Function
@@ -66188,6 +70421,150 @@ func (s *DescribeInstanceEventNotificationAttributesOutput) SetInstanceTagAttrib
return s
}
+// Describe instance event windows by InstanceEventWindow.
+type DescribeInstanceEventWindowsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // One or more filters.
+ //
+ // * dedicated-host-id - The event windows associated with the specified
+ // Dedicated Host ID.
+ //
+ // * event-window-name - The event windows associated with the specified
+ // names.
+ //
+ // * instance-id - The event windows associated with the specified instance
+ // ID.
+ //
+ // * instance-tag - The event windows associated with the specified tag and
+ // value.
+ //
+ // * instance-tag-key - The event windows associated with the specified tag
+ // key, regardless of the value.
+ //
+ // * instance-tag-value - The event windows associated with the specified
+ // tag value, regardless of the key.
+ //
+ // * tag: - The key/value combination of a tag assigned to the event
+ // window. Use the tag key in the filter name and the tag value as the filter
+ // value. For example, to find all resources that have a tag with the key
+ // Owner and the value CMX, specify tag:Owner for the filter name and CMX
+ // for the filter value.
+ //
+ // * tag-key - The key of a tag assigned to the event window. Use this filter
+ // to find all event windows that have a tag with a specific key, regardless
+ // of the tag value.
+ //
+ // * tag-value - The value of a tag assigned to the event window. Use this
+ // filter to find all event windows that have a tag with a specific value,
+ // regardless of the tag key.
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
+
+ // The IDs of the event windows.
+ InstanceEventWindowIds []*string `locationName:"InstanceEventWindowId" locationNameList:"InstanceEventWindowId" type:"list"`
+
+ // The maximum number of results to return in a single call. To retrieve the
+ // remaining results, make another call with the returned NextToken value. This
+ // value can be between 20 and 500. You cannot specify this parameter and the
+ // event window IDs parameter in the same call.
+ MaxResults *int64 `min:"20" type:"integer"`
+
+ // The token to request the next page of results.
+ NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeInstanceEventWindowsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeInstanceEventWindowsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeInstanceEventWindowsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeInstanceEventWindowsInput"}
+ if s.MaxResults != nil && *s.MaxResults < 20 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxResults", 20))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DescribeInstanceEventWindowsInput) SetDryRun(v bool) *DescribeInstanceEventWindowsInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetFilters sets the Filters field's value.
+func (s *DescribeInstanceEventWindowsInput) SetFilters(v []*Filter) *DescribeInstanceEventWindowsInput {
+ s.Filters = v
+ return s
+}
+
+// SetInstanceEventWindowIds sets the InstanceEventWindowIds field's value.
+func (s *DescribeInstanceEventWindowsInput) SetInstanceEventWindowIds(v []*string) *DescribeInstanceEventWindowsInput {
+ s.InstanceEventWindowIds = v
+ return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *DescribeInstanceEventWindowsInput) SetMaxResults(v int64) *DescribeInstanceEventWindowsInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeInstanceEventWindowsInput) SetNextToken(v string) *DescribeInstanceEventWindowsInput {
+ s.NextToken = &v
+ return s
+}
+
+type DescribeInstanceEventWindowsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the event windows.
+ InstanceEventWindows []*InstanceEventWindow `locationName:"instanceEventWindowSet" locationNameList:"item" type:"list"`
+
+ // The token to use to retrieve the next page of results. This value is null
+ // when there are no more results to return.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeInstanceEventWindowsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeInstanceEventWindowsOutput) GoString() string {
+ return s.String()
+}
+
+// SetInstanceEventWindows sets the InstanceEventWindows field's value.
+func (s *DescribeInstanceEventWindowsOutput) SetInstanceEventWindows(v []*InstanceEventWindow) *DescribeInstanceEventWindowsOutput {
+ s.InstanceEventWindows = v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeInstanceEventWindowsOutput) SetNextToken(v string) *DescribeInstanceEventWindowsOutput {
+ s.NextToken = &v
+ return s
+}
+
type DescribeInstanceStatusInput struct {
_ struct{} `type:"structure"`
@@ -66538,12 +70915,18 @@ type DescribeInstanceTypesInput struct {
//
// * memory-info.size-in-mib - The memory size.
//
+ // * network-info.efa-info.maximum-efa-interfaces - The maximum number of
+ // Elastic Fabric Adapters (EFAs) per instance.
+ //
// * network-info.efa-supported - Indicates whether the instance type supports
// Elastic Fabric Adapter (EFA) (true | false).
//
// * network-info.ena-support - Indicates whether Elastic Network Adapter
// (ENA) is supported or required (required | supported | unsupported).
//
+ // * network-info.encryption-in-transit-supported - Indicates whether the
+ // instance type automatically encrypts in-transit traffic between instances.
+ //
// * network-info.ipv4-addresses-per-interface - The maximum number of private
// IPv4 addresses per network interface.
//
@@ -66565,6 +70948,8 @@ type DescribeInstanceTypesInput struct {
// * processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in
// GHz.
//
+ // * supported-boot-mode - The boot mode (legacy-bios | uefi).
+ //
// * supported-root-device-type - The root device type (ebs | instance-store).
//
// * supported-usage-class - The usage class (on-demand | spot).
@@ -66587,8 +70972,8 @@ type DescribeInstanceTypesInput struct {
// can be configured for the instance type. For example, "1" or "1,2".
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
- // The instance types. For more information, see Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // The instance types. For more information, see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html)
+ // in the Amazon EC2 User Guide.
InstanceTypes []*string `locationName:"InstanceType" type:"list"`
// The maximum number of results to return for the request in a single page.
@@ -66656,8 +71041,8 @@ func (s *DescribeInstanceTypesInput) SetNextToken(v string) *DescribeInstanceTyp
type DescribeInstanceTypesOutput struct {
_ struct{} `type:"structure"`
- // The instance type. For more information, see Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // The instance type. For more information, see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html)
+ // in the Amazon EC2 User Guide.
InstanceTypes []*InstanceTypeInfo `locationName:"instanceTypeSet" locationNameList:"item" type:"list"`
// The token to use to retrieve the next page of results. This value is null
@@ -66859,7 +71244,7 @@ type DescribeInstancesInput struct {
// * network-interface.requester-id - The requester ID for the network interface.
//
// * network-interface.requester-managed - Indicates whether the network
- // interface is being managed by AWS.
+ // interface is being managed by Amazon Web Services.
//
// * network-interface.status - The status of the network interface (available)
// | in-use).
@@ -66874,7 +71259,9 @@ type DescribeInstancesInput struct {
//
// * network-interface.vpc-id - The ID of the VPC for the network interface.
//
- // * owner-id - The AWS account ID of the instance owner.
+ // * outpost-arn - The Amazon Resource Name (ARN) of the Outpost.
+ //
+ // * owner-id - The Amazon Web Services account ID of the instance owner.
//
// * placement-group-name - The name of the placement group for the instance.
//
@@ -66899,7 +71286,8 @@ type DescribeInstancesInput struct {
// Similar to the state-reason-code filter.
//
// * requester-id - The ID of the entity that launched the instance on your
- // behalf (for example, AWS Management Console, Auto Scaling, and so on).
+ // behalf (for example, Amazon Web Services Management Console, Auto Scaling,
+ // and so on).
//
// * reservation-id - The ID of the instance's reservation. A reservation
// ID is created any time you launch an instance. A reservation ID has a
@@ -67051,7 +71439,8 @@ type DescribeInternetGatewaysInput struct {
//
// * internet-gateway-id - The ID of the Internet gateway.
//
- // * owner-id - The ID of the AWS account that owns the internet gateway.
+ // * owner-id - The ID of the Amazon Web Services account that owns the internet
+ // gateway.
//
// * tag: - The key/value combination of a tag assigned to the resource.
// Use the tag key in the filter name and the tag value as the filter value.
@@ -67312,7 +71701,7 @@ type DescribeKeyPairsInput struct {
// The key pair names.
//
- // Default: Describes all your key pairs.
+ // Default: Describes all of your key pairs.
KeyNames []*string `locationName:"KeyName" locationNameList:"KeyName" type:"list"`
// The IDs of the key pairs.
@@ -68798,7 +73187,8 @@ type DescribeNetworkAclsInput struct {
//
// * network-acl-id - The ID of the network ACL.
//
- // * owner-id - The ID of the AWS account that owns the network ACL.
+ // * owner-id - The ID of the Amazon Web Services account that owns the network
+ // ACL.
//
// * tag: - The key/value combination of a tag assigned to the resource.
// Use the tag key in the filter name and the tag value as the filter value.
@@ -69305,9 +73695,10 @@ type DescribeNetworkInterfacePermissionsInput struct {
// * network-interface-permission.network-interface-id - The ID of the network
// interface.
//
- // * network-interface-permission.aws-account-id - The AWS account ID.
+ // * network-interface-permission.aws-account-id - The Amazon Web Services
+ // account ID.
//
- // * network-interface-permission.aws-service - The AWS service.
+ // * network-interface-permission.aws-service - The Amazon Web Service.
//
// * network-interface-permission.permission - The type of permission (INSTANCE-ATTACH
// | EIP-ASSOCIATE).
@@ -69480,19 +73871,20 @@ type DescribeNetworkInterfacesInput struct {
//
// * network-interface-id - The ID of the network interface.
//
- // * owner-id - The AWS account ID of the network interface owner.
+ // * owner-id - The Amazon Web Services account ID of the network interface
+ // owner.
//
// * private-ip-address - The private IPv4 address or addresses of the network
// interface.
//
// * private-dns-name - The private DNS name of the network interface (IPv4).
//
- // * requester-id - The ID of the entity that launched the instance on your
- // behalf (for example, AWS Management Console, Auto Scaling, and so on).
+ // * requester-id - The alias or Amazon Web Services account ID of the principal
+ // or service that created the network interface.
//
// * requester-managed - Indicates whether the network interface is being
- // managed by an AWS service (for example, AWS Management Console, Auto Scaling,
- // and so on).
+ // managed by an Amazon Web Service (for example, Amazon Web Services Management
+ // Console, Auto Scaling, and so on).
//
// * source-dest-check - Indicates whether the network interface performs
// source/destination checking. A value of true means checking is enabled,
@@ -70114,6 +74506,118 @@ func (s *DescribeRegionsOutput) SetRegions(v []*Region) *DescribeRegionsOutput {
return s
}
+type DescribeReplaceRootVolumeTasksInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // Filter to use:
+ //
+ // * instance-id - The ID of the instance for which the root volume replacement
+ // task was created.
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
+
+ // The maximum number of results to return with a single call. To retrieve the
+ // remaining results, make another call with the returned nextToken value.
+ MaxResults *int64 `min:"1" type:"integer"`
+
+ // The token for the next page of results.
+ NextToken *string `type:"string"`
+
+ // The ID of the root volume replacement task to view.
+ ReplaceRootVolumeTaskIds []*string `locationName:"ReplaceRootVolumeTaskId" locationNameList:"ReplaceRootVolumeTaskId" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeReplaceRootVolumeTasksInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeReplaceRootVolumeTasksInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeReplaceRootVolumeTasksInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeReplaceRootVolumeTasksInput"}
+ if s.MaxResults != nil && *s.MaxResults < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DescribeReplaceRootVolumeTasksInput) SetDryRun(v bool) *DescribeReplaceRootVolumeTasksInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetFilters sets the Filters field's value.
+func (s *DescribeReplaceRootVolumeTasksInput) SetFilters(v []*Filter) *DescribeReplaceRootVolumeTasksInput {
+ s.Filters = v
+ return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *DescribeReplaceRootVolumeTasksInput) SetMaxResults(v int64) *DescribeReplaceRootVolumeTasksInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeReplaceRootVolumeTasksInput) SetNextToken(v string) *DescribeReplaceRootVolumeTasksInput {
+ s.NextToken = &v
+ return s
+}
+
+// SetReplaceRootVolumeTaskIds sets the ReplaceRootVolumeTaskIds field's value.
+func (s *DescribeReplaceRootVolumeTasksInput) SetReplaceRootVolumeTaskIds(v []*string) *DescribeReplaceRootVolumeTasksInput {
+ s.ReplaceRootVolumeTaskIds = v
+ return s
+}
+
+type DescribeReplaceRootVolumeTasksOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The token to use to retrieve the next page of results. This value is null
+ // when there are no more results to return.
+ NextToken *string `locationName:"nextToken" type:"string"`
+
+ // Information about the root volume replacement task.
+ ReplaceRootVolumeTasks []*ReplaceRootVolumeTask `locationName:"replaceRootVolumeTaskSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeReplaceRootVolumeTasksOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeReplaceRootVolumeTasksOutput) GoString() string {
+ return s.String()
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeReplaceRootVolumeTasksOutput) SetNextToken(v string) *DescribeReplaceRootVolumeTasksOutput {
+ s.NextToken = &v
+ return s
+}
+
+// SetReplaceRootVolumeTasks sets the ReplaceRootVolumeTasks field's value.
+func (s *DescribeReplaceRootVolumeTasksOutput) SetReplaceRootVolumeTasks(v []*ReplaceRootVolumeTask) *DescribeReplaceRootVolumeTasksOutput {
+ s.ReplaceRootVolumeTasks = v
+ return s
+}
+
// Contains the parameters for DescribeReservedInstances.
type DescribeReservedInstancesInput struct {
_ struct{} `type:"structure"`
@@ -70146,10 +74650,11 @@ type DescribeReservedInstancesInput struct {
// will only be displayed to EC2-Classic account holders and are for use
// with Amazon VPC (Linux/UNIX | Linux/UNIX (Amazon VPC) | SUSE Linux | SUSE
// Linux (Amazon VPC) | Red Hat Enterprise Linux | Red Hat Enterprise Linux
- // (Amazon VPC) | Windows | Windows (Amazon VPC) | Windows with SQL Server
- // Standard | Windows with SQL Server Standard (Amazon VPC) | Windows with
- // SQL Server Web | Windows with SQL Server Web (Amazon VPC) | Windows with
- // SQL Server Enterprise | Windows with SQL Server Enterprise (Amazon VPC)).
+ // (Amazon VPC) | Red Hat Enterprise Linux with HA (Amazon VPC) | Windows
+ // | Windows (Amazon VPC) | Windows with SQL Server Standard | Windows with
+ // SQL Server Standard (Amazon VPC) | Windows with SQL Server Web | Windows
+ // with SQL Server Web (Amazon VPC) | Windows with SQL Server Enterprise
+ // | Windows with SQL Server Enterprise (Amazon VPC)).
//
// * reserved-instances-id - The ID of the Reserved Instance.
//
@@ -70439,19 +74944,19 @@ type DescribeReservedInstancesOfferingsInput struct {
//
// * marketplace - Set to true to show only Reserved Instance Marketplace
// offerings. When this filter is not used, which is the default behavior,
- // all offerings from both AWS and the Reserved Instance Marketplace are
- // listed.
+ // all offerings from both Amazon Web Services and the Reserved Instance
+ // Marketplace are listed.
//
// * product-description - The Reserved Instance product platform description.
// Instances that include (Amazon VPC) in the product platform description
// will only be displayed to EC2-Classic account holders and are for use
// with Amazon VPC. (Linux/UNIX | Linux/UNIX (Amazon VPC) | SUSE Linux |
// SUSE Linux (Amazon VPC) | Red Hat Enterprise Linux | Red Hat Enterprise
- // Linux (Amazon VPC) | Windows | Windows (Amazon VPC) | Windows with SQL
- // Server Standard | Windows with SQL Server Standard (Amazon VPC) | Windows
- // with SQL Server Web | Windows with SQL Server Web (Amazon VPC) | Windows
- // with SQL Server Enterprise | Windows with SQL Server Enterprise (Amazon
- // VPC))
+ // Linux (Amazon VPC) | Red Hat Enterprise Linux with HA (Amazon VPC) | Windows
+ // | Windows (Amazon VPC) | Windows with SQL Server Standard | Windows with
+ // SQL Server Standard (Amazon VPC) | Windows with SQL Server Web | Windows
+ // with SQL Server Web (Amazon VPC) | Windows with SQL Server Enterprise
+ // | Windows with SQL Server Enterprise (Amazon VPC))
//
// * reserved-instances-offering-id - The Reserved Instances offering ID.
//
@@ -70475,8 +74980,8 @@ type DescribeReservedInstancesOfferingsInput struct {
InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"`
// The instance type that the reservation will cover (for example, m1.small).
- // For more information, see Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // For more information, see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html)
+ // in the Amazon EC2 User Guide.
InstanceType *string `type:"string" enum:"InstanceType"`
// The maximum duration (in seconds) to filter when searching for offerings.
@@ -70701,7 +75206,8 @@ type DescribeRouteTablesInput struct {
// table for the VPC (true | false). Route tables that do not have an association
// ID are not returned in the response.
//
- // * owner-id - The ID of the AWS account that owns the route table.
+ // * owner-id - The ID of the Amazon Web Services account that owns the route
+ // table.
//
// * route-table-id - The ID of the route table.
//
@@ -70711,8 +75217,8 @@ type DescribeRouteTablesInput struct {
// * route.destination-ipv6-cidr-block - The IPv6 CIDR range specified in
// a route in the route table.
//
- // * route.destination-prefix-list-id - The ID (prefix) of the AWS service
- // specified in a route in the table.
+ // * route.destination-prefix-list-id - The ID (prefix) of the Amazon Web
+ // Service specified in a route in the table.
//
// * route.egress-only-internet-gateway-id - The ID of an egress-only Internet
// gateway specified in a route in the route table.
@@ -71208,6 +75714,127 @@ func (s *DescribeSecurityGroupReferencesOutput) SetSecurityGroupReferenceSet(v [
return s
}
+type DescribeSecurityGroupRulesInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // One or more filters.
+ //
+ // * group-id - The ID of the security group.
+ //
+ // * security-group-rule-id - The ID of the security group rule.
+ //
+ // * tag: - The key/value combination of a tag assigned to the resource.
+ // Use the tag key in the filter name and the tag value as the filter value.
+ // For example, to find all resources that have a tag with the key Owner
+ // and the value TeamA, specify tag:Owner for the filter name and TeamA for
+ // the filter value.
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
+
+ // The maximum number of results to return in a single call. To retrieve the
+ // remaining results, make another request with the returned NextToken value.
+ // This value can be between 5 and 1000. If this parameter is not specified,
+ // then all results are returned.
+ MaxResults *int64 `min:"5" type:"integer"`
+
+ // The token for the next page of results.
+ NextToken *string `type:"string"`
+
+ // The IDs of the security group rules.
+ SecurityGroupRuleIds []*string `locationName:"SecurityGroupRuleId" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeSecurityGroupRulesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSecurityGroupRulesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeSecurityGroupRulesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeSecurityGroupRulesInput"}
+ if s.MaxResults != nil && *s.MaxResults < 5 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DescribeSecurityGroupRulesInput) SetDryRun(v bool) *DescribeSecurityGroupRulesInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetFilters sets the Filters field's value.
+func (s *DescribeSecurityGroupRulesInput) SetFilters(v []*Filter) *DescribeSecurityGroupRulesInput {
+ s.Filters = v
+ return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *DescribeSecurityGroupRulesInput) SetMaxResults(v int64) *DescribeSecurityGroupRulesInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeSecurityGroupRulesInput) SetNextToken(v string) *DescribeSecurityGroupRulesInput {
+ s.NextToken = &v
+ return s
+}
+
+// SetSecurityGroupRuleIds sets the SecurityGroupRuleIds field's value.
+func (s *DescribeSecurityGroupRulesInput) SetSecurityGroupRuleIds(v []*string) *DescribeSecurityGroupRulesInput {
+ s.SecurityGroupRuleIds = v
+ return s
+}
+
+type DescribeSecurityGroupRulesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The token to use to retrieve the next page of results. This value is null
+ // when there are no more results to return.
+ NextToken *string `locationName:"nextToken" type:"string"`
+
+ // Information about security group rules.
+ SecurityGroupRules []*SecurityGroupRule `locationName:"securityGroupRuleSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeSecurityGroupRulesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSecurityGroupRulesOutput) GoString() string {
+ return s.String()
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeSecurityGroupRulesOutput) SetNextToken(v string) *DescribeSecurityGroupRulesOutput {
+ s.NextToken = &v
+ return s
+}
+
+// SetSecurityGroupRules sets the SecurityGroupRules field's value.
+func (s *DescribeSecurityGroupRulesOutput) SetSecurityGroupRules(v []*SecurityGroupRule) *DescribeSecurityGroupRulesOutput {
+ s.SecurityGroupRules = v
+ return s
+}
+
type DescribeSecurityGroupsInput struct {
_ struct{} `type:"structure"`
@@ -71233,7 +75860,7 @@ type DescribeSecurityGroupsInput struct {
// been referenced in an outbound security group rule.
//
// * egress.ip-permission.group-name - The name of a security group that
- // has been referenced in an outbound security group rule.
+ // is referenced in an outbound security group rule.
//
// * egress.ip-permission.ipv6-cidr - An IPv6 CIDR block for an outbound
// security group rule.
@@ -71242,13 +75869,13 @@ type DescribeSecurityGroupsInput struct {
// a security group rule allows outbound access.
//
// * egress.ip-permission.protocol - The IP protocol for an outbound security
- // group rule (tcp | udp | icmp or a protocol number).
+ // group rule (tcp | udp | icmp, a protocol number, or -1 for all protocols).
//
// * egress.ip-permission.to-port - For an outbound rule, the end of port
// range for the TCP and UDP protocols, or an ICMP code.
//
- // * egress.ip-permission.user-id - The ID of an AWS account that has been
- // referenced in an outbound security group rule.
+ // * egress.ip-permission.user-id - The ID of an Amazon Web Services account
+ // that has been referenced in an outbound security group rule.
//
// * group-id - The ID of the security group.
//
@@ -71263,8 +75890,8 @@ type DescribeSecurityGroupsInput struct {
// * ip-permission.group-id - The ID of a security group that has been referenced
// in an inbound security group rule.
//
- // * ip-permission.group-name - The name of a security group that has been
- // referenced in an inbound security group rule.
+ // * ip-permission.group-name - The name of a security group that is referenced
+ // in an inbound security group rule.
//
// * ip-permission.ipv6-cidr - An IPv6 CIDR block for an inbound security
// group rule.
@@ -71273,15 +75900,16 @@ type DescribeSecurityGroupsInput struct {
// security group rule allows inbound access.
//
// * ip-permission.protocol - The IP protocol for an inbound security group
- // rule (tcp | udp | icmp or a protocol number).
+ // rule (tcp | udp | icmp, a protocol number, or -1 for all protocols).
//
// * ip-permission.to-port - For an inbound rule, the end of port range for
// the TCP and UDP protocols, or an ICMP code.
//
- // * ip-permission.user-id - The ID of an AWS account that has been referenced
- // in an inbound security group rule.
+ // * ip-permission.user-id - The ID of an Amazon Web Services account that
+ // has been referenced in an inbound security group rule.
//
- // * owner-id - The AWS account ID of the owner of the security group.
+ // * owner-id - The Amazon Web Services account ID of the owner of the security
+ // group.
//
// * tag: - The key/value combination of a tag assigned to the resource.
// Use the tag key in the filter name and the tag value as the filter value.
@@ -71299,7 +75927,7 @@ type DescribeSecurityGroupsInput struct {
// The IDs of the security groups. Required for security groups in a nondefault
// VPC.
//
- // Default: Describes all your security groups.
+ // Default: Describes all of your security groups.
GroupIds []*string `locationName:"GroupId" locationNameList:"groupId" type:"list"`
// [EC2-Classic and default VPC only] The names of the security groups. You
@@ -71307,7 +75935,7 @@ type DescribeSecurityGroupsInput struct {
// security groups in a nondefault VPC, use the group-name filter to describe
// security groups by name.
//
- // Default: Describes all your security groups.
+ // Default: Describes all of your security groups.
GroupNames []*string `locationName:"GroupName" locationNameList:"GroupName" type:"list"`
// The maximum number of results to return in a single call. To retrieve the
@@ -71534,11 +76162,12 @@ type DescribeSnapshotsInput struct {
// * encrypted - Indicates whether the snapshot is encrypted (true | false)
//
// * owner-alias - The owner alias, from an Amazon-maintained list (amazon).
- // This is not the user-configured AWS account alias set using the IAM console.
- // We recommend that you use the related parameter instead of this filter.
+ // This is not the user-configured Amazon Web Services account alias set
+ // using the IAM console. We recommend that you use the related parameter
+ // instead of this filter.
//
- // * owner-id - The AWS account ID of the owner. We recommend that you use
- // the related parameter instead of this filter.
+ // * owner-id - The Amazon Web Services account ID of the owner. We recommend
+ // that you use the related parameter instead of this filter.
//
// * progress - The progress of the snapshot, as a percentage (for example,
// 80%).
@@ -71583,10 +76212,11 @@ type DescribeSnapshotsInput struct {
NextToken *string `type:"string"`
// Scopes the results to snapshots with the specified owners. You can specify
- // a combination of AWS account IDs, self, and amazon.
+ // a combination of Amazon Web Services account IDs, self, and amazon.
OwnerIds []*string `locationName:"Owner" locationNameList:"Owner" type:"list"`
- // The IDs of the AWS accounts that can create volumes from the snapshot.
+ // The IDs of the Amazon Web Services accounts that can create volumes from
+ // the snapshot.
RestorableByUserIds []*string `locationName:"RestorableBy" type:"list"`
// The snapshot IDs.
@@ -72571,6 +77201,124 @@ func (s *DescribeStaleSecurityGroupsOutput) SetStaleSecurityGroupSet(v []*StaleS
return s
}
+type DescribeStoreImageTasksInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The filters.
+ //
+ // * task-state - Returns tasks in a certain state (InProgress | Completed
+ // | Failed)
+ //
+ // * bucket - Returns task information for tasks that targeted a specific
+ // bucket. For the filter value, specify the bucket name.
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
+
+ // The AMI IDs for which to show progress. Up to 20 AMI IDs can be included
+ // in a request.
+ ImageIds []*string `locationName:"ImageId" locationNameList:"item" type:"list"`
+
+ // The maximum number of results to return in a single call. To retrieve the
+ // remaining results, make another call with the returned NextToken value. This
+ // value can be between 1 and 200. You cannot specify this parameter and the
+ // ImageIDs parameter in the same call.
+ MaxResults *int64 `min:"1" type:"integer"`
+
+ // The token for the next page of results.
+ NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeStoreImageTasksInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeStoreImageTasksInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeStoreImageTasksInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeStoreImageTasksInput"}
+ if s.MaxResults != nil && *s.MaxResults < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DescribeStoreImageTasksInput) SetDryRun(v bool) *DescribeStoreImageTasksInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetFilters sets the Filters field's value.
+func (s *DescribeStoreImageTasksInput) SetFilters(v []*Filter) *DescribeStoreImageTasksInput {
+ s.Filters = v
+ return s
+}
+
+// SetImageIds sets the ImageIds field's value.
+func (s *DescribeStoreImageTasksInput) SetImageIds(v []*string) *DescribeStoreImageTasksInput {
+ s.ImageIds = v
+ return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *DescribeStoreImageTasksInput) SetMaxResults(v int64) *DescribeStoreImageTasksInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeStoreImageTasksInput) SetNextToken(v string) *DescribeStoreImageTasksInput {
+ s.NextToken = &v
+ return s
+}
+
+type DescribeStoreImageTasksOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The token to use to retrieve the next page of results. This value is null
+ // when there are no more results to return.
+ NextToken *string `locationName:"nextToken" type:"string"`
+
+ // The information about the AMI store tasks.
+ StoreImageTaskResults []*StoreImageTaskResult `locationName:"storeImageTaskResultSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeStoreImageTasksOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeStoreImageTasksOutput) GoString() string {
+ return s.String()
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeStoreImageTasksOutput) SetNextToken(v string) *DescribeStoreImageTasksOutput {
+ s.NextToken = &v
+ return s
+}
+
+// SetStoreImageTaskResults sets the StoreImageTaskResults field's value.
+func (s *DescribeStoreImageTasksOutput) SetStoreImageTaskResults(v []*StoreImageTaskResult) *DescribeStoreImageTasksOutput {
+ s.StoreImageTaskResults = v
+ return s
+}
+
type DescribeSubnetsInput struct {
_ struct{} `type:"structure"`
@@ -72607,7 +77355,9 @@ type DescribeSubnetsInput struct {
// * ipv6-cidr-block-association.state - The state of an IPv6 CIDR block
// associated with the subnet.
//
- // * owner-id - The ID of the AWS account that owns the subnet.
+ // * outpost-arn - The Amazon Resource Name (ARN) of the Outpost.
+ //
+ // * owner-id - The ID of the Amazon Web Services account that owns the subnet.
//
// * state - The state of the subnet (pending | available).
//
@@ -73213,7 +77963,8 @@ type DescribeTransitGatewayAttachmentsInput struct {
//
// * resource-id - The ID of the resource.
//
- // * resource-owner-id - The ID of the AWS account that owns the resource.
+ // * resource-owner-id - The ID of the Amazon Web Services account that owns
+ // the resource.
//
// * resource-type - The resource type. Valid values are vpc | vpn | direct-connect-gateway
// | peering | connect.
@@ -73226,8 +77977,8 @@ type DescribeTransitGatewayAttachmentsInput struct {
//
// * transit-gateway-id - The ID of the transit gateway.
//
- // * transit-gateway-owner-id - The ID of the AWS account that owns the transit
- // gateway.
+ // * transit-gateway-owner-id - The ID of the Amazon Web Services account
+ // that owns the transit gateway.
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
// The maximum number of results to return with a single call. To retrieve the
@@ -73695,10 +78446,10 @@ type DescribeTransitGatewayPeeringAttachmentsInput struct {
//
// * transit-gateway-attachment-id - The ID of the transit gateway attachment.
//
- // * local-owner-id - The ID of your AWS account.
+ // * local-owner-id - The ID of your Amazon Web Services account.
//
- // * remote-owner-id - The ID of the AWS account in the remote Region that
- // owns the transit gateway.
+ // * remote-owner-id - The ID of the Amazon Web Services account in the remote
+ // Region that owns the transit gateway.
//
// * state - The state of the peering attachment. Valid values are available
// | deleted | deleting | failed | failing | initiatingRequest | modifying
@@ -74093,7 +78844,8 @@ type DescribeTransitGatewaysInput struct {
// * options.vpn-ecmp-support - Indicates whether Equal Cost Multipath Protocol
// support is enabled (enable | disable).
//
- // * owner-id - The ID of the AWS account that owns the transit gateway.
+ // * owner-id - The ID of the Amazon Web Services account that owns the transit
+ // gateway.
//
// * state - The state of the transit gateway (available | deleted | deleting
// | modifying | pending).
@@ -74198,6 +78950,120 @@ func (s *DescribeTransitGatewaysOutput) SetTransitGateways(v []*TransitGateway)
return s
}
+type DescribeTrunkInterfaceAssociationsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The IDs of the associations.
+ AssociationIds []*string `locationName:"AssociationId" locationNameList:"item" type:"list"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // One or more filters.
+ //
+ // * gre-key - The ID of a trunk interface association.
+ //
+ // * interface-protocol - The interface protocol. Valid values are VLAN and
+ // GRE.
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
+
+ // The maximum number of results to return with a single call. To retrieve the
+ // remaining results, make another call with the returned nextToken value.
+ MaxResults *int64 `min:"5" type:"integer"`
+
+ // The token for the next page of results.
+ NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeTrunkInterfaceAssociationsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeTrunkInterfaceAssociationsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeTrunkInterfaceAssociationsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeTrunkInterfaceAssociationsInput"}
+ if s.MaxResults != nil && *s.MaxResults < 5 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAssociationIds sets the AssociationIds field's value.
+func (s *DescribeTrunkInterfaceAssociationsInput) SetAssociationIds(v []*string) *DescribeTrunkInterfaceAssociationsInput {
+ s.AssociationIds = v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DescribeTrunkInterfaceAssociationsInput) SetDryRun(v bool) *DescribeTrunkInterfaceAssociationsInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetFilters sets the Filters field's value.
+func (s *DescribeTrunkInterfaceAssociationsInput) SetFilters(v []*Filter) *DescribeTrunkInterfaceAssociationsInput {
+ s.Filters = v
+ return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *DescribeTrunkInterfaceAssociationsInput) SetMaxResults(v int64) *DescribeTrunkInterfaceAssociationsInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeTrunkInterfaceAssociationsInput) SetNextToken(v string) *DescribeTrunkInterfaceAssociationsInput {
+ s.NextToken = &v
+ return s
+}
+
+type DescribeTrunkInterfaceAssociationsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the trunk associations.
+ InterfaceAssociations []*TrunkInterfaceAssociation `locationName:"interfaceAssociationSet" locationNameList:"item" type:"list"`
+
+ // The token to use to retrieve the next page of results. This value is null
+ // when there are no more results to return.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeTrunkInterfaceAssociationsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeTrunkInterfaceAssociationsOutput) GoString() string {
+ return s.String()
+}
+
+// SetInterfaceAssociations sets the InterfaceAssociations field's value.
+func (s *DescribeTrunkInterfaceAssociationsOutput) SetInterfaceAssociations(v []*TrunkInterfaceAssociation) *DescribeTrunkInterfaceAssociationsOutput {
+ s.InterfaceAssociations = v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeTrunkInterfaceAssociationsOutput) SetNextToken(v string) *DescribeTrunkInterfaceAssociationsOutput {
+ s.NextToken = &v
+ return s
+}
+
type DescribeVolumeAttributeInput struct {
_ struct{} `type:"structure"`
@@ -75454,6 +80320,8 @@ type DescribeVpcEndpointServicesInput struct {
//
// * service-name - The name of the service.
//
+ // * service-type - The type of service (Interface | Gateway).
+ //
// * tag: - The key/value combination of a tag assigned to the resource.
// Use the tag key in the filter name and the tag value as the filter value.
// For example, to find all resources that have a tag with the key Owner
@@ -75700,8 +80568,8 @@ type DescribeVpcPeeringConnectionsInput struct {
//
// * accepter-vpc-info.cidr-block - The IPv4 CIDR block of the accepter VPC.
//
- // * accepter-vpc-info.owner-id - The AWS account ID of the owner of the
- // accepter VPC.
+ // * accepter-vpc-info.owner-id - The ID of the Amazon Web Services account
+ // that owns the accepter VPC.
//
// * accepter-vpc-info.vpc-id - The ID of the accepter VPC.
//
@@ -75710,8 +80578,8 @@ type DescribeVpcPeeringConnectionsInput struct {
// * requester-vpc-info.cidr-block - The IPv4 CIDR block of the requester's
// VPC.
//
- // * requester-vpc-info.owner-id - The AWS account ID of the owner of the
- // requester VPC.
+ // * requester-vpc-info.owner-id - The ID of the Amazon Web Services account
+ // that owns the requester VPC.
//
// * requester-vpc-info.vpc-id - The ID of the requester VPC.
//
@@ -75872,9 +80740,9 @@ type DescribeVpcsInput struct {
// * ipv6-cidr-block-association.state - The state of an IPv6 CIDR block
// associated with the VPC.
//
- // * isDefault - Indicates whether the VPC is the default VPC.
+ // * is-default - Indicates whether the VPC is the default VPC.
//
- // * owner-id - The ID of the AWS account that owns the VPC.
+ // * owner-id - The ID of the Amazon Web Services account that owns the VPC.
//
// * state - The state of the VPC (pending | available).
//
@@ -76659,7 +81527,7 @@ type DhcpOptions struct {
// The ID of the set of DHCP options.
DhcpOptionsId *string `locationName:"dhcpOptionsId" type:"string"`
- // The ID of the AWS account that owns the DHCP options set.
+ // The ID of the Amazon Web Services account that owns the DHCP options set.
OwnerId *string `locationName:"ownerId" type:"string"`
// Any tags assigned to the DHCP options set.
@@ -76920,11 +81788,12 @@ type DisableFastSnapshotRestoreSuccessItem struct {
// The time at which fast snapshot restores entered the optimizing state.
OptimizingTime *time.Time `locationName:"optimizingTime" type:"timestamp"`
- // The AWS owner alias that enabled fast snapshot restores on the snapshot.
- // This is intended for future use.
+ // The Amazon Web Services owner alias that enabled fast snapshot restores on
+ // the snapshot. This is intended for future use.
OwnerAlias *string `locationName:"ownerAlias" type:"string"`
- // The ID of the AWS account that enabled fast snapshot restores on the snapshot.
+ // The ID of the Amazon Web Services account that enabled fast snapshot restores
+ // on the snapshot.
OwnerId *string `locationName:"ownerId" type:"string"`
// The ID of the snapshot.
@@ -77117,6 +81986,130 @@ func (s *DisableFastSnapshotRestoresOutput) SetUnsuccessful(v []*DisableFastSnap
return s
}
+type DisableImageDeprecationInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The ID of the AMI.
+ //
+ // ImageId is a required field
+ ImageId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DisableImageDeprecationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisableImageDeprecationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DisableImageDeprecationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DisableImageDeprecationInput"}
+ if s.ImageId == nil {
+ invalidParams.Add(request.NewErrParamRequired("ImageId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DisableImageDeprecationInput) SetDryRun(v bool) *DisableImageDeprecationInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetImageId sets the ImageId field's value.
+func (s *DisableImageDeprecationInput) SetImageId(v string) *DisableImageDeprecationInput {
+ s.ImageId = &v
+ return s
+}
+
+type DisableImageDeprecationOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Returns true if the request succeeds; otherwise, it returns an error.
+ Return *bool `locationName:"return" type:"boolean"`
+}
+
+// String returns the string representation
+func (s DisableImageDeprecationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisableImageDeprecationOutput) GoString() string {
+ return s.String()
+}
+
+// SetReturn sets the Return field's value.
+func (s *DisableImageDeprecationOutput) SetReturn(v bool) *DisableImageDeprecationOutput {
+ s.Return = &v
+ return s
+}
+
+type DisableSerialConsoleAccessInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s DisableSerialConsoleAccessInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisableSerialConsoleAccessInput) GoString() string {
+ return s.String()
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DisableSerialConsoleAccessInput) SetDryRun(v bool) *DisableSerialConsoleAccessInput {
+ s.DryRun = &v
+ return s
+}
+
+type DisableSerialConsoleAccessOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If true, access to the EC2 serial console of all instances is enabled for
+ // your account. If false, access to the EC2 serial console of all instances
+ // is disabled for your account.
+ SerialConsoleAccessEnabled *bool `locationName:"serialConsoleAccessEnabled" type:"boolean"`
+}
+
+// String returns the string representation
+func (s DisableSerialConsoleAccessOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisableSerialConsoleAccessOutput) GoString() string {
+ return s.String()
+}
+
+// SetSerialConsoleAccessEnabled sets the SerialConsoleAccessEnabled field's value.
+func (s *DisableSerialConsoleAccessOutput) SetSerialConsoleAccessEnabled(v bool) *DisableSerialConsoleAccessOutput {
+ s.SerialConsoleAccessEnabled = &v
+ return s
+}
+
type DisableTransitGatewayRouteTablePropagationInput struct {
_ struct{} `type:"structure"`
@@ -77700,6 +82693,93 @@ func (s *DisassociateIamInstanceProfileOutput) SetIamInstanceProfileAssociation(
return s
}
+type DisassociateInstanceEventWindowInput struct {
+ _ struct{} `type:"structure"`
+
+ // One or more targets to disassociate from the specified event window.
+ //
+ // AssociationTarget is a required field
+ AssociationTarget *InstanceEventWindowDisassociationRequest `type:"structure" required:"true"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The ID of the event window.
+ //
+ // InstanceEventWindowId is a required field
+ InstanceEventWindowId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DisassociateInstanceEventWindowInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisassociateInstanceEventWindowInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DisassociateInstanceEventWindowInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DisassociateInstanceEventWindowInput"}
+ if s.AssociationTarget == nil {
+ invalidParams.Add(request.NewErrParamRequired("AssociationTarget"))
+ }
+ if s.InstanceEventWindowId == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceEventWindowId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAssociationTarget sets the AssociationTarget field's value.
+func (s *DisassociateInstanceEventWindowInput) SetAssociationTarget(v *InstanceEventWindowDisassociationRequest) *DisassociateInstanceEventWindowInput {
+ s.AssociationTarget = v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DisassociateInstanceEventWindowInput) SetDryRun(v bool) *DisassociateInstanceEventWindowInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetInstanceEventWindowId sets the InstanceEventWindowId field's value.
+func (s *DisassociateInstanceEventWindowInput) SetInstanceEventWindowId(v string) *DisassociateInstanceEventWindowInput {
+ s.InstanceEventWindowId = &v
+ return s
+}
+
+type DisassociateInstanceEventWindowOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the event window.
+ InstanceEventWindow *InstanceEventWindow `locationName:"instanceEventWindow" type:"structure"`
+}
+
+// String returns the string representation
+func (s DisassociateInstanceEventWindowOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisassociateInstanceEventWindowOutput) GoString() string {
+ return s.String()
+}
+
+// SetInstanceEventWindow sets the InstanceEventWindow field's value.
+func (s *DisassociateInstanceEventWindowOutput) SetInstanceEventWindow(v *InstanceEventWindow) *DisassociateInstanceEventWindowOutput {
+ s.InstanceEventWindow = v
+ return s
+}
+
type DisassociateRouteTableInput struct {
_ struct{} `type:"structure"`
@@ -77998,6 +83078,99 @@ func (s *DisassociateTransitGatewayRouteTableOutput) SetAssociation(v *TransitGa
return s
}
+type DisassociateTrunkInterfaceInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the association
+ //
+ // AssociationId is a required field
+ AssociationId *string `type:"string" required:"true"`
+
+ // Unique, case-sensitive identifier that you provide to ensure the idempotency
+ // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html).
+ ClientToken *string `type:"string" idempotencyToken:"true"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s DisassociateTrunkInterfaceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisassociateTrunkInterfaceInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DisassociateTrunkInterfaceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DisassociateTrunkInterfaceInput"}
+ if s.AssociationId == nil {
+ invalidParams.Add(request.NewErrParamRequired("AssociationId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAssociationId sets the AssociationId field's value.
+func (s *DisassociateTrunkInterfaceInput) SetAssociationId(v string) *DisassociateTrunkInterfaceInput {
+ s.AssociationId = &v
+ return s
+}
+
+// SetClientToken sets the ClientToken field's value.
+func (s *DisassociateTrunkInterfaceInput) SetClientToken(v string) *DisassociateTrunkInterfaceInput {
+ s.ClientToken = &v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DisassociateTrunkInterfaceInput) SetDryRun(v bool) *DisassociateTrunkInterfaceInput {
+ s.DryRun = &v
+ return s
+}
+
+type DisassociateTrunkInterfaceOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Unique, case-sensitive identifier that you provide to ensure the idempotency
+ // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html).
+ ClientToken *string `locationName:"clientToken" type:"string"`
+
+ // Returns true if the request succeeds; otherwise, it returns an error.
+ Return *bool `locationName:"return" type:"boolean"`
+}
+
+// String returns the string representation
+func (s DisassociateTrunkInterfaceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisassociateTrunkInterfaceOutput) GoString() string {
+ return s.String()
+}
+
+// SetClientToken sets the ClientToken field's value.
+func (s *DisassociateTrunkInterfaceOutput) SetClientToken(v string) *DisassociateTrunkInterfaceOutput {
+ s.ClientToken = &v
+ return s
+}
+
+// SetReturn sets the Return field's value.
+func (s *DisassociateTrunkInterfaceOutput) SetReturn(v bool) *DisassociateTrunkInterfaceOutput {
+ s.Return = &v
+ return s
+}
+
type DisassociateVpcCidrBlockInput struct {
_ struct{} `type:"structure"`
@@ -78422,15 +83595,15 @@ type EbsBlockDevice struct {
// Indicates whether the EBS volume is deleted on instance termination. For
// more information, see Preserving Amazon EBS volumes on instance termination
// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/terminating-instances.html#preserving-volumes-on-termination)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // in the Amazon EC2 User Guide.
DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"`
// Indicates whether the encryption state of an EBS volume is changed while
// being restored from a backing snapshot. The effect of setting the encryption
// state to true depends on the volume origin (new or from a snapshot), starting
// encryption state, ownership, and whether encryption by default is enabled.
- // For more information, see Amazon EBS Encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#encryption-parameters)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // For more information, see Amazon EBS encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#encryption-parameters)
+ // in the Amazon EC2 User Guide.
//
// In no case can you remove encryption from an encrypted volume.
//
@@ -78471,6 +83644,9 @@ type EbsBlockDevice struct {
// and RequestSpotInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RequestSpotInstances.html).
KmsKeyId *string `type:"string"`
+ // The ARN of the Outpost on which the snapshot is stored.
+ OutpostArn *string `locationName:"outpostArn" type:"string"`
+
// The ID of the snapshot.
SnapshotId *string `locationName:"snapshotId" type:"string"`
@@ -78492,16 +83668,14 @@ type EbsBlockDevice struct {
//
// * io1 and io2: 4-16,384
//
- // * st1: 500-16,384
- //
- // * sc1: 500-16,384
+ // * st1 and sc1: 125-16,384
//
// * standard: 1-1,024
VolumeSize *int64 `locationName:"volumeSize" type:"integer"`
// The volume type. For more information, see Amazon EBS volume types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html)
- // in the Amazon Elastic Compute Cloud User Guide. If the volume type is io1
- // or io2, you must specify the IOPS that the volume supports.
+ // in the Amazon EC2 User Guide. If the volume type is io1 or io2, you must
+ // specify the IOPS that the volume supports.
VolumeType *string `locationName:"volumeType" type:"string" enum:"VolumeType"`
}
@@ -78539,6 +83713,12 @@ func (s *EbsBlockDevice) SetKmsKeyId(v string) *EbsBlockDevice {
return s
}
+// SetOutpostArn sets the OutpostArn field's value.
+func (s *EbsBlockDevice) SetOutpostArn(v string) *EbsBlockDevice {
+ s.OutpostArn = &v
+ return s
+}
+
// SetSnapshotId sets the SnapshotId field's value.
func (s *EbsBlockDevice) SetSnapshotId(v string) *EbsBlockDevice {
s.SnapshotId = &v
@@ -78571,8 +83751,8 @@ type EbsInfo struct {
EbsOptimizedInfo *EbsOptimizedInfo `locationName:"ebsOptimizedInfo" type:"structure"`
// Indicates whether the instance type is Amazon EBS-optimized. For more information,
- // see Amazon EBS-Optimized Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html)
- // in Amazon EC2 User Guide for Linux Instances.
+ // see Amazon EBS-optimized instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html)
+ // in Amazon EC2 User Guide.
EbsOptimizedSupport *string `locationName:"ebsOptimizedSupport" type:"string" enum:"EbsOptimizedSupport"`
// Indicates whether Amazon EBS encryption is supported.
@@ -78776,6 +83956,30 @@ func (s *EbsOptimizedInfo) SetMaximumThroughputInMBps(v float64) *EbsOptimizedIn
return s
}
+// Describes the Elastic Fabric Adapters for the instance type.
+type EfaInfo struct {
+ _ struct{} `type:"structure"`
+
+ // The maximum number of Elastic Fabric Adapters for the instance type.
+ MaximumEfaInterfaces *int64 `locationName:"maximumEfaInterfaces" type:"integer"`
+}
+
+// String returns the string representation
+func (s EfaInfo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EfaInfo) GoString() string {
+ return s.String()
+}
+
+// SetMaximumEfaInterfaces sets the MaximumEfaInterfaces field's value.
+func (s *EfaInfo) SetMaximumEfaInterfaces(v int64) *EfaInfo {
+ s.MaximumEfaInterfaces = &v
+ return s
+}
+
// Describes an egress-only internet gateway.
type EgressOnlyInternetGateway struct {
_ struct{} `type:"structure"`
@@ -79316,11 +84520,12 @@ type EnableFastSnapshotRestoreSuccessItem struct {
// The time at which fast snapshot restores entered the optimizing state.
OptimizingTime *time.Time `locationName:"optimizingTime" type:"timestamp"`
- // The AWS owner alias that enabled fast snapshot restores on the snapshot.
- // This is intended for future use.
+ // The Amazon Web Services owner alias that enabled fast snapshot restores on
+ // the snapshot. This is intended for future use.
OwnerAlias *string `locationName:"ownerAlias" type:"string"`
- // The ID of the AWS account that enabled fast snapshot restores on the snapshot.
+ // The ID of the Amazon Web Services account that enabled fast snapshot restores
+ // on the snapshot.
OwnerId *string `locationName:"ownerId" type:"string"`
// The ID of the snapshot.
@@ -79430,7 +84635,8 @@ type EnableFastSnapshotRestoresInput struct {
DryRun *bool `type:"boolean"`
// The IDs of one or more snapshots. For example, snap-1234567890abcdef0. You
- // can specify a snapshot that was shared with you from another AWS account.
+ // can specify a snapshot that was shared with you from another Amazon Web Services
+ // account.
//
// SourceSnapshotIds is a required field
SourceSnapshotIds []*string `locationName:"SourceSnapshotId" locationNameList:"SnapshotId" type:"list" required:"true"`
@@ -79514,6 +84720,149 @@ func (s *EnableFastSnapshotRestoresOutput) SetUnsuccessful(v []*EnableFastSnapsh
return s
}
+type EnableImageDeprecationInput struct {
+ _ struct{} `type:"structure"`
+
+ // The date and time to deprecate the AMI, in UTC, in the following format:
+ // YYYY-MM-DDTHH:MM:SSZ. If you specify a value for seconds, Amazon EC2 rounds
+ // the seconds to the nearest minute.
+ //
+ // You can’t specify a date in the past. The upper limit for DeprecateAt is
+ // 10 years from now.
+ //
+ // DeprecateAt is a required field
+ DeprecateAt *time.Time `type:"timestamp" required:"true"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The ID of the AMI.
+ //
+ // ImageId is a required field
+ ImageId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s EnableImageDeprecationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableImageDeprecationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *EnableImageDeprecationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "EnableImageDeprecationInput"}
+ if s.DeprecateAt == nil {
+ invalidParams.Add(request.NewErrParamRequired("DeprecateAt"))
+ }
+ if s.ImageId == nil {
+ invalidParams.Add(request.NewErrParamRequired("ImageId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDeprecateAt sets the DeprecateAt field's value.
+func (s *EnableImageDeprecationInput) SetDeprecateAt(v time.Time) *EnableImageDeprecationInput {
+ s.DeprecateAt = &v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *EnableImageDeprecationInput) SetDryRun(v bool) *EnableImageDeprecationInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetImageId sets the ImageId field's value.
+func (s *EnableImageDeprecationInput) SetImageId(v string) *EnableImageDeprecationInput {
+ s.ImageId = &v
+ return s
+}
+
+type EnableImageDeprecationOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Returns true if the request succeeds; otherwise, it returns an error.
+ Return *bool `locationName:"return" type:"boolean"`
+}
+
+// String returns the string representation
+func (s EnableImageDeprecationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableImageDeprecationOutput) GoString() string {
+ return s.String()
+}
+
+// SetReturn sets the Return field's value.
+func (s *EnableImageDeprecationOutput) SetReturn(v bool) *EnableImageDeprecationOutput {
+ s.Return = &v
+ return s
+}
+
+type EnableSerialConsoleAccessInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s EnableSerialConsoleAccessInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableSerialConsoleAccessInput) GoString() string {
+ return s.String()
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *EnableSerialConsoleAccessInput) SetDryRun(v bool) *EnableSerialConsoleAccessInput {
+ s.DryRun = &v
+ return s
+}
+
+type EnableSerialConsoleAccessOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If true, access to the EC2 serial console of all instances is enabled for
+ // your account. If false, access to the EC2 serial console of all instances
+ // is disabled for your account.
+ SerialConsoleAccessEnabled *bool `locationName:"serialConsoleAccessEnabled" type:"boolean"`
+}
+
+// String returns the string representation
+func (s EnableSerialConsoleAccessOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableSerialConsoleAccessOutput) GoString() string {
+ return s.String()
+}
+
+// SetSerialConsoleAccessEnabled sets the SerialConsoleAccessEnabled field's value.
+func (s *EnableSerialConsoleAccessOutput) SetSerialConsoleAccessEnabled(v bool) *EnableSerialConsoleAccessOutput {
+ s.SerialConsoleAccessEnabled = &v
+ return s
+}
+
type EnableTransitGatewayRouteTablePropagationInput struct {
_ struct{} `type:"structure"`
@@ -79866,12 +85215,13 @@ func (s *EnableVpcClassicLinkOutput) SetReturn(v bool) *EnableVpcClassicLinkOutp
return s
}
-// Indicates whether the instance is enabled for AWS Nitro Enclaves.
+// Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves.
type EnclaveOptions struct {
_ struct{} `type:"structure"`
- // If this parameter is set to true, the instance is enabled for AWS Nitro Enclaves;
- // otherwise, it is not enabled for AWS Nitro Enclaves.
+ // If this parameter is set to true, the instance is enabled for Amazon Web
+ // Services Nitro Enclaves; otherwise, it is not enabled for Amazon Web Services
+ // Nitro Enclaves.
Enabled *bool `locationName:"enabled" type:"boolean"`
}
@@ -79891,13 +85241,14 @@ func (s *EnclaveOptions) SetEnabled(v bool) *EnclaveOptions {
return s
}
-// Indicates whether the instance is enabled for AWS Nitro Enclaves. For more
-// information, see What is AWS Nitro Enclaves? (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html)
-// in the AWS Nitro Enclaves User Guide.
+// Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves.
+// For more information, see What is Amazon Web Services Nitro Enclaves? (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html)
+// in the Amazon Web Services Nitro Enclaves User Guide.
type EnclaveOptionsRequest struct {
_ struct{} `type:"structure"`
- // To enable the instance for AWS Nitro Enclaves, set this parameter to true.
+ // To enable the instance for Amazon Web Services Nitro Enclaves, set this parameter
+ // to true.
Enabled *bool `type:"boolean"`
}
@@ -79961,7 +85312,7 @@ type EventInformation struct {
// * modify_in_progress - A request to modify the EC2 Fleet or Spot Fleet
// request was accepted and is in progress.
//
- // * modify_successful - The EC2 Fleet or Spot Fleet request was modified.
+ // * modify_succeeded - The EC2 Fleet or Spot Fleet request was modified.
//
// * price_update - The price for a launch configuration was adjusted because
// it was too high. This change is permanent.
@@ -80633,12 +85984,13 @@ type ExportImageInput struct {
RoleName *string `type:"string"`
// Information about the destination Amazon S3 bucket. The bucket must exist
- // and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com.
+ // and grant WRITE and READ_ACP permissions to the Amazon Web Services account
+ // vm-import-export@amazon.com.
//
// S3ExportLocation is a required field
S3ExportLocation *ExportTaskS3LocationRequest `type:"structure" required:"true"`
- // The tags to apply to the image being exported.
+ // The tags to apply to the export image task during creation.
TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"`
}
@@ -80756,7 +86108,7 @@ type ExportImageOutput struct {
// The status message for the export image task.
StatusMessage *string `locationName:"statusMessage" type:"string"`
- // Any tags assigned to the image being exported.
+ // Any tags assigned to the export image task.
Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
}
@@ -80856,7 +86208,7 @@ type ExportImageTask struct {
// The status message for the export image task.
StatusMessage *string `locationName:"statusMessage" type:"string"`
- // Any tags assigned to the image being exported.
+ // Any tags assigned to the export image task.
Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
}
@@ -80918,7 +86270,7 @@ func (s *ExportImageTask) SetTags(v []*Tag) *ExportImageTask {
return s
}
-// Describes an instance export task.
+// Describes an export instance task.
type ExportTask struct {
_ struct{} `type:"structure"`
@@ -81077,7 +86429,7 @@ func (s *ExportTaskS3LocationRequest) SetS3Prefix(v string) *ExportTaskS3Locatio
return s
}
-// Describes the format and location for an instance export task.
+// Describes the format and location for the export task.
type ExportToS3Task struct {
_ struct{} `type:"structure"`
@@ -81089,7 +86441,8 @@ type ExportToS3Task struct {
DiskImageFormat *string `locationName:"diskImageFormat" type:"string" enum:"DiskImageFormat"`
// The Amazon S3 bucket for the destination image. The destination bucket must
- // exist and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com.
+ // exist and grant WRITE and READ_ACP permissions to the Amazon Web Services
+ // account vm-import-export@amazon.com.
S3Bucket *string `locationName:"s3Bucket" type:"string"`
// The encryption key for your S3 bucket.
@@ -81130,7 +86483,7 @@ func (s *ExportToS3Task) SetS3Key(v string) *ExportToS3Task {
return s
}
-// Describes an instance export task.
+// Describes an export instance task.
type ExportToS3TaskSpecification struct {
_ struct{} `type:"structure"`
@@ -81142,7 +86495,8 @@ type ExportToS3TaskSpecification struct {
DiskImageFormat *string `locationName:"diskImageFormat" type:"string" enum:"DiskImageFormat"`
// The Amazon S3 bucket for the destination image. The destination bucket must
- // exist and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com.
+ // exist and grant WRITE and READ_ACP permissions to the Amazon Web Services
+ // account vm-import-export@amazon.com.
S3Bucket *string `locationName:"s3Bucket" type:"string"`
// The image is written to a single object in the Amazon S3 bucket at the S3
@@ -81407,29 +86761,7 @@ func (s *FederatedAuthenticationRequest) SetSelfServiceSAMLProviderArn(v string)
// A filter name and value pair that is used to return a more specific list
// of results from a describe operation. Filters can be used to match a set
-// of resources by specific criteria, such as tags, attributes, or IDs. The
-// filters supported by a describe operation are documented with the describe
-// operation. For example:
-//
-// * DescribeAvailabilityZones
-//
-// * DescribeImages
-//
-// * DescribeInstances
-//
-// * DescribeKeyPairs
-//
-// * DescribeSecurityGroups
-//
-// * DescribeSnapshots
-//
-// * DescribeSubnets
-//
-// * DescribeTags
-//
-// * DescribeVolumes
-//
-// * DescribeVpcs
+// of resources by specific criteria, such as tags, attributes, or IDs.
type Filter struct {
_ struct{} `type:"structure"`
@@ -81479,6 +86811,9 @@ type FleetData struct {
// Constraints: Maximum 64 ASCII characters
ClientToken *string `locationName:"clientToken" type:"string"`
+ // Reserved.
+ Context *string `locationName:"context" type:"string"`
+
// The creation date and time of the EC2 Fleet.
CreateTime *time.Time `locationName:"createTime" type:"timestamp"`
@@ -81514,7 +86849,10 @@ type FleetData struct {
// The allocation strategy of On-Demand Instances in an EC2 Fleet.
OnDemandOptions *OnDemandOptions `locationName:"onDemandOptions" type:"structure"`
- // Indicates whether EC2 Fleet should replace unhealthy instances.
+ // Indicates whether EC2 Fleet should replace unhealthy Spot Instances. Supported
+ // only for fleets of type maintain. For more information, see EC2 Fleet health
+ // checks (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/manage-ec2-fleet.html#ec2-fleet-health-checks)
+ // in the Amazon EC2 User Guide.
ReplaceUnhealthyInstances *bool `locationName:"replaceUnhealthyInstances" type:"boolean"`
// The configuration of Spot Instances in an EC2 Fleet.
@@ -81576,6 +86914,12 @@ func (s *FleetData) SetClientToken(v string) *FleetData {
return s
}
+// SetContext sets the Context field's value.
+func (s *FleetData) SetContext(v string) *FleetData {
+ s.Context = &v
+ return s
+}
+
// SetCreateTime sets the CreateTime field's value.
func (s *FleetData) SetCreateTime(v time.Time) *FleetData {
s.CreateTime = &v
@@ -81728,6 +87072,9 @@ type FleetLaunchTemplateConfigRequest struct {
// Any parameters that you specify override the same parameters in the launch
// template.
+ //
+ // For fleets of type request and maintain, a maximum of 300 items is allowed
+ // across all launch templates.
Overrides []*FleetLaunchTemplateOverridesRequest `locationNameList:"item" type:"list"`
}
@@ -81784,12 +87131,21 @@ type FleetLaunchTemplateOverrides struct {
// The location where the instance launched, if applicable.
Placement *PlacementResponse `locationName:"placement" type:"structure"`
- // The priority for the launch template override. If AllocationStrategy is set
- // to prioritized, EC2 Fleet uses priority to determine which launch template
- // override to use first in fulfilling On-Demand capacity. The highest priority
- // is launched first. Valid values are whole numbers starting at 0. The lower
- // the number, the higher the priority. If no number is set, the override has
- // the lowest priority.
+ // The priority for the launch template override. The highest priority is launched
+ // first.
+ //
+ // If the On-Demand AllocationStrategy is set to prioritized, EC2 Fleet uses
+ // priority to determine which launch template override to use first in fulfilling
+ // On-Demand capacity.
+ //
+ // If the Spot AllocationStrategy is set to capacity-optimized-prioritized,
+ // EC2 Fleet uses priority on a best-effort basis to determine which launch
+ // template override to use in fulfilling Spot capacity, but optimizes for capacity
+ // first.
+ //
+ // Valid values are whole numbers starting at 0. The lower the number, the higher
+ // the priority. If no number is set, the override has the lowest priority.
+ // You can set the same priority for different launch template overrides.
Priority *float64 `locationName:"priority" type:"double"`
// The ID of the subnet in which to launch the instances.
@@ -81867,12 +87223,21 @@ type FleetLaunchTemplateOverridesRequest struct {
// The location where the instance launched, if applicable.
Placement *Placement `type:"structure"`
- // The priority for the launch template override. If AllocationStrategy is set
- // to prioritized, EC2 Fleet uses priority to determine which launch template
- // override to use first in fulfilling On-Demand capacity. The highest priority
- // is launched first. Valid values are whole numbers starting at 0. The lower
- // the number, the higher the priority. If no number is set, the launch template
- // override has the lowest priority.
+ // The priority for the launch template override. The highest priority is launched
+ // first.
+ //
+ // If the On-Demand AllocationStrategy is set to prioritized, EC2 Fleet uses
+ // priority to determine which launch template override to use first in fulfilling
+ // On-Demand capacity.
+ //
+ // If the Spot AllocationStrategy is set to capacity-optimized-prioritized,
+ // EC2 Fleet uses priority on a best-effort basis to determine which launch
+ // template override to use in fulfilling Spot capacity, but optimizes for capacity
+ // first.
+ //
+ // Valid values are whole numbers starting at 0. The lower the number, the higher
+ // the priority. If no number is set, the launch template override has the lowest
+ // priority. You can set the same priority for different launch template overrides.
Priority *float64 `type:"double"`
// The IDs of the subnets in which to launch the instances. Separate multiple
@@ -82008,7 +87373,7 @@ func (s *FleetLaunchTemplateSpecification) SetVersion(v string) *FleetLaunchTemp
// that can be used by an EC2 Fleet to configure Amazon EC2 instances. For information
// about launch templates, see Launching an instance from a launch template
// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
type FleetLaunchTemplateSpecificationRequest struct {
_ struct{} `type:"structure"`
@@ -82106,7 +87471,7 @@ func (s *FleetSpotCapacityRebalance) SetReplacementStrategy(v string) *FleetSpot
// The Spot Instance replacement strategy to use when Amazon EC2 emits a signal
// that your Spot Instance is at an elevated risk of being interrupted. For
// more information, see Capacity rebalancing (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-configuration-strategies.html#ec2-fleet-capacity-rebalance)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
type FleetSpotCapacityRebalanceRequest struct {
_ struct{} `type:"structure"`
@@ -82992,8 +88357,8 @@ type GetCapacityReservationUsageOutput struct {
// and time specified in your request. The reserved capacity is no longer
// available for your use.
//
- // * cancelled - The Capacity Reservation was manually cancelled. The reserved
- // capacity is no longer available for your use.
+ // * cancelled - The Capacity Reservation was cancelled. The reserved capacity
+ // is no longer available for your use.
//
// * pending - The Capacity Reservation request was successful but the capacity
// provisioning is still pending.
@@ -83486,7 +88851,7 @@ func (s *GetEbsDefaultKmsKeyIdInput) SetDryRun(v bool) *GetEbsDefaultKmsKeyIdInp
type GetEbsDefaultKmsKeyIdOutput struct {
_ struct{} `type:"structure"`
- // The Amazon Resource Name (ARN) of the default CMK for encryption by default.
+ // The Amazon Resource Name (ARN) of the default KMS key for encryption by default.
KmsKeyId *string `locationName:"kmsKeyId" type:"string"`
}
@@ -83555,6 +88920,113 @@ func (s *GetEbsEncryptionByDefaultOutput) SetEbsEncryptionByDefault(v bool) *Get
return s
}
+type GetFlowLogsIntegrationTemplateInput struct {
+ _ struct{} `type:"structure"`
+
+ // To store the CloudFormation template in Amazon S3, specify the location in
+ // Amazon S3.
+ //
+ // ConfigDeliveryS3DestinationArn is a required field
+ ConfigDeliveryS3DestinationArn *string `type:"string" required:"true"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The ID of the flow log.
+ //
+ // FlowLogId is a required field
+ FlowLogId *string `type:"string" required:"true"`
+
+ // Information about the service integration.
+ //
+ // IntegrateServices is a required field
+ IntegrateServices *IntegrateServices `locationName:"IntegrateService" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s GetFlowLogsIntegrationTemplateInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetFlowLogsIntegrationTemplateInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetFlowLogsIntegrationTemplateInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetFlowLogsIntegrationTemplateInput"}
+ if s.ConfigDeliveryS3DestinationArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("ConfigDeliveryS3DestinationArn"))
+ }
+ if s.FlowLogId == nil {
+ invalidParams.Add(request.NewErrParamRequired("FlowLogId"))
+ }
+ if s.IntegrateServices == nil {
+ invalidParams.Add(request.NewErrParamRequired("IntegrateServices"))
+ }
+ if s.IntegrateServices != nil {
+ if err := s.IntegrateServices.Validate(); err != nil {
+ invalidParams.AddNested("IntegrateServices", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetConfigDeliveryS3DestinationArn sets the ConfigDeliveryS3DestinationArn field's value.
+func (s *GetFlowLogsIntegrationTemplateInput) SetConfigDeliveryS3DestinationArn(v string) *GetFlowLogsIntegrationTemplateInput {
+ s.ConfigDeliveryS3DestinationArn = &v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *GetFlowLogsIntegrationTemplateInput) SetDryRun(v bool) *GetFlowLogsIntegrationTemplateInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetFlowLogId sets the FlowLogId field's value.
+func (s *GetFlowLogsIntegrationTemplateInput) SetFlowLogId(v string) *GetFlowLogsIntegrationTemplateInput {
+ s.FlowLogId = &v
+ return s
+}
+
+// SetIntegrateServices sets the IntegrateServices field's value.
+func (s *GetFlowLogsIntegrationTemplateInput) SetIntegrateServices(v *IntegrateServices) *GetFlowLogsIntegrationTemplateInput {
+ s.IntegrateServices = v
+ return s
+}
+
+type GetFlowLogsIntegrationTemplateOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The generated CloudFormation template.
+ Result *string `locationName:"result" type:"string"`
+}
+
+// String returns the string representation
+func (s GetFlowLogsIntegrationTemplateOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetFlowLogsIntegrationTemplateOutput) GoString() string {
+ return s.String()
+}
+
+// SetResult sets the Result field's value.
+func (s *GetFlowLogsIntegrationTemplateOutput) SetResult(v string) *GetFlowLogsIntegrationTemplateOutput {
+ s.Result = &v
+ return s
+}
+
type GetGroupsForCapacityReservationInput struct {
_ struct{} `type:"structure"`
@@ -84319,6 +89791,194 @@ func (s *GetReservedInstancesExchangeQuoteOutput) SetValidationFailureReason(v s
return s
}
+type GetSerialConsoleAccessStatusInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s GetSerialConsoleAccessStatusInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetSerialConsoleAccessStatusInput) GoString() string {
+ return s.String()
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *GetSerialConsoleAccessStatusInput) SetDryRun(v bool) *GetSerialConsoleAccessStatusInput {
+ s.DryRun = &v
+ return s
+}
+
+type GetSerialConsoleAccessStatusOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If true, access to the EC2 serial console of all instances is enabled for
+ // your account. If false, access to the EC2 serial console of all instances
+ // is disabled for your account.
+ SerialConsoleAccessEnabled *bool `locationName:"serialConsoleAccessEnabled" type:"boolean"`
+}
+
+// String returns the string representation
+func (s GetSerialConsoleAccessStatusOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetSerialConsoleAccessStatusOutput) GoString() string {
+ return s.String()
+}
+
+// SetSerialConsoleAccessEnabled sets the SerialConsoleAccessEnabled field's value.
+func (s *GetSerialConsoleAccessStatusOutput) SetSerialConsoleAccessEnabled(v bool) *GetSerialConsoleAccessStatusOutput {
+ s.SerialConsoleAccessEnabled = &v
+ return s
+}
+
+type GetSubnetCidrReservationsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // One or more filters.
+ //
+ // * reservationType - The type of reservation (prefix | explicit).
+ //
+ // * subnet-id - The ID of the subnet.
+ //
+ // * tag: - The key/value combination of a tag assigned to the resource.
+ // Use the tag key in the filter name and the tag value as the filter value.
+ // For example, to find all resources that have a tag with the key Owner
+ // and the value TeamA, specify tag:Owner for the filter name and TeamA for
+ // the filter value.
+ //
+ // * tag-key - The key of a tag assigned to the resource. Use this filter
+ // to find all resources assigned a tag with a specific key, regardless of
+ // the tag value.
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
+
+ // The maximum number of results to return with a single call. To retrieve the
+ // remaining results, make another call with the returned nextToken value.
+ MaxResults *int64 `min:"5" type:"integer"`
+
+ // The token for the next page of results.
+ NextToken *string `type:"string"`
+
+ // The ID of the subnet.
+ //
+ // SubnetId is a required field
+ SubnetId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetSubnetCidrReservationsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetSubnetCidrReservationsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetSubnetCidrReservationsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetSubnetCidrReservationsInput"}
+ if s.MaxResults != nil && *s.MaxResults < 5 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5))
+ }
+ if s.SubnetId == nil {
+ invalidParams.Add(request.NewErrParamRequired("SubnetId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *GetSubnetCidrReservationsInput) SetDryRun(v bool) *GetSubnetCidrReservationsInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetFilters sets the Filters field's value.
+func (s *GetSubnetCidrReservationsInput) SetFilters(v []*Filter) *GetSubnetCidrReservationsInput {
+ s.Filters = v
+ return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *GetSubnetCidrReservationsInput) SetMaxResults(v int64) *GetSubnetCidrReservationsInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *GetSubnetCidrReservationsInput) SetNextToken(v string) *GetSubnetCidrReservationsInput {
+ s.NextToken = &v
+ return s
+}
+
+// SetSubnetId sets the SubnetId field's value.
+func (s *GetSubnetCidrReservationsInput) SetSubnetId(v string) *GetSubnetCidrReservationsInput {
+ s.SubnetId = &v
+ return s
+}
+
+type GetSubnetCidrReservationsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The token to use to retrieve the next page of results. This value is null
+ // when there are no more results to return.
+ NextToken *string `locationName:"nextToken" type:"string"`
+
+ // Information about the IPv4 subnet CIDR reservations.
+ SubnetIpv4CidrReservations []*SubnetCidrReservation `locationName:"subnetIpv4CidrReservationSet" locationNameList:"item" type:"list"`
+
+ // Information about the IPv6 subnet CIDR reservations.
+ SubnetIpv6CidrReservations []*SubnetCidrReservation `locationName:"subnetIpv6CidrReservationSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s GetSubnetCidrReservationsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetSubnetCidrReservationsOutput) GoString() string {
+ return s.String()
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *GetSubnetCidrReservationsOutput) SetNextToken(v string) *GetSubnetCidrReservationsOutput {
+ s.NextToken = &v
+ return s
+}
+
+// SetSubnetIpv4CidrReservations sets the SubnetIpv4CidrReservations field's value.
+func (s *GetSubnetCidrReservationsOutput) SetSubnetIpv4CidrReservations(v []*SubnetCidrReservation) *GetSubnetCidrReservationsOutput {
+ s.SubnetIpv4CidrReservations = v
+ return s
+}
+
+// SetSubnetIpv6CidrReservations sets the SubnetIpv6CidrReservations field's value.
+func (s *GetSubnetCidrReservationsOutput) SetSubnetIpv6CidrReservations(v []*SubnetCidrReservation) *GetSubnetCidrReservationsOutput {
+ s.SubnetIpv6CidrReservations = v
+ return s
+}
+
type GetTransitGatewayAttachmentPropagationsInput struct {
_ struct{} `type:"structure"`
@@ -85074,7 +90734,7 @@ func (s *GroupIdentifier) SetGroupName(v string) *GroupIdentifier {
// Indicates whether your instance is configured for hibernation. This parameter
// is valid only if the instance meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html#hibernating-prerequisites).
// For more information, see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
type HibernationOptions struct {
_ struct{} `type:"structure"`
@@ -85102,7 +90762,7 @@ func (s *HibernationOptions) SetConfigured(v bool) *HibernationOptions {
// Indicates whether your instance is configured for hibernation. This parameter
// is valid only if the instance meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html#hibernating-prerequisites).
// For more information, see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
type HibernationOptionsRequest struct {
_ struct{} `type:"structure"`
@@ -85229,10 +90889,9 @@ type Host struct {
AllocationTime *time.Time `locationName:"allocationTime" type:"timestamp"`
// Indicates whether the Dedicated Host supports multiple instance types of
- // the same instance family, or a specific instance type only. one indicates
- // that the Dedicated Host supports multiple instance types in the instance
- // family. off indicates that the Dedicated Host supports a single instance
- // type only.
+ // the same instance family. If the value is on, the Dedicated Host supports
+ // multiple instance types in the instance family. If the value is off, the
+ // Dedicated Host supports a single instance type only.
AllowsMultipleInstanceTypes *string `locationName:"allowsMultipleInstanceTypes" type:"string" enum:"AllowsMultipleInstanceTypes"`
// Whether auto-placement is on or off.
@@ -85248,7 +90907,7 @@ type Host struct {
AvailableCapacity *AvailableCapacity `locationName:"availableCapacity" type:"structure"`
// Unique, case-sensitive identifier that you provide to ensure the idempotency
- // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
+ // of the request. For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
ClientToken *string `locationName:"clientToken" type:"string"`
// The ID of the Dedicated Host.
@@ -85272,7 +90931,7 @@ type Host struct {
// is true, the host is in a host resource group; otherwise, it is not.
MemberOfServiceLinkedResourceGroup *bool `locationName:"memberOfServiceLinkedResourceGroup" type:"boolean"`
- // The ID of the AWS account that owns the Dedicated Host.
+ // The ID of the Amazon Web Services account that owns the Dedicated Host.
OwnerId *string `locationName:"ownerId" type:"string"`
// The time that the Dedicated Host was released.
@@ -85407,7 +91066,7 @@ type HostInstance struct {
// The instance type (for example, m3.medium) of the running instance.
InstanceType *string `locationName:"instanceType" type:"string"`
- // The ID of the AWS account that owns the instance.
+ // The ID of the Amazon Web Services account that owns the instance.
OwnerId *string `locationName:"ownerId" type:"string"`
}
@@ -85985,9 +91644,18 @@ type Image struct {
// Any block device mapping entries.
BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"`
+ // The boot mode of the image. For more information, see Boot modes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-boot.html)
+ // in the Amazon Elastic Compute Cloud User Guide.
+ BootMode *string `locationName:"bootMode" type:"string" enum:"BootModeValues"`
+
// The date and time the image was created.
CreationDate *string `locationName:"creationDate" type:"string"`
+ // The date and time to deprecate the AMI, in UTC, in the following format:
+ // YYYY-MM-DDTHH:MM:SSZ. If you specified a value for seconds, Amazon EC2 rounds
+ // the seconds to the nearest minute.
+ DeprecationTime *string `locationName:"deprecationTime" type:"string"`
+
// The description of the AMI that was provided during image creation.
Description *string `locationName:"description" type:"string"`
@@ -86095,12 +91763,24 @@ func (s *Image) SetBlockDeviceMappings(v []*BlockDeviceMapping) *Image {
return s
}
+// SetBootMode sets the BootMode field's value.
+func (s *Image) SetBootMode(v string) *Image {
+ s.BootMode = &v
+ return s
+}
+
// SetCreationDate sets the CreationDate field's value.
func (s *Image) SetCreationDate(v string) *Image {
s.CreationDate = &v
return s
}
+// SetDeprecationTime sets the DeprecationTime field's value.
+func (s *Image) SetDeprecationTime(v string) *Image {
+ s.DeprecationTime = &v
+ return s
+}
+
// SetDescription sets the Description field's value.
func (s *Image) SetDescription(v string) *Image {
s.Description = &v
@@ -86251,7 +91931,7 @@ type ImageDiskContainer struct {
// The format of the disk image being imported.
//
- // Valid values: OVA | VHD | VHDX |VMDK
+ // Valid values: OVA | VHD | VHDX | VMDK | RAW
Format *string `type:"string"`
// The ID of the EBS snapshot to be used for importing the snapshot.
@@ -86409,6 +92089,9 @@ type ImportImageInput struct {
// Valid values: i386 | x86_64 | arm64
Architecture *string `type:"string"`
+ // The boot mode of the virtual machine.
+ BootMode *string `type:"string" enum:"BootModeValues"`
+
// The client-specific data.
ClientData *ClientData `type:"structure"`
@@ -86428,9 +92111,8 @@ type ImportImageInput struct {
DryRun *bool `type:"boolean"`
// Specifies whether the destination AMI of the imported image should be encrypted.
- // The default CMK for EBS is used unless you specify a non-default AWS Key
- // Management Service (AWS KMS) CMK using KmsKeyId. For more information, see
- // Amazon EBS Encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html)
+ // The default KMS key for EBS is used unless you specify a non-default KMS
+ // key using KmsKeyId. For more information, see Amazon EBS Encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html)
// in the Amazon Elastic Compute Cloud User Guide.
Encrypted *bool `type:"boolean"`
@@ -86439,35 +92121,36 @@ type ImportImageInput struct {
// Valid values: xen
Hypervisor *string `type:"string"`
- // An identifier for the symmetric AWS Key Management Service (AWS KMS) customer
- // master key (CMK) to use when creating the encrypted AMI. This parameter is
- // only required if you want to use a non-default CMK; if this parameter is
- // not specified, the default CMK for EBS is used. If a KmsKeyId is specified,
- // the Encrypted flag must also be set.
+ // An identifier for the symmetric KMS key to use when creating the encrypted
+ // AMI. This parameter is only required if you want to use a non-default KMS
+ // key; if this parameter is not specified, the default KMS key for EBS is used.
+ // If a KmsKeyId is specified, the Encrypted flag must also be set.
//
- // The CMK identifier may be provided in any of the following formats:
+ // The KMS key identifier may be provided in any of the following formats:
//
// * Key ID
//
// * Key alias. The alias ARN contains the arn:aws:kms namespace, followed
- // by the Region of the CMK, the AWS account ID of the CMK owner, the alias
- // namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
+ // by the Region of the key, the Amazon Web Services account ID of the key
+ // owner, the alias namespace, and then the key alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
//
// * ARN using key ID. The ID ARN contains the arn:aws:kms namespace, followed
- // by the Region of the CMK, the AWS account ID of the CMK owner, the key
- // namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.
+ // by the Region of the key, the Amazon Web Services account ID of the key
+ // owner, the key namespace, and then the key ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.
//
// * ARN using key alias. The alias ARN contains the arn:aws:kms namespace,
- // followed by the Region of the CMK, the AWS account ID of the CMK owner,
- // the alias namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
+ // followed by the Region of the key, the Amazon Web Services account ID
+ // of the key owner, the alias namespace, and then the key alias. For example,
+ // arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
//
- // AWS parses KmsKeyId asynchronously, meaning that the action you call may
- // appear to complete even though you provided an invalid identifier. This action
- // will eventually report failure.
+ // Amazon Web Services parses KmsKeyId asynchronously, meaning that the action
+ // you call may appear to complete even though you provided an invalid identifier.
+ // This action will eventually report failure.
//
- // The specified CMK must exist in the Region that the AMI is being copied to.
+ // The specified KMS key must exist in the Region that the AMI is being copied
+ // to.
//
- // Amazon EBS does not support asymmetric CMKs.
+ // Amazon EBS does not support asymmetric KMS keys.
KmsKeyId *string `type:"string"`
// The ARNs of the license configurations.
@@ -86477,12 +92160,12 @@ type ImportImageInput struct {
//
// By default, we detect the source-system operating system (OS) and apply the
// appropriate license. Specify AWS to replace the source-system license with
- // an AWS license, if appropriate. Specify BYOL to retain the source-system
- // license, if appropriate.
+ // an Amazon Web Services license, if appropriate. Specify BYOL to retain the
+ // source-system license, if appropriate.
//
// To use BYOL, you must have existing licenses with rights to use these licenses
- // in a third party cloud, such as AWS. For more information, see Prerequisites
- // (https://docs.aws.amazon.com/vm-import/latest/userguide/vmimport-image-import.html#prerequisites-image)
+ // in a third party cloud, such as Amazon Web Services. For more information,
+ // see Prerequisites (https://docs.aws.amazon.com/vm-import/latest/userguide/vmimport-image-import.html#prerequisites-image)
// in the VM Import/Export User Guide.
LicenseType *string `type:"string"`
@@ -86494,8 +92177,13 @@ type ImportImageInput struct {
// The name of the role to use when not using the default role, 'vmimport'.
RoleName *string `type:"string"`
- // The tags to apply to the image being imported.
+ // The tags to apply to the import image task during creation.
TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"`
+
+ // The usage operation value. For more information, see AMI billing information
+ // fields (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/billing-info-fields.html)
+ // in the Amazon Elastic Compute Cloud User Guide.
+ UsageOperation *string `type:"string"`
}
// String returns the string representation
@@ -86514,6 +92202,12 @@ func (s *ImportImageInput) SetArchitecture(v string) *ImportImageInput {
return s
}
+// SetBootMode sets the BootMode field's value.
+func (s *ImportImageInput) SetBootMode(v string) *ImportImageInput {
+ s.BootMode = &v
+ return s
+}
+
// SetClientData sets the ClientData field's value.
func (s *ImportImageInput) SetClientData(v *ClientData) *ImportImageInput {
s.ClientData = v
@@ -86592,6 +92286,12 @@ func (s *ImportImageInput) SetTagSpecifications(v []*TagSpecification) *ImportIm
return s
}
+// SetUsageOperation sets the UsageOperation field's value.
+func (s *ImportImageInput) SetUsageOperation(v string) *ImportImageInput {
+ s.UsageOperation = &v
+ return s
+}
+
// The request information of license configurations.
type ImportImageLicenseConfigurationRequest struct {
_ struct{} `type:"structure"`
@@ -86661,8 +92361,8 @@ type ImportImageOutput struct {
// The task ID of the import image task.
ImportTaskId *string `locationName:"importTaskId" type:"string"`
- // The identifier for the symmetric AWS Key Management Service (AWS KMS) customer
- // master key (CMK) that was used to create the encrypted AMI.
+ // The identifier for the symmetric KMS key that was used to create the encrypted
+ // AMI.
KmsKeyId *string `locationName:"kmsKeyId" type:"string"`
// The ARNs of the license configurations.
@@ -86686,8 +92386,11 @@ type ImportImageOutput struct {
// A detailed status message of the import task.
StatusMessage *string `locationName:"statusMessage" type:"string"`
- // Any tags assigned to the image being imported.
+ // Any tags assigned to the import image task.
Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
+
+ // The usage operation value.
+ UsageOperation *string `locationName:"usageOperation" type:"string"`
}
// String returns the string representation
@@ -86790,6 +92493,12 @@ func (s *ImportImageOutput) SetTags(v []*Tag) *ImportImageOutput {
return s
}
+// SetUsageOperation sets the UsageOperation field's value.
+func (s *ImportImageOutput) SetUsageOperation(v string) *ImportImageOutput {
+ s.UsageOperation = &v
+ return s
+}
+
// Describes an import image task.
type ImportImageTask struct {
_ struct{} `type:"structure"`
@@ -86799,6 +92508,9 @@ type ImportImageTask struct {
// Valid values: i386 | x86_64 | arm64
Architecture *string `locationName:"architecture" type:"string"`
+ // The boot mode of the virtual machine.
+ BootMode *string `locationName:"bootMode" type:"string" enum:"BootModeValues"`
+
// A description of the import task.
Description *string `locationName:"description" type:"string"`
@@ -86816,8 +92528,7 @@ type ImportImageTask struct {
// The ID of the import image task.
ImportTaskId *string `locationName:"importTaskId" type:"string"`
- // The identifier for the AWS Key Management Service (AWS KMS) customer master
- // key (CMK) that was used to create the encrypted image.
+ // The identifier for the KMS key that was used to create the encrypted image.
KmsKeyId *string `locationName:"kmsKeyId" type:"string"`
// The ARNs of the license configurations that are associated with the import
@@ -86844,6 +92555,9 @@ type ImportImageTask struct {
// The tags for the import image task.
Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
+
+ // The usage operation value.
+ UsageOperation *string `locationName:"usageOperation" type:"string"`
}
// String returns the string representation
@@ -86862,6 +92576,12 @@ func (s *ImportImageTask) SetArchitecture(v string) *ImportImageTask {
return s
}
+// SetBootMode sets the BootMode field's value.
+func (s *ImportImageTask) SetBootMode(v string) *ImportImageTask {
+ s.BootMode = &v
+ return s
+}
+
// SetDescription sets the Description field's value.
func (s *ImportImageTask) SetDescription(v string) *ImportImageTask {
s.Description = &v
@@ -86946,6 +92666,12 @@ func (s *ImportImageTask) SetTags(v []*Tag) *ImportImageTask {
return s
}
+// SetUsageOperation sets the UsageOperation field's value.
+func (s *ImportImageTask) SetUsageOperation(v string) *ImportImageTask {
+ s.UsageOperation = &v
+ return s
+}
+
type ImportInstanceInput struct {
_ struct{} `type:"structure"`
@@ -87384,7 +93110,7 @@ type ImportKeyPairOutput struct {
// The MD5 public key fingerprint as specified in section 4 of RFC 4716.
KeyFingerprint *string `locationName:"keyFingerprint" type:"string"`
- // The key pair name you provided.
+ // The key pair name that you provided.
KeyName *string `locationName:"keyName" type:"string"`
// The ID of the resulting key pair.
@@ -87450,48 +93176,47 @@ type ImportSnapshotInput struct {
DryRun *bool `type:"boolean"`
// Specifies whether the destination snapshot of the imported image should be
- // encrypted. The default CMK for EBS is used unless you specify a non-default
- // AWS Key Management Service (AWS KMS) CMK using KmsKeyId. For more information,
- // see Amazon EBS Encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html)
+ // encrypted. The default KMS key for EBS is used unless you specify a non-default
+ // KMS key using KmsKeyId. For more information, see Amazon EBS Encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html)
// in the Amazon Elastic Compute Cloud User Guide.
Encrypted *bool `type:"boolean"`
- // An identifier for the symmetric AWS Key Management Service (AWS KMS) customer
- // master key (CMK) to use when creating the encrypted snapshot. This parameter
- // is only required if you want to use a non-default CMK; if this parameter
- // is not specified, the default CMK for EBS is used. If a KmsKeyId is specified,
- // the Encrypted flag must also be set.
+ // An identifier for the symmetric KMS key to use when creating the encrypted
+ // snapshot. This parameter is only required if you want to use a non-default
+ // KMS key; if this parameter is not specified, the default KMS key for EBS
+ // is used. If a KmsKeyId is specified, the Encrypted flag must also be set.
//
- // The CMK identifier may be provided in any of the following formats:
+ // The KMS key identifier may be provided in any of the following formats:
//
// * Key ID
//
// * Key alias. The alias ARN contains the arn:aws:kms namespace, followed
- // by the Region of the CMK, the AWS account ID of the CMK owner, the alias
- // namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
+ // by the Region of the key, the Amazon Web Services account ID of the key
+ // owner, the alias namespace, and then the key alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
//
// * ARN using key ID. The ID ARN contains the arn:aws:kms namespace, followed
- // by the Region of the CMK, the AWS account ID of the CMK owner, the key
- // namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.
+ // by the Region of the key, the Amazon Web Services account ID of the key
+ // owner, the key namespace, and then the key ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.
//
// * ARN using key alias. The alias ARN contains the arn:aws:kms namespace,
- // followed by the Region of the CMK, the AWS account ID of the CMK owner,
- // the alias namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
+ // followed by the Region of the key, the Amazon Web Services account ID
+ // of the key owner, the alias namespace, and then the key alias. For example,
+ // arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
//
- // AWS parses KmsKeyId asynchronously, meaning that the action you call may
- // appear to complete even though you provided an invalid identifier. This action
- // will eventually report failure.
+ // Amazon Web Services parses KmsKeyId asynchronously, meaning that the action
+ // you call may appear to complete even though you provided an invalid identifier.
+ // This action will eventually report failure.
//
- // The specified CMK must exist in the Region that the snapshot is being copied
- // to.
+ // The specified KMS key must exist in the Region that the snapshot is being
+ // copied to.
//
- // Amazon EBS does not support asymmetric CMKs.
+ // Amazon EBS does not support asymmetric KMS keys.
KmsKeyId *string `type:"string"`
// The name of the role to use when not using the default role, 'vmimport'.
RoleName *string `type:"string"`
- // The tags to apply to the snapshot being imported.
+ // The tags to apply to the import snapshot task during creation.
TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"`
}
@@ -87571,7 +93296,7 @@ type ImportSnapshotOutput struct {
// Information about the import snapshot task.
SnapshotTaskDetail *SnapshotTaskDetail `locationName:"snapshotTaskDetail" type:"structure"`
- // Any tags assigned to the snapshot being imported.
+ // Any tags assigned to the import snapshot task.
Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
}
@@ -87920,6 +93645,10 @@ type Instance struct {
// Any block device mapping entries for the instance.
BlockDeviceMappings []*InstanceBlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"`
+ // The boot mode of the instance. For more information, see Boot modes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-boot.html)
+ // in the Amazon EC2 User Guide.
+ BootMode *string `locationName:"bootMode" type:"string" enum:"BootModeValues"`
+
// The ID of the Capacity Reservation.
CapacityReservationId *string `locationName:"capacityReservationId" type:"string"`
@@ -87948,7 +93677,7 @@ type Instance struct {
// Specifies whether enhanced networking with ENA is enabled.
EnaSupport *bool `locationName:"enaSupport" type:"boolean"`
- // Indicates whether the instance is enabled for AWS Nitro Enclaves.
+ // Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves.
EnclaveOptions *EnclaveOptions `locationName:"enclaveOptions" type:"structure"`
// Indicates whether the instance is enabled for hibernation.
@@ -88045,12 +93774,7 @@ type Instance struct {
// The security groups for the instance.
SecurityGroups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"`
- // Specifies whether to enable an instance launched in a VPC to perform NAT.
- // This controls whether source/destination checking is enabled on the instance.
- // A value of true means that checking is enabled, and false means that checking
- // is disabled. The value must be false for the instance to perform NAT. For
- // more information, see NAT Instances (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html)
- // in the Amazon Virtual Private Cloud User Guide.
+ // Indicates whether source/destination checking is enabled.
SourceDestCheck *bool `locationName:"sourceDestCheck" type:"boolean"`
// If the request is a Spot Instance request, the ID of the request.
@@ -88110,6 +93834,12 @@ func (s *Instance) SetBlockDeviceMappings(v []*InstanceBlockDeviceMapping) *Inst
return s
}
+// SetBootMode sets the BootMode field's value.
+func (s *Instance) SetBootMode(v string) *Instance {
+ s.BootMode = &v
+ return s
+}
+
// SetCapacityReservationId sets the CapacityReservationId field's value.
func (s *Instance) SetCapacityReservationId(v string) *Instance {
s.CapacityReservationId = &v
@@ -88612,6 +94342,352 @@ func (s *InstanceCreditSpecificationRequest) SetInstanceId(v string) *InstanceCr
return s
}
+// The event window.
+type InstanceEventWindow struct {
+ _ struct{} `type:"structure"`
+
+ // One or more targets associated with the event window.
+ AssociationTarget *InstanceEventWindowAssociationTarget `locationName:"associationTarget" type:"structure"`
+
+ // The cron expression defined for the event window.
+ CronExpression *string `locationName:"cronExpression" type:"string"`
+
+ // The ID of the event window.
+ InstanceEventWindowId *string `locationName:"instanceEventWindowId" type:"string"`
+
+ // The name of the event window.
+ Name *string `locationName:"name" type:"string"`
+
+ // The current state of the event window.
+ State *string `locationName:"state" type:"string" enum:"InstanceEventWindowState"`
+
+ // The instance tags associated with the event window.
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
+
+ // One or more time ranges defined for the event window.
+ TimeRanges []*InstanceEventWindowTimeRange `locationName:"timeRangeSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s InstanceEventWindow) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceEventWindow) GoString() string {
+ return s.String()
+}
+
+// SetAssociationTarget sets the AssociationTarget field's value.
+func (s *InstanceEventWindow) SetAssociationTarget(v *InstanceEventWindowAssociationTarget) *InstanceEventWindow {
+ s.AssociationTarget = v
+ return s
+}
+
+// SetCronExpression sets the CronExpression field's value.
+func (s *InstanceEventWindow) SetCronExpression(v string) *InstanceEventWindow {
+ s.CronExpression = &v
+ return s
+}
+
+// SetInstanceEventWindowId sets the InstanceEventWindowId field's value.
+func (s *InstanceEventWindow) SetInstanceEventWindowId(v string) *InstanceEventWindow {
+ s.InstanceEventWindowId = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *InstanceEventWindow) SetName(v string) *InstanceEventWindow {
+ s.Name = &v
+ return s
+}
+
+// SetState sets the State field's value.
+func (s *InstanceEventWindow) SetState(v string) *InstanceEventWindow {
+ s.State = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *InstanceEventWindow) SetTags(v []*Tag) *InstanceEventWindow {
+ s.Tags = v
+ return s
+}
+
+// SetTimeRanges sets the TimeRanges field's value.
+func (s *InstanceEventWindow) SetTimeRanges(v []*InstanceEventWindowTimeRange) *InstanceEventWindow {
+ s.TimeRanges = v
+ return s
+}
+
+// One or more targets associated with the specified event window. Only one
+// type of target (instance ID, instance tag, or Dedicated Host ID) can be associated
+// with an event window.
+type InstanceEventWindowAssociationRequest struct {
+ _ struct{} `type:"structure"`
+
+ // The IDs of the Dedicated Hosts to associate with the event window.
+ DedicatedHostIds []*string `locationName:"DedicatedHostId" locationNameList:"item" type:"list"`
+
+ // The IDs of the instances to associate with the event window. If the instance
+ // is on a Dedicated Host, you can't specify the Instance ID parameter; you
+ // must use the Dedicated Host ID parameter.
+ InstanceIds []*string `locationName:"InstanceId" locationNameList:"item" type:"list"`
+
+ // The instance tags to associate with the event window. Any instances associated
+ // with the tags will be associated with the event window.
+ InstanceTags []*Tag `locationName:"InstanceTag" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s InstanceEventWindowAssociationRequest) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceEventWindowAssociationRequest) GoString() string {
+ return s.String()
+}
+
+// SetDedicatedHostIds sets the DedicatedHostIds field's value.
+func (s *InstanceEventWindowAssociationRequest) SetDedicatedHostIds(v []*string) *InstanceEventWindowAssociationRequest {
+ s.DedicatedHostIds = v
+ return s
+}
+
+// SetInstanceIds sets the InstanceIds field's value.
+func (s *InstanceEventWindowAssociationRequest) SetInstanceIds(v []*string) *InstanceEventWindowAssociationRequest {
+ s.InstanceIds = v
+ return s
+}
+
+// SetInstanceTags sets the InstanceTags field's value.
+func (s *InstanceEventWindowAssociationRequest) SetInstanceTags(v []*Tag) *InstanceEventWindowAssociationRequest {
+ s.InstanceTags = v
+ return s
+}
+
+// One or more targets associated with the event window.
+type InstanceEventWindowAssociationTarget struct {
+ _ struct{} `type:"structure"`
+
+ // The IDs of the Dedicated Hosts associated with the event window.
+ DedicatedHostIds []*string `locationName:"dedicatedHostIdSet" locationNameList:"item" type:"list"`
+
+ // The IDs of the instances associated with the event window.
+ InstanceIds []*string `locationName:"instanceIdSet" locationNameList:"item" type:"list"`
+
+ // The instance tags associated with the event window. Any instances associated
+ // with the tags will be associated with the event window.
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s InstanceEventWindowAssociationTarget) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceEventWindowAssociationTarget) GoString() string {
+ return s.String()
+}
+
+// SetDedicatedHostIds sets the DedicatedHostIds field's value.
+func (s *InstanceEventWindowAssociationTarget) SetDedicatedHostIds(v []*string) *InstanceEventWindowAssociationTarget {
+ s.DedicatedHostIds = v
+ return s
+}
+
+// SetInstanceIds sets the InstanceIds field's value.
+func (s *InstanceEventWindowAssociationTarget) SetInstanceIds(v []*string) *InstanceEventWindowAssociationTarget {
+ s.InstanceIds = v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *InstanceEventWindowAssociationTarget) SetTags(v []*Tag) *InstanceEventWindowAssociationTarget {
+ s.Tags = v
+ return s
+}
+
+// The targets to disassociate from the specified event window.
+type InstanceEventWindowDisassociationRequest struct {
+ _ struct{} `type:"structure"`
+
+ // The IDs of the Dedicated Hosts to disassociate from the event window.
+ DedicatedHostIds []*string `locationName:"DedicatedHostId" locationNameList:"item" type:"list"`
+
+ // The IDs of the instances to disassociate from the event window.
+ InstanceIds []*string `locationName:"InstanceId" locationNameList:"item" type:"list"`
+
+ // The instance tags to disassociate from the event window. Any instances associated
+ // with the tags will be disassociated from the event window.
+ InstanceTags []*Tag `locationName:"InstanceTag" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s InstanceEventWindowDisassociationRequest) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceEventWindowDisassociationRequest) GoString() string {
+ return s.String()
+}
+
+// SetDedicatedHostIds sets the DedicatedHostIds field's value.
+func (s *InstanceEventWindowDisassociationRequest) SetDedicatedHostIds(v []*string) *InstanceEventWindowDisassociationRequest {
+ s.DedicatedHostIds = v
+ return s
+}
+
+// SetInstanceIds sets the InstanceIds field's value.
+func (s *InstanceEventWindowDisassociationRequest) SetInstanceIds(v []*string) *InstanceEventWindowDisassociationRequest {
+ s.InstanceIds = v
+ return s
+}
+
+// SetInstanceTags sets the InstanceTags field's value.
+func (s *InstanceEventWindowDisassociationRequest) SetInstanceTags(v []*Tag) *InstanceEventWindowDisassociationRequest {
+ s.InstanceTags = v
+ return s
+}
+
+// The state of the event window.
+type InstanceEventWindowStateChange struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the event window.
+ InstanceEventWindowId *string `locationName:"instanceEventWindowId" type:"string"`
+
+ // The current state of the event window.
+ State *string `locationName:"state" type:"string" enum:"InstanceEventWindowState"`
+}
+
+// String returns the string representation
+func (s InstanceEventWindowStateChange) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceEventWindowStateChange) GoString() string {
+ return s.String()
+}
+
+// SetInstanceEventWindowId sets the InstanceEventWindowId field's value.
+func (s *InstanceEventWindowStateChange) SetInstanceEventWindowId(v string) *InstanceEventWindowStateChange {
+ s.InstanceEventWindowId = &v
+ return s
+}
+
+// SetState sets the State field's value.
+func (s *InstanceEventWindowStateChange) SetState(v string) *InstanceEventWindowStateChange {
+ s.State = &v
+ return s
+}
+
+// The start day and time and the end day and time of the time range, in UTC.
+type InstanceEventWindowTimeRange struct {
+ _ struct{} `type:"structure"`
+
+ // The hour when the time range ends.
+ EndHour *int64 `locationName:"endHour" type:"integer"`
+
+ // The day on which the time range ends.
+ EndWeekDay *string `locationName:"endWeekDay" type:"string" enum:"WeekDay"`
+
+ // The hour when the time range begins.
+ StartHour *int64 `locationName:"startHour" type:"integer"`
+
+ // The day on which the time range begins.
+ StartWeekDay *string `locationName:"startWeekDay" type:"string" enum:"WeekDay"`
+}
+
+// String returns the string representation
+func (s InstanceEventWindowTimeRange) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceEventWindowTimeRange) GoString() string {
+ return s.String()
+}
+
+// SetEndHour sets the EndHour field's value.
+func (s *InstanceEventWindowTimeRange) SetEndHour(v int64) *InstanceEventWindowTimeRange {
+ s.EndHour = &v
+ return s
+}
+
+// SetEndWeekDay sets the EndWeekDay field's value.
+func (s *InstanceEventWindowTimeRange) SetEndWeekDay(v string) *InstanceEventWindowTimeRange {
+ s.EndWeekDay = &v
+ return s
+}
+
+// SetStartHour sets the StartHour field's value.
+func (s *InstanceEventWindowTimeRange) SetStartHour(v int64) *InstanceEventWindowTimeRange {
+ s.StartHour = &v
+ return s
+}
+
+// SetStartWeekDay sets the StartWeekDay field's value.
+func (s *InstanceEventWindowTimeRange) SetStartWeekDay(v string) *InstanceEventWindowTimeRange {
+ s.StartWeekDay = &v
+ return s
+}
+
+// The start day and time and the end day and time of the time range, in UTC.
+type InstanceEventWindowTimeRangeRequest struct {
+ _ struct{} `type:"structure"`
+
+ // The hour when the time range ends.
+ EndHour *int64 `type:"integer"`
+
+ // The day on which the time range ends.
+ EndWeekDay *string `type:"string" enum:"WeekDay"`
+
+ // The hour when the time range begins.
+ StartHour *int64 `type:"integer"`
+
+ // The day on which the time range begins.
+ StartWeekDay *string `type:"string" enum:"WeekDay"`
+}
+
+// String returns the string representation
+func (s InstanceEventWindowTimeRangeRequest) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceEventWindowTimeRangeRequest) GoString() string {
+ return s.String()
+}
+
+// SetEndHour sets the EndHour field's value.
+func (s *InstanceEventWindowTimeRangeRequest) SetEndHour(v int64) *InstanceEventWindowTimeRangeRequest {
+ s.EndHour = &v
+ return s
+}
+
+// SetEndWeekDay sets the EndWeekDay field's value.
+func (s *InstanceEventWindowTimeRangeRequest) SetEndWeekDay(v string) *InstanceEventWindowTimeRangeRequest {
+ s.EndWeekDay = &v
+ return s
+}
+
+// SetStartHour sets the StartHour field's value.
+func (s *InstanceEventWindowTimeRangeRequest) SetStartHour(v int64) *InstanceEventWindowTimeRangeRequest {
+ s.StartHour = &v
+ return s
+}
+
+// SetStartWeekDay sets the StartWeekDay field's value.
+func (s *InstanceEventWindowTimeRangeRequest) SetStartWeekDay(v string) *InstanceEventWindowTimeRangeRequest {
+ s.StartWeekDay = &v
+ return s
+}
+
// Describes an instance to export.
type InstanceExportDetails struct {
_ struct{} `type:"structure"`
@@ -88680,6 +94756,30 @@ func (s *InstanceFamilyCreditSpecification) SetInstanceFamily(v string) *Instanc
return s
}
+// Information about an IPv4 prefix.
+type InstanceIpv4Prefix struct {
+ _ struct{} `type:"structure"`
+
+ // One or more IPv4 prefixes assigned to the network interface.
+ Ipv4Prefix *string `locationName:"ipv4Prefix" type:"string"`
+}
+
+// String returns the string representation
+func (s InstanceIpv4Prefix) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceIpv4Prefix) GoString() string {
+ return s.String()
+}
+
+// SetIpv4Prefix sets the Ipv4Prefix field's value.
+func (s *InstanceIpv4Prefix) SetIpv4Prefix(v string) *InstanceIpv4Prefix {
+ s.Ipv4Prefix = &v
+ return s
+}
+
// Describes an IPv6 address.
type InstanceIpv6Address struct {
_ struct{} `type:"structure"`
@@ -88728,6 +94828,30 @@ func (s *InstanceIpv6AddressRequest) SetIpv6Address(v string) *InstanceIpv6Addre
return s
}
+// Information about an IPv6 prefix.
+type InstanceIpv6Prefix struct {
+ _ struct{} `type:"structure"`
+
+ // One or more IPv6 prefixes assigned to the network interface.
+ Ipv6Prefix *string `locationName:"ipv6Prefix" type:"string"`
+}
+
+// String returns the string representation
+func (s InstanceIpv6Prefix) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceIpv6Prefix) GoString() string {
+ return s.String()
+}
+
+// SetIpv6Prefix sets the Ipv6Prefix field's value.
+func (s *InstanceIpv6Prefix) SetIpv6Prefix(v string) *InstanceIpv6Prefix {
+ s.Ipv6Prefix = &v
+ return s
+}
+
// Describes the market (purchasing) option for the instances.
type InstanceMarketOptionsRequest struct {
_ struct{} `type:"structure"`
@@ -88772,6 +94896,9 @@ type InstanceMetadataOptionsRequest struct {
// metadata.
HttpEndpoint *string `type:"string" enum:"InstanceMetadataEndpointState"`
+ // Enables or disables the IPv6 endpoint for the instance metadata service.
+ HttpProtocolIpv6 *string `type:"string" enum:"InstanceMetadataProtocolState"`
+
// The desired HTTP PUT response hop limit for instance metadata requests. The
// larger the number, the further instance metadata requests can travel.
//
@@ -88812,6 +94939,12 @@ func (s *InstanceMetadataOptionsRequest) SetHttpEndpoint(v string) *InstanceMeta
return s
}
+// SetHttpProtocolIpv6 sets the HttpProtocolIpv6 field's value.
+func (s *InstanceMetadataOptionsRequest) SetHttpProtocolIpv6(v string) *InstanceMetadataOptionsRequest {
+ s.HttpProtocolIpv6 = &v
+ return s
+}
+
// SetHttpPutResponseHopLimit sets the HttpPutResponseHopLimit field's value.
func (s *InstanceMetadataOptionsRequest) SetHttpPutResponseHopLimit(v int64) *InstanceMetadataOptionsRequest {
s.HttpPutResponseHopLimit = &v
@@ -88835,6 +94968,10 @@ type InstanceMetadataOptionsResponse struct {
// metadata.
HttpEndpoint *string `locationName:"httpEndpoint" type:"string" enum:"InstanceMetadataEndpointState"`
+ // Whether or not the IPv6 endpoint for the instance metadata service is enabled
+ // or disabled.
+ HttpProtocolIpv6 *string `locationName:"httpProtocolIpv6" type:"string" enum:"InstanceMetadataProtocolState"`
+
// The desired HTTP PUT response hop limit for instance metadata requests. The
// larger the number, the further instance metadata requests can travel.
//
@@ -88883,6 +95020,12 @@ func (s *InstanceMetadataOptionsResponse) SetHttpEndpoint(v string) *InstanceMet
return s
}
+// SetHttpProtocolIpv6 sets the HttpProtocolIpv6 field's value.
+func (s *InstanceMetadataOptionsResponse) SetHttpProtocolIpv6(v string) *InstanceMetadataOptionsResponse {
+ s.HttpProtocolIpv6 = &v
+ return s
+}
+
// SetHttpPutResponseHopLimit sets the HttpPutResponseHopLimit field's value.
func (s *InstanceMetadataOptionsResponse) SetHttpPutResponseHopLimit(v int64) *InstanceMetadataOptionsResponse {
s.HttpPutResponseHopLimit = &v
@@ -88953,19 +95096,25 @@ type InstanceNetworkInterface struct {
// Describes the type of network interface.
//
- // Valid values: interface | efa
+ // Valid values: interface | efa | trunk
InterfaceType *string `locationName:"interfaceType" type:"string"`
+ // The IPv4 delegated prefixes that are assigned to the network interface.
+ Ipv4Prefixes []*InstanceIpv4Prefix `locationName:"ipv4PrefixSet" locationNameList:"item" type:"list"`
+
// One or more IPv6 addresses associated with the network interface.
Ipv6Addresses []*InstanceIpv6Address `locationName:"ipv6AddressesSet" locationNameList:"item" type:"list"`
+ // The IPv6 delegated prefixes that are assigned to the network interface.
+ Ipv6Prefixes []*InstanceIpv6Prefix `locationName:"ipv6PrefixSet" locationNameList:"item" type:"list"`
+
// The MAC address.
MacAddress *string `locationName:"macAddress" type:"string"`
// The ID of the network interface.
NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"`
- // The ID of the AWS account that created the network interface.
+ // The ID of the Amazon Web Services account that created the network interface.
OwnerId *string `locationName:"ownerId" type:"string"`
// The private DNS name.
@@ -88977,7 +95126,7 @@ type InstanceNetworkInterface struct {
// One or more private IPv4 addresses associated with the network interface.
PrivateIpAddresses []*InstancePrivateIpAddress `locationName:"privateIpAddressesSet" locationNameList:"item" type:"list"`
- // Indicates whether to validate network traffic to or from this network interface.
+ // Indicates whether source/destination checking is enabled.
SourceDestCheck *bool `locationName:"sourceDestCheck" type:"boolean"`
// The status of the network interface.
@@ -89030,12 +95179,24 @@ func (s *InstanceNetworkInterface) SetInterfaceType(v string) *InstanceNetworkIn
return s
}
+// SetIpv4Prefixes sets the Ipv4Prefixes field's value.
+func (s *InstanceNetworkInterface) SetIpv4Prefixes(v []*InstanceIpv4Prefix) *InstanceNetworkInterface {
+ s.Ipv4Prefixes = v
+ return s
+}
+
// SetIpv6Addresses sets the Ipv6Addresses field's value.
func (s *InstanceNetworkInterface) SetIpv6Addresses(v []*InstanceIpv6Address) *InstanceNetworkInterface {
s.Ipv6Addresses = v
return s
}
+// SetIpv6Prefixes sets the Ipv6Prefixes field's value.
+func (s *InstanceNetworkInterface) SetIpv6Prefixes(v []*InstanceIpv6Prefix) *InstanceNetworkInterface {
+ s.Ipv6Prefixes = v
+ return s
+}
+
// SetMacAddress sets the MacAddress field's value.
func (s *InstanceNetworkInterface) SetMacAddress(v string) *InstanceNetworkInterface {
s.MacAddress = &v
@@ -89224,7 +95385,8 @@ type InstanceNetworkInterfaceSpecification struct {
//
// You can only assign a carrier IP address to a network interface that is in
// a subnet in a Wavelength Zone. For more information about carrier IP addresses,
- // see Carrier IP addresses in the AWS Wavelength Developer Guide.
+ // see Carrier IP addresses in the Amazon Web Services Wavelength Developer
+ // Guide.
AssociateCarrierIpAddress *bool `type:"boolean"`
// Indicates whether to assign a public IPv4 address to an instance you launch
@@ -89260,11 +95422,17 @@ type InstanceNetworkInterfaceSpecification struct {
// see Elastic Fabric Adapter (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html)
// in the Amazon Elastic Compute Cloud User Guide.
//
- // If you are not creating an EFA, specify interface or omit this parameter.
- //
// Valid values: interface | efa
InterfaceType *string `type:"string"`
+ // The number of IPv4 delegated prefixes to be automatically assigned to the
+ // network interface. You cannot use this option if you use the Ipv4Prefix option.
+ Ipv4PrefixCount *int64 `type:"integer"`
+
+ // One or more IPv4 delegated prefixes to be assigned to the network interface.
+ // You cannot use this option if you use the Ipv4PrefixCount option.
+ Ipv4Prefixes []*Ipv4PrefixSpecificationRequest `locationName:"Ipv4Prefix" locationNameList:"item" type:"list"`
+
// A number of IPv6 addresses to assign to the network interface. Amazon EC2
// chooses the IPv6 addresses from the range of the subnet. You cannot specify
// this option and the option to assign specific IPv6 addresses in the same
@@ -89278,9 +95446,22 @@ type InstanceNetworkInterfaceSpecification struct {
// number of instances to launch.
Ipv6Addresses []*InstanceIpv6Address `locationName:"ipv6AddressesSet" queryName:"Ipv6Addresses" locationNameList:"item" type:"list"`
+ // The number of IPv6 delegated prefixes to be automatically assigned to the
+ // network interface. You cannot use this option if you use the Ipv6Prefix option.
+ Ipv6PrefixCount *int64 `type:"integer"`
+
+ // One or more IPv6 delegated prefixes to be assigned to the network interface.
+ // You cannot use this option if you use the Ipv6PrefixCount option.
+ Ipv6Prefixes []*Ipv6PrefixSpecificationRequest `locationName:"Ipv6Prefix" locationNameList:"item" type:"list"`
+
// The index of the network card. Some instance types support multiple network
// cards. The primary network interface must be assigned to network card index
// 0. The default is network card index 0.
+ //
+ // If you are using RequestSpotInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RequestSpotInstances.html)
+ // to create Spot Instances, omit this parameter because you can’t specify
+ // the network card index when using this API. To specify the network card index,
+ // use RunInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html).
NetworkCardIndex *int64 `type:"integer"`
// The ID of the network interface.
@@ -89366,6 +95547,18 @@ func (s *InstanceNetworkInterfaceSpecification) SetInterfaceType(v string) *Inst
return s
}
+// SetIpv4PrefixCount sets the Ipv4PrefixCount field's value.
+func (s *InstanceNetworkInterfaceSpecification) SetIpv4PrefixCount(v int64) *InstanceNetworkInterfaceSpecification {
+ s.Ipv4PrefixCount = &v
+ return s
+}
+
+// SetIpv4Prefixes sets the Ipv4Prefixes field's value.
+func (s *InstanceNetworkInterfaceSpecification) SetIpv4Prefixes(v []*Ipv4PrefixSpecificationRequest) *InstanceNetworkInterfaceSpecification {
+ s.Ipv4Prefixes = v
+ return s
+}
+
// SetIpv6AddressCount sets the Ipv6AddressCount field's value.
func (s *InstanceNetworkInterfaceSpecification) SetIpv6AddressCount(v int64) *InstanceNetworkInterfaceSpecification {
s.Ipv6AddressCount = &v
@@ -89378,6 +95571,18 @@ func (s *InstanceNetworkInterfaceSpecification) SetIpv6Addresses(v []*InstanceIp
return s
}
+// SetIpv6PrefixCount sets the Ipv6PrefixCount field's value.
+func (s *InstanceNetworkInterfaceSpecification) SetIpv6PrefixCount(v int64) *InstanceNetworkInterfaceSpecification {
+ s.Ipv6PrefixCount = &v
+ return s
+}
+
+// SetIpv6Prefixes sets the Ipv6Prefixes field's value.
+func (s *InstanceNetworkInterfaceSpecification) SetIpv6Prefixes(v []*Ipv6PrefixSpecificationRequest) *InstanceNetworkInterfaceSpecification {
+ s.Ipv6Prefixes = v
+ return s
+}
+
// SetNetworkCardIndex sets the NetworkCardIndex field's value.
func (s *InstanceNetworkInterfaceSpecification) SetNetworkCardIndex(v int64) *InstanceNetworkInterfaceSpecification {
s.NetworkCardIndex = &v
@@ -89954,8 +96159,8 @@ type InstanceTypeInfo struct {
// Indicates whether instance storage is supported.
InstanceStorageSupported *bool `locationName:"instanceStorageSupported" type:"boolean"`
- // The instance type. For more information, see Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // The instance type. For more information, see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html)
+ // in the Amazon EC2 User Guide.
InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"`
// Describes the memory for the instance type.
@@ -89970,6 +96175,10 @@ type InstanceTypeInfo struct {
// Describes the processor.
ProcessorInfo *ProcessorInfo `locationName:"processorInfo" type:"structure"`
+ // The supported boot modes. For more information, see Boot modes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-boot.html)
+ // in the Amazon EC2 User Guide.
+ SupportedBootModes []*string `locationName:"supportedBootModes" locationNameList:"item" type:"list"`
+
// The supported root device types.
SupportedRootDeviceTypes []*string `locationName:"supportedRootDeviceTypes" locationNameList:"item" type:"list"`
@@ -90107,6 +96316,12 @@ func (s *InstanceTypeInfo) SetProcessorInfo(v *ProcessorInfo) *InstanceTypeInfo
return s
}
+// SetSupportedBootModes sets the SupportedBootModes field's value.
+func (s *InstanceTypeInfo) SetSupportedBootModes(v []*string) *InstanceTypeInfo {
+ s.SupportedBootModes = v
+ return s
+}
+
// SetSupportedRootDeviceTypes sets the SupportedRootDeviceTypes field's value.
func (s *InstanceTypeInfo) SetSupportedRootDeviceTypes(v []*string) *InstanceTypeInfo {
s.SupportedRootDeviceTypes = v
@@ -90135,8 +96350,8 @@ func (s *InstanceTypeInfo) SetVCpuInfo(v *VCpuInfo) *InstanceTypeInfo {
type InstanceTypeOffering struct {
_ struct{} `type:"structure"`
- // The instance type. For more information, see Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // The instance type. For more information, see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html)
+ // in the Amazon EC2 User Guide.
InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"`
// The identifier for the location. This depends on the location type. For example,
@@ -90180,10 +96395,12 @@ func (s *InstanceTypeOffering) SetLocationType(v string) *InstanceTypeOffering {
type InstanceUsage struct {
_ struct{} `type:"structure"`
- // The ID of the AWS account that is making use of the Capacity Reservation.
+ // The ID of the Amazon Web Services account that is making use of the Capacity
+ // Reservation.
AccountId *string `locationName:"accountId" type:"string"`
- // The number of instances the AWS account currently has in the Capacity Reservation.
+ // The number of instances the Amazon Web Services account currently has in
+ // the Capacity Reservation.
UsedInstanceCount *int64 `locationName:"usedInstanceCount" type:"integer"`
}
@@ -90209,6 +96426,53 @@ func (s *InstanceUsage) SetUsedInstanceCount(v int64) *InstanceUsage {
return s
}
+// Describes service integrations with VPC Flow logs.
+type IntegrateServices struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the integration with Amazon Athena.
+ AthenaIntegrations []*AthenaIntegration `locationName:"AthenaIntegration" locationNameList:"item" min:"1" type:"list"`
+}
+
+// String returns the string representation
+func (s IntegrateServices) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s IntegrateServices) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *IntegrateServices) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "IntegrateServices"}
+ if s.AthenaIntegrations != nil && len(s.AthenaIntegrations) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("AthenaIntegrations", 1))
+ }
+ if s.AthenaIntegrations != nil {
+ for i, v := range s.AthenaIntegrations {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AthenaIntegrations", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAthenaIntegrations sets the AthenaIntegrations field's value.
+func (s *IntegrateServices) SetAthenaIntegrations(v []*AthenaIntegration) *IntegrateServices {
+ s.AthenaIntegrations = v
+ return s
+}
+
// Describes an internet gateway.
type InternetGateway struct {
_ struct{} `type:"structure"`
@@ -90219,7 +96483,7 @@ type InternetGateway struct {
// The ID of the internet gateway.
InternetGatewayId *string `locationName:"internetGatewayId" type:"string"`
- // The ID of the AWS account that owns the internet gateway.
+ // The ID of the Amazon Web Services account that owns the internet gateway.
OwnerId *string `locationName:"ownerId" type:"string"`
// Any tags assigned to the internet gateway.
@@ -90329,7 +96593,7 @@ type IpPermission struct {
// types, you must specify all codes.
ToPort *int64 `locationName:"toPort" type:"integer"`
- // The security group and AWS account ID pairs.
+ // The security group and Amazon Web Services account ID pairs.
UserIdGroupPairs []*UserIdGroupPair `locationName:"groups" locationNameList:"item" type:"list"`
}
@@ -90423,6 +96687,82 @@ func (s *IpRange) SetDescription(v string) *IpRange {
return s
}
+// Describes an IPv4 prefix.
+type Ipv4PrefixSpecification struct {
+ _ struct{} `type:"structure"`
+
+ // The IPv4 prefix. For information, see Assigning prefixes to Amazon EC2 network
+ // interfaces (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-prefix-eni.html)
+ // in the Amazon Elastic Compute Cloud User Guide.
+ Ipv4Prefix *string `locationName:"ipv4Prefix" type:"string"`
+}
+
+// String returns the string representation
+func (s Ipv4PrefixSpecification) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Ipv4PrefixSpecification) GoString() string {
+ return s.String()
+}
+
+// SetIpv4Prefix sets the Ipv4Prefix field's value.
+func (s *Ipv4PrefixSpecification) SetIpv4Prefix(v string) *Ipv4PrefixSpecification {
+ s.Ipv4Prefix = &v
+ return s
+}
+
+// Describes the IPv4 prefix option for a network interface.
+type Ipv4PrefixSpecificationRequest struct {
+ _ struct{} `type:"structure"`
+
+ // The IPv4 prefix. For information, see Assigning prefixes to Amazon EC2 network
+ // interfaces (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-prefix-eni.html)
+ // in the Amazon Elastic Compute Cloud User Guide.
+ Ipv4Prefix *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Ipv4PrefixSpecificationRequest) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Ipv4PrefixSpecificationRequest) GoString() string {
+ return s.String()
+}
+
+// SetIpv4Prefix sets the Ipv4Prefix field's value.
+func (s *Ipv4PrefixSpecificationRequest) SetIpv4Prefix(v string) *Ipv4PrefixSpecificationRequest {
+ s.Ipv4Prefix = &v
+ return s
+}
+
+// Information about the IPv4 delegated prefixes assigned to a network interface.
+type Ipv4PrefixSpecificationResponse struct {
+ _ struct{} `type:"structure"`
+
+ // One or more IPv4 delegated prefixes assigned to the network interface.
+ Ipv4Prefix *string `locationName:"ipv4Prefix" type:"string"`
+}
+
+// String returns the string representation
+func (s Ipv4PrefixSpecificationResponse) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Ipv4PrefixSpecificationResponse) GoString() string {
+ return s.String()
+}
+
+// SetIpv4Prefix sets the Ipv4Prefix field's value.
+func (s *Ipv4PrefixSpecificationResponse) SetIpv4Prefix(v string) *Ipv4PrefixSpecificationResponse {
+ s.Ipv4Prefix = &v
+ return s
+}
+
// Describes an IPv6 CIDR block association.
type Ipv6CidrAssociation struct {
_ struct{} `type:"structure"`
@@ -90531,6 +96871,78 @@ func (s *Ipv6Pool) SetTags(v []*Tag) *Ipv6Pool {
return s
}
+// Describes the IPv6 prefix.
+type Ipv6PrefixSpecification struct {
+ _ struct{} `type:"structure"`
+
+ // The IPv6 prefix.
+ Ipv6Prefix *string `locationName:"ipv6Prefix" type:"string"`
+}
+
+// String returns the string representation
+func (s Ipv6PrefixSpecification) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Ipv6PrefixSpecification) GoString() string {
+ return s.String()
+}
+
+// SetIpv6Prefix sets the Ipv6Prefix field's value.
+func (s *Ipv6PrefixSpecification) SetIpv6Prefix(v string) *Ipv6PrefixSpecification {
+ s.Ipv6Prefix = &v
+ return s
+}
+
+// Describes the IPv6 prefix option for a network interface.
+type Ipv6PrefixSpecificationRequest struct {
+ _ struct{} `type:"structure"`
+
+ // The IPv6 prefix.
+ Ipv6Prefix *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Ipv6PrefixSpecificationRequest) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Ipv6PrefixSpecificationRequest) GoString() string {
+ return s.String()
+}
+
+// SetIpv6Prefix sets the Ipv6Prefix field's value.
+func (s *Ipv6PrefixSpecificationRequest) SetIpv6Prefix(v string) *Ipv6PrefixSpecificationRequest {
+ s.Ipv6Prefix = &v
+ return s
+}
+
+// Information about the IPv6 delegated prefixes assigned to a network interface.
+type Ipv6PrefixSpecificationResponse struct {
+ _ struct{} `type:"structure"`
+
+ // One or more IPv6 delegated prefixes assigned to the network interface.
+ Ipv6Prefix *string `locationName:"ipv6Prefix" type:"string"`
+}
+
+// String returns the string representation
+func (s Ipv6PrefixSpecificationResponse) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Ipv6PrefixSpecificationResponse) GoString() string {
+ return s.String()
+}
+
+// SetIpv6Prefix sets the Ipv6Prefix field's value.
+func (s *Ipv6PrefixSpecificationResponse) SetIpv6Prefix(v string) *Ipv6PrefixSpecificationResponse {
+ s.Ipv6Prefix = &v
+ return s
+}
+
// [EC2-VPC only] Describes an IPv6 range.
type Ipv6Range struct {
_ struct{} `type:"structure"`
@@ -90573,10 +96985,21 @@ func (s *Ipv6Range) SetDescription(v string) *Ipv6Range {
type KeyPairInfo struct {
_ struct{} `type:"structure"`
- // If you used CreateKeyPair to create the key pair, this is the SHA-1 digest
- // of the DER encoded private key. If you used ImportKeyPair to provide AWS
- // the public key, this is the MD5 public key fingerprint as specified in section
- // 4 of RFC4716.
+ // If you used CreateKeyPair to create the key pair:
+ //
+ // * For RSA key pairs, the key fingerprint is the SHA-1 digest of the DER
+ // encoded private key.
+ //
+ // * For ED25519 key pairs, the key fingerprint is the base64-encoded SHA-256
+ // digest, which is the default for OpenSSH, starting with OpenSSH 6.8 (http://www.openssh.com/txt/release-6.8).
+ //
+ // If you used ImportKeyPair to provide Amazon Web Services the public key:
+ //
+ // * For RSA key pairs, the key fingerprint is the MD5 public key fingerprint
+ // as specified in section 4 of RFC4716.
+ //
+ // * For ED25519 key pairs, the key fingerprint is the base64-encoded SHA-256
+ // digest, which is the default for OpenSSH, starting with OpenSSH 6.8 (http://www.openssh.com/txt/release-6.8).
KeyFingerprint *string `locationName:"keyFingerprint" type:"string"`
// The name of the key pair.
@@ -90585,6 +97008,9 @@ type KeyPairInfo struct {
// The ID of the key pair.
KeyPairId *string `locationName:"keyPairId" type:"string"`
+ // The type of key pair.
+ KeyType *string `locationName:"keyType" type:"string" enum:"KeyType"`
+
// Any tags applied to the key pair.
Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
}
@@ -90617,6 +97043,12 @@ func (s *KeyPairInfo) SetKeyPairId(v string) *KeyPairInfo {
return s
}
+// SetKeyType sets the KeyType field's value.
+func (s *KeyPairInfo) SetKeyType(v string) *KeyPairInfo {
+ s.KeyType = &v
+ return s
+}
+
// SetTags sets the Tags field's value.
func (s *KeyPairInfo) SetTags(v []*Tag) *KeyPairInfo {
s.Tags = v
@@ -90664,6 +97096,8 @@ type LaunchPermission struct {
Group *string `locationName:"group" type:"string" enum:"PermissionGroup"`
// The AWS account ID.
+ //
+ // Constraints: Up to 10 000 account IDs can be specified in a single request.
UserId *string `locationName:"userId" type:"string"`
}
@@ -91004,8 +97438,7 @@ type LaunchTemplateBlockDeviceMapping struct {
// Information about the block device for an EBS volume.
Ebs *LaunchTemplateEbsBlockDevice `locationName:"ebs" type:"structure"`
- // Suppresses the specified device included in the block device mapping of the
- // AMI.
+ // To omit the device from the block device mapping, specify an empty string.
NoDevice *string `locationName:"noDevice" type:"string"`
// The virtual device name (ephemeralN).
@@ -91057,8 +97490,7 @@ type LaunchTemplateBlockDeviceMappingRequest struct {
// launched.
Ebs *LaunchTemplateEbsBlockDeviceRequest `type:"structure"`
- // Suppresses the specified device included in the block device mapping of the
- // AMI.
+ // To omit the device from the block device mapping, specify an empty string.
NoDevice *string `type:"string"`
// The virtual device name (ephemeralN). Instance store volumes are numbered
@@ -91320,7 +97752,7 @@ type LaunchTemplateEbsBlockDevice struct {
// The number of I/O operations per second (IOPS) that the volume supports.
Iops *int64 `locationName:"iops" type:"integer"`
- // The ARN of the AWS Key Management Service (AWS KMS) CMK used for encryption.
+ // The ARN of the Key Management Service (KMS) CMK used for encryption.
KmsKeyId *string `locationName:"kmsKeyId" type:"string"`
// The ID of the snapshot.
@@ -91423,13 +97855,11 @@ type LaunchTemplateEbsBlockDeviceRequest struct {
// on the Nitro System (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances).
// Other instance families guarantee performance up to 32,000 IOPS.
//
- // This parameter is required for io1 and io2 volumes. The default for gp3 volumes
- // is 3,000 IOPS. This parameter is not supported for gp2, st1, sc1, or standard
- // volumes.
+ // This parameter is supported for io1, io2, and gp3 volumes only. This parameter
+ // is not supported for gp2, st1, sc1, or standard volumes.
Iops *int64 `type:"integer"`
- // The ARN of the symmetric AWS Key Management Service (AWS KMS) CMK used for
- // encryption.
+ // The ARN of the symmetric Key Management Service (KMS) CMK used for encryption.
KmsKeyId *string `type:"string"`
// The ID of the snapshot.
@@ -91441,11 +97871,8 @@ type LaunchTemplateEbsBlockDeviceRequest struct {
Throughput *int64 `type:"integer"`
// The size of the volume, in GiBs. You must specify either a snapshot ID or
- // a volume size. If you specify a snapshot, the default is the snapshot size.
- // You can specify a volume size that is equal to or larger than the snapshot
- // size.
- //
- // The following are the supported volumes sizes for each volume type:
+ // a volume size. The following are the supported volumes sizes for each volume
+ // type:
//
// * gp2 and gp3: 1-16,384
//
@@ -91456,8 +97883,7 @@ type LaunchTemplateEbsBlockDeviceRequest struct {
// * standard: 1-1,024
VolumeSize *int64 `type:"integer"`
- // The volume type. The default is gp2. For more information, see Amazon EBS
- // volume types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html)
+ // The volume type. For more information, see Amazon EBS volume types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html)
// in the Amazon Elastic Compute Cloud User Guide.
VolumeType *string `type:"string" enum:"VolumeType"`
}
@@ -91610,12 +98036,13 @@ func (s *LaunchTemplateElasticInferenceAcceleratorResponse) SetType(v string) *L
return s
}
-// Indicates whether the instance is enabled for AWS Nitro Enclaves.
+// Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves.
type LaunchTemplateEnclaveOptions struct {
_ struct{} `type:"structure"`
- // If this parameter is set to true, the instance is enabled for AWS Nitro Enclaves;
- // otherwise, it is not enabled for AWS Nitro Enclaves.
+ // If this parameter is set to true, the instance is enabled for Amazon Web
+ // Services Nitro Enclaves; otherwise, it is not enabled for Amazon Web Services
+ // Nitro Enclaves.
Enabled *bool `locationName:"enabled" type:"boolean"`
}
@@ -91635,13 +98062,14 @@ func (s *LaunchTemplateEnclaveOptions) SetEnabled(v bool) *LaunchTemplateEnclave
return s
}
-// Indicates whether the instance is enabled for AWS Nitro Enclaves. For more
-// information, see What is AWS Nitro Enclaves? (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html)
-// in the AWS Nitro Enclaves User Guide.
+// Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves.
+// For more information, see What is Amazon Web Services Nitro Enclaves? (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html)
+// in the Amazon Web Services Nitro Enclaves User Guide.
type LaunchTemplateEnclaveOptionsRequest struct {
_ struct{} `type:"structure"`
- // To enable the instance for AWS Nitro Enclaves, set this parameter to true.
+ // To enable the instance for Amazon Web Services Nitro Enclaves, set this parameter
+ // to true.
Enabled *bool `type:"boolean"`
}
@@ -91858,6 +98286,11 @@ type LaunchTemplateInstanceMetadataOptions struct {
// metadata.
HttpEndpoint *string `locationName:"httpEndpoint" type:"string" enum:"LaunchTemplateInstanceMetadataEndpointState"`
+ // Enables or disables the IPv6 endpoint for the instance metadata service.
+ //
+ // Default: disabled
+ HttpProtocolIpv6 *string `locationName:"httpProtocolIpv6" type:"string" enum:"LaunchTemplateInstanceMetadataProtocolIpv6"`
+
// The desired HTTP PUT response hop limit for instance metadata requests. The
// larger the number, the further instance metadata requests can travel.
//
@@ -91906,6 +98339,12 @@ func (s *LaunchTemplateInstanceMetadataOptions) SetHttpEndpoint(v string) *Launc
return s
}
+// SetHttpProtocolIpv6 sets the HttpProtocolIpv6 field's value.
+func (s *LaunchTemplateInstanceMetadataOptions) SetHttpProtocolIpv6(v string) *LaunchTemplateInstanceMetadataOptions {
+ s.HttpProtocolIpv6 = &v
+ return s
+}
+
// SetHttpPutResponseHopLimit sets the HttpPutResponseHopLimit field's value.
func (s *LaunchTemplateInstanceMetadataOptions) SetHttpPutResponseHopLimit(v int64) *LaunchTemplateInstanceMetadataOptions {
s.HttpPutResponseHopLimit = &v
@@ -91937,6 +98376,11 @@ type LaunchTemplateInstanceMetadataOptionsRequest struct {
// metadata.
HttpEndpoint *string `type:"string" enum:"LaunchTemplateInstanceMetadataEndpointState"`
+ // Enables or disables the IPv6 endpoint for the instance metadata service.
+ //
+ // Default: disabled
+ HttpProtocolIpv6 *string `type:"string" enum:"LaunchTemplateInstanceMetadataProtocolIpv6"`
+
// The desired HTTP PUT response hop limit for instance metadata requests. The
// larger the number, the further instance metadata requests can travel.
//
@@ -91977,6 +98421,12 @@ func (s *LaunchTemplateInstanceMetadataOptionsRequest) SetHttpEndpoint(v string)
return s
}
+// SetHttpProtocolIpv6 sets the HttpProtocolIpv6 field's value.
+func (s *LaunchTemplateInstanceMetadataOptionsRequest) SetHttpProtocolIpv6(v string) *LaunchTemplateInstanceMetadataOptionsRequest {
+ s.HttpProtocolIpv6 = &v
+ return s
+}
+
// SetHttpPutResponseHopLimit sets the HttpPutResponseHopLimit field's value.
func (s *LaunchTemplateInstanceMetadataOptionsRequest) SetHttpPutResponseHopLimit(v int64) *LaunchTemplateInstanceMetadataOptionsRequest {
s.HttpPutResponseHopLimit = &v
@@ -91999,7 +98449,7 @@ type LaunchTemplateInstanceNetworkInterfaceSpecification struct {
// Use this option when you launch an instance in a Wavelength Zone and want
// to associate a Carrier IP address with the network interface. For more information
// about Carrier IP addresses, see Carrier IP addresses (https://docs.aws.amazon.com/wavelength/latest/developerguide/how-wavelengths-work.html#provider-owned-ip)
- // in the AWS Wavelength Developer Guide.
+ // in the Wavelength Developer Guide.
AssociateCarrierIpAddress *bool `locationName:"associateCarrierIpAddress" type:"boolean"`
// Indicates whether to associate a public IPv4 address with eth0 for a new
@@ -92021,12 +98471,26 @@ type LaunchTemplateInstanceNetworkInterfaceSpecification struct {
// The type of network interface.
InterfaceType *string `locationName:"interfaceType" type:"string"`
+ // The number of IPv4 prefixes that Amazon Web Services automatically assigned
+ // to the network interface.
+ Ipv4PrefixCount *int64 `locationName:"ipv4PrefixCount" type:"integer"`
+
+ // One or more IPv4 prefixes assigned to the network interface.
+ Ipv4Prefixes []*Ipv4PrefixSpecificationResponse `locationName:"ipv4PrefixSet" locationNameList:"item" type:"list"`
+
// The number of IPv6 addresses for the network interface.
Ipv6AddressCount *int64 `locationName:"ipv6AddressCount" type:"integer"`
// The IPv6 addresses for the network interface.
Ipv6Addresses []*InstanceIpv6Address `locationName:"ipv6AddressesSet" locationNameList:"item" type:"list"`
+ // The number of IPv6 prefixes that Amazon Web Services automatically assigned
+ // to the network interface.
+ Ipv6PrefixCount *int64 `locationName:"ipv6PrefixCount" type:"integer"`
+
+ // One or more IPv6 prefixes assigned to the network interface.
+ Ipv6Prefixes []*Ipv6PrefixSpecificationResponse `locationName:"ipv6PrefixSet" locationNameList:"item" type:"list"`
+
// The index of the network card.
NetworkCardIndex *int64 `locationName:"networkCardIndex" type:"integer"`
@@ -92098,6 +98562,18 @@ func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetInterfaceType(v
return s
}
+// SetIpv4PrefixCount sets the Ipv4PrefixCount field's value.
+func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetIpv4PrefixCount(v int64) *LaunchTemplateInstanceNetworkInterfaceSpecification {
+ s.Ipv4PrefixCount = &v
+ return s
+}
+
+// SetIpv4Prefixes sets the Ipv4Prefixes field's value.
+func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetIpv4Prefixes(v []*Ipv4PrefixSpecificationResponse) *LaunchTemplateInstanceNetworkInterfaceSpecification {
+ s.Ipv4Prefixes = v
+ return s
+}
+
// SetIpv6AddressCount sets the Ipv6AddressCount field's value.
func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetIpv6AddressCount(v int64) *LaunchTemplateInstanceNetworkInterfaceSpecification {
s.Ipv6AddressCount = &v
@@ -92110,6 +98586,18 @@ func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetIpv6Addresses(v
return s
}
+// SetIpv6PrefixCount sets the Ipv6PrefixCount field's value.
+func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetIpv6PrefixCount(v int64) *LaunchTemplateInstanceNetworkInterfaceSpecification {
+ s.Ipv6PrefixCount = &v
+ return s
+}
+
+// SetIpv6Prefixes sets the Ipv6Prefixes field's value.
+func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetIpv6Prefixes(v []*Ipv6PrefixSpecificationResponse) *LaunchTemplateInstanceNetworkInterfaceSpecification {
+ s.Ipv6Prefixes = v
+ return s
+}
+
// SetNetworkCardIndex sets the NetworkCardIndex field's value.
func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetNetworkCardIndex(v int64) *LaunchTemplateInstanceNetworkInterfaceSpecification {
s.NetworkCardIndex = &v
@@ -92155,7 +98643,7 @@ type LaunchTemplateInstanceNetworkInterfaceSpecificationRequest struct {
// Use this option when you launch an instance in a Wavelength Zone and want
// to associate a Carrier IP address with the network interface. For more information
// about Carrier IP addresses, see Carrier IP addresses (https://docs.aws.amazon.com/wavelength/latest/developerguide/how-wavelengths-work.html#provider-owned-ip)
- // in the AWS Wavelength Developer Guide.
+ // in the Wavelength Developer Guide.
AssociateCarrierIpAddress *bool `type:"boolean"`
// Associates a public IPv4 address with eth0 for a new network interface.
@@ -92182,6 +98670,14 @@ type LaunchTemplateInstanceNetworkInterfaceSpecificationRequest struct {
// Valid values: interface | efa
InterfaceType *string `type:"string"`
+ // The number of IPv4 prefixes to be automatically assigned to the network interface.
+ // You cannot use this option if you use the Ipv4Prefix option.
+ Ipv4PrefixCount *int64 `type:"integer"`
+
+ // One or more IPv4 prefixes to be assigned to the network interface. You cannot
+ // use this option if you use the Ipv4PrefixCount option.
+ Ipv4Prefixes []*Ipv4PrefixSpecificationRequest `locationName:"Ipv4Prefix" locationNameList:"item" type:"list"`
+
// The number of IPv6 addresses to assign to a network interface. Amazon EC2
// automatically selects the IPv6 addresses from the subnet range. You can't
// use this option if specifying specific IPv6 addresses.
@@ -92191,6 +98687,14 @@ type LaunchTemplateInstanceNetworkInterfaceSpecificationRequest struct {
// subnet. You can't use this option if you're specifying a number of IPv6 addresses.
Ipv6Addresses []*InstanceIpv6AddressRequest `locationNameList:"InstanceIpv6Address" type:"list"`
+ // The number of IPv6 prefixes to be automatically assigned to the network interface.
+ // You cannot use this option if you use the Ipv6Prefix option.
+ Ipv6PrefixCount *int64 `type:"integer"`
+
+ // One or more IPv6 prefixes to be assigned to the network interface. You cannot
+ // use this option if you use the Ipv6PrefixCount option.
+ Ipv6Prefixes []*Ipv6PrefixSpecificationRequest `locationName:"Ipv6Prefix" locationNameList:"item" type:"list"`
+
// The index of the network card. Some instance types support multiple network
// cards. The primary network interface must be assigned to network card index
// 0. The default is network card index 0.
@@ -92264,6 +98768,18 @@ func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetInterfac
return s
}
+// SetIpv4PrefixCount sets the Ipv4PrefixCount field's value.
+func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetIpv4PrefixCount(v int64) *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest {
+ s.Ipv4PrefixCount = &v
+ return s
+}
+
+// SetIpv4Prefixes sets the Ipv4Prefixes field's value.
+func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetIpv4Prefixes(v []*Ipv4PrefixSpecificationRequest) *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest {
+ s.Ipv4Prefixes = v
+ return s
+}
+
// SetIpv6AddressCount sets the Ipv6AddressCount field's value.
func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetIpv6AddressCount(v int64) *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest {
s.Ipv6AddressCount = &v
@@ -92276,6 +98792,18 @@ func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetIpv6Addr
return s
}
+// SetIpv6PrefixCount sets the Ipv6PrefixCount field's value.
+func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetIpv6PrefixCount(v int64) *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest {
+ s.Ipv6PrefixCount = &v
+ return s
+}
+
+// SetIpv6Prefixes sets the Ipv6Prefixes field's value.
+func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetIpv6Prefixes(v []*Ipv6PrefixSpecificationRequest) *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest {
+ s.Ipv6Prefixes = v
+ return s
+}
+
// SetNetworkCardIndex sets the NetworkCardIndex field's value.
func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetNetworkCardIndex(v int64) *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest {
s.NetworkCardIndex = &v
@@ -92370,12 +98898,20 @@ type LaunchTemplateOverrides struct {
// The instance type.
InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"`
- // The priority for the launch template override. If OnDemandAllocationStrategy
- // is set to prioritized, Spot Fleet uses priority to determine which launch
- // template override to use first in fulfilling On-Demand capacity. The highest
- // priority is launched first. Valid values are whole numbers starting at 0.
- // The lower the number, the higher the priority. If no number is set, the launch
- // template override has the lowest priority.
+ // The priority for the launch template override. The highest priority is launched
+ // first.
+ //
+ // If OnDemandAllocationStrategy is set to prioritized, Spot Fleet uses priority
+ // to determine which launch template override to use first in fulfilling On-Demand
+ // capacity.
+ //
+ // If the Spot AllocationStrategy is set to capacityOptimizedPrioritized, Spot
+ // Fleet uses priority on a best-effort basis to determine which launch template
+ // override to use in fulfilling Spot capacity, but optimizes for capacity first.
+ //
+ // Valid values are whole numbers starting at 0. The lower the number, the higher
+ // the priority. If no number is set, the launch template override has the lowest
+ // priority. You can set the same priority for different launch template overrides.
Priority *float64 `locationName:"priority" type:"double"`
// The maximum price per unit hour that you are willing to pay for a Spot Instance.
@@ -93874,6 +100410,88 @@ func (s *MemoryInfo) SetSizeInMiB(v int64) *MemoryInfo {
return s
}
+type ModifyAddressAttributeInput struct {
+ _ struct{} `type:"structure"`
+
+ // [EC2-VPC] The allocation ID.
+ //
+ // AllocationId is a required field
+ AllocationId *string `type:"string" required:"true"`
+
+ // The domain name to modify for the IP address.
+ DomainName *string `type:"string"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s ModifyAddressAttributeInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyAddressAttributeInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ModifyAddressAttributeInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ModifyAddressAttributeInput"}
+ if s.AllocationId == nil {
+ invalidParams.Add(request.NewErrParamRequired("AllocationId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAllocationId sets the AllocationId field's value.
+func (s *ModifyAddressAttributeInput) SetAllocationId(v string) *ModifyAddressAttributeInput {
+ s.AllocationId = &v
+ return s
+}
+
+// SetDomainName sets the DomainName field's value.
+func (s *ModifyAddressAttributeInput) SetDomainName(v string) *ModifyAddressAttributeInput {
+ s.DomainName = &v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *ModifyAddressAttributeInput) SetDryRun(v bool) *ModifyAddressAttributeInput {
+ s.DryRun = &v
+ return s
+}
+
+type ModifyAddressAttributeOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the Elastic IP address.
+ Address *AddressAttribute `locationName:"address" type:"structure"`
+}
+
+// String returns the string representation
+func (s ModifyAddressAttributeOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyAddressAttributeOutput) GoString() string {
+ return s.String()
+}
+
+// SetAddress sets the Address field's value.
+func (s *ModifyAddressAttributeOutput) SetAddress(v *AddressAttribute) *ModifyAddressAttributeOutput {
+ s.Address = v
+ return s
+}
+
type ModifyAvailabilityZoneGroupInput struct {
_ struct{} `type:"structure"`
@@ -93968,6 +100586,9 @@ func (s *ModifyAvailabilityZoneGroupOutput) SetReturn(v bool) *ModifyAvailabilit
type ModifyCapacityReservationInput struct {
_ struct{} `type:"structure"`
+ // Reserved. Capacity Reservations you have created are accepted by default.
+ Accept *bool `type:"boolean"`
+
// The ID of the Capacity Reservation.
//
// CapacityReservationId is a required field
@@ -94003,6 +100624,8 @@ type ModifyCapacityReservationInput struct {
EndDateType *string `type:"string" enum:"EndDateType"`
// The number of instances for which to reserve capacity.
+ //
+ // Valid range: 1 - 1000
InstanceCount *int64 `type:"integer"`
}
@@ -94029,6 +100652,12 @@ func (s *ModifyCapacityReservationInput) Validate() error {
return nil
}
+// SetAccept sets the Accept field's value.
+func (s *ModifyCapacityReservationInput) SetAccept(v bool) *ModifyCapacityReservationInput {
+ s.Accept = &v
+ return s
+}
+
// SetCapacityReservationId sets the CapacityReservationId field's value.
func (s *ModifyCapacityReservationInput) SetCapacityReservationId(v string) *ModifyCapacityReservationInput {
s.CapacityReservationId = &v
@@ -94364,12 +100993,11 @@ type ModifyEbsDefaultKmsKeyIdInput struct {
// it is UnauthorizedOperation.
DryRun *bool `type:"boolean"`
- // The identifier of the AWS Key Management Service (AWS KMS) customer master
- // key (CMK) to use for Amazon EBS encryption. If this parameter is not specified,
- // your AWS managed CMK for EBS is used. If KmsKeyId is specified, the encrypted
- // state must be true.
+ // The identifier of the Key Management Service (KMS) KMS key to use for Amazon
+ // EBS encryption. If this parameter is not specified, your KMS key for Amazon
+ // EBS is used. If KmsKeyId is specified, the encrypted state must be true.
//
- // You can specify the CMK using any of the following:
+ // You can specify the KMS key using any of the following:
//
// * Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.
//
@@ -94379,11 +101007,11 @@ type ModifyEbsDefaultKmsKeyIdInput struct {
//
// * Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
//
- // AWS authenticates the CMK asynchronously. Therefore, if you specify an ID,
- // alias, or ARN that is not valid, the action can appear to complete, but eventually
- // fails.
+ // Amazon Web Services authenticates the KMS key asynchronously. Therefore,
+ // if you specify an ID, alias, or ARN that is not valid, the action can appear
+ // to complete, but eventually fails.
//
- // Amazon EBS does not support asymmetric CMKs.
+ // Amazon EBS does not support asymmetric KMS keys.
//
// KmsKeyId is a required field
KmsKeyId *string `type:"string" required:"true"`
@@ -94427,7 +101055,7 @@ func (s *ModifyEbsDefaultKmsKeyIdInput) SetKmsKeyId(v string) *ModifyEbsDefaultK
type ModifyEbsDefaultKmsKeyIdOutput struct {
_ struct{} `type:"structure"`
- // The Amazon Resource Name (ARN) of the default CMK for encryption by default.
+ // The Amazon Resource Name (ARN) of the default KMS key for encryption by default.
KmsKeyId *string `locationName:"kmsKeyId" type:"string"`
}
@@ -94450,6 +101078,9 @@ func (s *ModifyEbsDefaultKmsKeyIdOutput) SetKmsKeyId(v string) *ModifyEbsDefault
type ModifyFleetInput struct {
_ struct{} `type:"structure"`
+ // Reserved.
+ Context *string `type:"string"`
+
// Checks whether you have the required permissions for the action, without
// actually making the request, and provides an error response. If you have
// the required permissions, the error response is DryRunOperation. Otherwise,
@@ -94511,6 +101142,12 @@ func (s *ModifyFleetInput) Validate() error {
return nil
}
+// SetContext sets the Context field's value.
+func (s *ModifyFleetInput) SetContext(v string) *ModifyFleetInput {
+ s.Context = &v
+ return s
+}
+
// SetDryRun sets the DryRun field's value.
func (s *ModifyFleetInput) SetDryRun(v bool) *ModifyFleetInput {
s.DryRun = &v
@@ -94724,8 +101361,8 @@ type ModifyHostsInput struct {
HostIds []*string `locationName:"hostId" locationNameList:"item" type:"list" required:"true"`
// Indicates whether to enable or disable host recovery for the Dedicated Host.
- // For more information, see Host Recovery (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/dedicated-hosts-recovery.html)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // For more information, see Host recovery (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/dedicated-hosts-recovery.html)
+ // in the Amazon EC2 User Guide.
HostRecovery *string `type:"string" enum:"HostRecovery"`
// Specifies the instance family to be supported by the Dedicated Host. Specify
@@ -95156,7 +101793,7 @@ type ModifyInstanceAttributeInput struct {
// To add instance store volumes to an Amazon EBS-backed instance, you must
// add them when you launch the instance. For more information, see Updating
// the block device mapping when launching an instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html#Using_OverridingAMIBDM)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // in the Amazon EC2 User Guide.
BlockDeviceMappings []*InstanceBlockDeviceMappingSpecification `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"`
// If the value is true, you can't terminate the instance using the Amazon EC2
@@ -95183,9 +101820,10 @@ type ModifyInstanceAttributeInput struct {
// a PV instance can make it unreachable.
EnaSupport *AttributeBooleanValue `locationName:"enaSupport" type:"structure"`
- // [EC2-VPC] Changes the security groups of the instance. You must specify at
- // least one security group, even if it's just the default security group for
- // the VPC. You must specify the security group ID, not the security group name.
+ // [EC2-VPC] Replaces the security groups of the instance with the specified
+ // security groups. You must specify at least one security group, even if it's
+ // just the default security group for the VPC. You must specify the security
+ // group ID, not the security group name.
Groups []*string `locationName:"GroupId" locationNameList:"groupId" type:"list"`
// The ID of the instance.
@@ -95198,8 +101836,9 @@ type ModifyInstanceAttributeInput struct {
InstanceInitiatedShutdownBehavior *AttributeValue `locationName:"instanceInitiatedShutdownBehavior" type:"structure"`
// Changes the instance type to the specified value. For more information, see
- // Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html).
- // If the instance type is not valid, the error returned is InvalidInstanceAttributeValue.
+ // Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html)
+ // in the Amazon EC2 User Guide. If the instance type is not valid, the error
+ // returned is InvalidInstanceAttributeValue.
InstanceType *AttributeValue `locationName:"instanceType" type:"structure"`
// Changes the instance's kernel to the specified value. We recommend that you
@@ -95212,9 +101851,12 @@ type ModifyInstanceAttributeInput struct {
// PV-GRUB (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedKernels.html).
Ramdisk *AttributeValue `locationName:"ramdisk" type:"structure"`
- // Specifies whether source/destination checking is enabled. A value of true
- // means that checking is enabled, and false means that checking is disabled.
- // This value must be false for a NAT instance to perform NAT.
+ // Enable or disable source/destination checks, which ensure that the instance
+ // is either the source or the destination of any traffic that it receives.
+ // If the value is true, source/destination checks are enabled; otherwise, they
+ // are disabled. The default value is true. You must disable source/destination
+ // checks if the instance runs services such as network address translation,
+ // routing, or firewalls.
SourceDestCheck *AttributeBooleanValue `type:"structure"`
// Set to simple to enable enhanced networking with the Intel 82599 Virtual
@@ -95228,9 +101870,9 @@ type ModifyInstanceAttributeInput struct {
SriovNetSupport *AttributeValue `locationName:"sriovNetSupport" type:"structure"`
// Changes the instance's user data to the specified value. If you are using
- // an AWS SDK or command line tool, base64-encoding is performed for you, and
- // you can load the text from a file. Otherwise, you must provide base64-encoded
- // text.
+ // an Amazon Web Services SDK or command line tool, base64-encoding is performed
+ // for you, and you can load the text from a file. Otherwise, you must provide
+ // base64-encoded text.
UserData *BlobAttributeValue `locationName:"userData" type:"structure"`
// A new value for the attribute. Use only with the kernel, ramdisk, userData,
@@ -95654,6 +102296,126 @@ func (s *ModifyInstanceEventStartTimeOutput) SetEvent(v *InstanceStatusEvent) *M
return s
}
+type ModifyInstanceEventWindowInput struct {
+ _ struct{} `type:"structure"`
+
+ // The cron expression of the event window, for example, * 0-4,20-23 * * 1,5.
+ //
+ // Constraints:
+ //
+ // * Only hour and day of the week values are supported.
+ //
+ // * For day of the week values, you can specify either integers 0 through
+ // 6, or alternative single values SUN through SAT.
+ //
+ // * The minute, month, and year must be specified by *.
+ //
+ // * The hour value must be one or a multiple range, for example, 0-4 or
+ // 0-4,20-23.
+ //
+ // * Each hour range must be >= 2 hours, for example, 0-2 or 20-23.
+ //
+ // * The event window must be >= 4 hours. The combined total time ranges
+ // in the event window must be >= 4 hours.
+ //
+ // For more information about cron expressions, see cron (https://en.wikipedia.org/wiki/Cron)
+ // on the Wikipedia website.
+ CronExpression *string `type:"string"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The ID of the event window.
+ //
+ // InstanceEventWindowId is a required field
+ InstanceEventWindowId *string `type:"string" required:"true"`
+
+ // The name of the event window.
+ Name *string `type:"string"`
+
+ // The time ranges of the event window.
+ TimeRanges []*InstanceEventWindowTimeRangeRequest `locationName:"TimeRange" type:"list"`
+}
+
+// String returns the string representation
+func (s ModifyInstanceEventWindowInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyInstanceEventWindowInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ModifyInstanceEventWindowInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ModifyInstanceEventWindowInput"}
+ if s.InstanceEventWindowId == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceEventWindowId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetCronExpression sets the CronExpression field's value.
+func (s *ModifyInstanceEventWindowInput) SetCronExpression(v string) *ModifyInstanceEventWindowInput {
+ s.CronExpression = &v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *ModifyInstanceEventWindowInput) SetDryRun(v bool) *ModifyInstanceEventWindowInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetInstanceEventWindowId sets the InstanceEventWindowId field's value.
+func (s *ModifyInstanceEventWindowInput) SetInstanceEventWindowId(v string) *ModifyInstanceEventWindowInput {
+ s.InstanceEventWindowId = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *ModifyInstanceEventWindowInput) SetName(v string) *ModifyInstanceEventWindowInput {
+ s.Name = &v
+ return s
+}
+
+// SetTimeRanges sets the TimeRanges field's value.
+func (s *ModifyInstanceEventWindowInput) SetTimeRanges(v []*InstanceEventWindowTimeRangeRequest) *ModifyInstanceEventWindowInput {
+ s.TimeRanges = v
+ return s
+}
+
+type ModifyInstanceEventWindowOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the event window.
+ InstanceEventWindow *InstanceEventWindow `locationName:"instanceEventWindow" type:"structure"`
+}
+
+// String returns the string representation
+func (s ModifyInstanceEventWindowOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyInstanceEventWindowOutput) GoString() string {
+ return s.String()
+}
+
+// SetInstanceEventWindow sets the InstanceEventWindow field's value.
+func (s *ModifyInstanceEventWindowOutput) SetInstanceEventWindow(v *InstanceEventWindow) *ModifyInstanceEventWindowOutput {
+ s.InstanceEventWindow = v
+ return s
+}
+
type ModifyInstanceMetadataOptionsInput struct {
_ struct{} `type:"structure"`
@@ -95670,6 +102432,9 @@ type ModifyInstanceMetadataOptionsInput struct {
// metadata.
HttpEndpoint *string `type:"string" enum:"InstanceMetadataEndpointState"`
+ // Enables or disables the IPv6 endpoint for the instance metadata service.
+ HttpProtocolIpv6 *string `type:"string" enum:"InstanceMetadataProtocolState"`
+
// The desired HTTP PUT response hop limit for instance metadata requests. The
// larger the number, the further instance metadata requests can travel. If
// no parameter is specified, the existing state is maintained.
@@ -95733,6 +102498,12 @@ func (s *ModifyInstanceMetadataOptionsInput) SetHttpEndpoint(v string) *ModifyIn
return s
}
+// SetHttpProtocolIpv6 sets the HttpProtocolIpv6 field's value.
+func (s *ModifyInstanceMetadataOptionsInput) SetHttpProtocolIpv6(v string) *ModifyInstanceMetadataOptionsInput {
+ s.HttpProtocolIpv6 = &v
+ return s
+}
+
// SetHttpPutResponseHopLimit sets the HttpPutResponseHopLimit field's value.
func (s *ModifyInstanceMetadataOptionsInput) SetHttpPutResponseHopLimit(v int64) *ModifyInstanceMetadataOptionsInput {
s.HttpPutResponseHopLimit = &v
@@ -96021,6 +102792,11 @@ type ModifyManagedPrefixListInput struct {
// it is UnauthorizedOperation.
DryRun *bool `type:"boolean"`
+ // The maximum number of entries for the prefix list. You cannot modify the
+ // entries of a prefix list and modify the size of a prefix list at the same
+ // time.
+ MaxEntries *int64 `type:"integer"`
+
// The ID of the prefix list.
//
// PrefixListId is a required field
@@ -96094,6 +102870,12 @@ func (s *ModifyManagedPrefixListInput) SetDryRun(v bool) *ModifyManagedPrefixLis
return s
}
+// SetMaxEntries sets the MaxEntries field's value.
+func (s *ModifyManagedPrefixListInput) SetMaxEntries(v int64) *ModifyManagedPrefixListInput {
+ s.MaxEntries = &v
+ return s
+}
+
// SetPrefixListId sets the PrefixListId field's value.
func (s *ModifyManagedPrefixListInput) SetPrefixListId(v string) *ModifyManagedPrefixListInput {
s.PrefixListId = &v
@@ -96163,11 +102945,12 @@ type ModifyNetworkInterfaceAttributeInput struct {
// NetworkInterfaceId is a required field
NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"`
- // Indicates whether source/destination checking is enabled. A value of true
- // means checking is enabled, and false means checking is disabled. This value
- // must be false for a NAT instance to perform NAT. For more information, see
- // NAT Instances (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html)
- // in the Amazon Virtual Private Cloud User Guide.
+ // Enable or disable source/destination checks, which ensure that the instance
+ // is either the source or the destination of any traffic that it receives.
+ // If the value is true, source/destination checks are enabled; otherwise, they
+ // are disabled. The default value is true. You must disable source/destination
+ // checks if the instance runs services such as network address translation,
+ // routing, or firewalls.
SourceDestCheck *AttributeBooleanValue `locationName:"sourceDestCheck" type:"structure"`
}
@@ -96331,6 +103114,93 @@ func (s *ModifyReservedInstancesOutput) SetReservedInstancesModificationId(v str
return s
}
+type ModifySecurityGroupRulesInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The ID of the security group.
+ //
+ // GroupId is a required field
+ GroupId *string `type:"string" required:"true"`
+
+ // Information about the security group properties to update.
+ //
+ // SecurityGroupRules is a required field
+ SecurityGroupRules []*SecurityGroupRuleUpdate `locationName:"SecurityGroupRule" locationNameList:"item" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s ModifySecurityGroupRulesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifySecurityGroupRulesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ModifySecurityGroupRulesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ModifySecurityGroupRulesInput"}
+ if s.GroupId == nil {
+ invalidParams.Add(request.NewErrParamRequired("GroupId"))
+ }
+ if s.SecurityGroupRules == nil {
+ invalidParams.Add(request.NewErrParamRequired("SecurityGroupRules"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *ModifySecurityGroupRulesInput) SetDryRun(v bool) *ModifySecurityGroupRulesInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetGroupId sets the GroupId field's value.
+func (s *ModifySecurityGroupRulesInput) SetGroupId(v string) *ModifySecurityGroupRulesInput {
+ s.GroupId = &v
+ return s
+}
+
+// SetSecurityGroupRules sets the SecurityGroupRules field's value.
+func (s *ModifySecurityGroupRulesInput) SetSecurityGroupRules(v []*SecurityGroupRuleUpdate) *ModifySecurityGroupRulesInput {
+ s.SecurityGroupRules = v
+ return s
+}
+
+type ModifySecurityGroupRulesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Returns true if the request succeeds; otherwise, returns an error.
+ Return *bool `locationName:"return" type:"boolean"`
+}
+
+// String returns the string representation
+func (s ModifySecurityGroupRulesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifySecurityGroupRulesOutput) GoString() string {
+ return s.String()
+}
+
+// SetReturn sets the Return field's value.
+func (s *ModifySecurityGroupRulesOutput) SetReturn(v bool) *ModifySecurityGroupRulesOutput {
+ s.Return = &v
+ return s
+}
+
type ModifySnapshotAttributeInput struct {
_ struct{} `type:"structure"`
@@ -96445,6 +103315,9 @@ func (s ModifySnapshotAttributeOutput) GoString() string {
type ModifySpotFleetRequestInput struct {
_ struct{} `type:"structure"`
+ // Reserved.
+ Context *string `type:"string"`
+
// Indicates whether running Spot Instances should be terminated if the target
// capacity of the Spot Fleet request is decreased below the current size of
// the Spot Fleet.
@@ -96501,6 +103374,12 @@ func (s *ModifySpotFleetRequestInput) Validate() error {
return nil
}
+// SetContext sets the Context field's value.
+func (s *ModifySpotFleetRequestInput) SetContext(v string) *ModifySpotFleetRequestInput {
+ s.Context = &v
+ return s
+}
+
// SetExcessCapacityTerminationPolicy sets the ExcessCapacityTerminationPolicy field's value.
func (s *ModifySpotFleetRequestInput) SetExcessCapacityTerminationPolicy(v string) *ModifySpotFleetRequestInput {
s.ExcessCapacityTerminationPolicy = &v
@@ -97373,8 +104252,6 @@ type ModifyTransitGatewayVpcAttachmentInput struct {
DryRun *bool `type:"boolean"`
// The new VPC attachment options.
- //
- // You cannot modify the IPv6 options.
Options *ModifyTransitGatewayVpcAttachmentRequestOptions `type:"structure"`
// The IDs of one or more subnets to remove.
@@ -97599,7 +104476,8 @@ type ModifyVolumeInput struct {
//
// * io2: 100-64,000 IOPS
//
- // Default: If no IOPS value is specified, the existing value is retained.
+ // Default: The existing value is retained if you keep the same volume type.
+ // If you change the volume type to io1, io2, or gp3, the default is 3,000.
Iops *int64 `type:"integer"`
// Specifies whether to enable Amazon EBS Multi-Attach. If you enable Multi-Attach,
@@ -97622,13 +104500,14 @@ type ModifyVolumeInput struct {
//
// * standard: 1-1,024
//
- // Default: If no size is specified, the existing size is retained.
+ // Default: The existing size is retained.
Size *int64 `type:"integer"`
// The target throughput of the volume, in MiB/s. This parameter is valid only
// for gp3 volumes. The maximum value is 1,000.
//
- // Default: If no throughput value is specified, the existing value is retained.
+ // Default: The existing value is retained if the source and target volume type
+ // is gp3. Otherwise, the default value is 125.
//
// Valid Range: Minimum value of 125. Maximum value of 1000.
Throughput *int64 `type:"integer"`
@@ -97642,7 +104521,7 @@ type ModifyVolumeInput struct {
// EBS volume types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html)
// in the Amazon Elastic Compute Cloud User Guide.
//
- // Default: If no type is specified, the existing type is retained.
+ // Default: The existing type is retained.
VolumeType *string `type:"string" enum:"VolumeType"`
}
@@ -99380,6 +106259,9 @@ func (s *MovingAddressStatus) SetPublicIp(v string) *MovingAddressStatus {
type NatGateway struct {
_ struct{} `type:"structure"`
+ // Indicates whether the NAT gateway supports public or private connectivity.
+ ConnectivityType *string `locationName:"connectivityType" type:"string" enum:"ConnectivityType"`
+
// The date and time the NAT gateway was created.
CreateTime *time.Time `locationName:"createTime" type:"timestamp"`
@@ -99464,6 +106346,12 @@ func (s NatGateway) GoString() string {
return s.String()
}
+// SetConnectivityType sets the ConnectivityType field's value.
+func (s *NatGateway) SetConnectivityType(v string) *NatGateway {
+ s.ConnectivityType = &v
+ return s
+}
+
// SetCreateTime sets the CreateTime field's value.
func (s *NatGateway) SetCreateTime(v time.Time) *NatGateway {
s.CreateTime = &v
@@ -99534,17 +106422,18 @@ func (s *NatGateway) SetVpcId(v string) *NatGateway {
type NatGatewayAddress struct {
_ struct{} `type:"structure"`
- // The allocation ID of the Elastic IP address that's associated with the NAT
- // gateway.
+ // [Public NAT gateway only] The allocation ID of the Elastic IP address that's
+ // associated with the NAT gateway.
AllocationId *string `locationName:"allocationId" type:"string"`
// The ID of the network interface associated with the NAT gateway.
NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"`
- // The private IP address associated with the Elastic IP address.
+ // The private IP address associated with the NAT gateway.
PrivateIp *string `locationName:"privateIp" type:"string"`
- // The Elastic IP address associated with the NAT gateway.
+ // [Public NAT gateway only] The Elastic IP address associated with the NAT
+ // gateway.
PublicIp *string `locationName:"publicIp" type:"string"`
}
@@ -99598,7 +106487,7 @@ type NetworkAcl struct {
// The ID of the network ACL.
NetworkAclId *string `locationName:"networkAclId" type:"string"`
- // The ID of the AWS account that owns the network ACL.
+ // The ID of the Amazon Web Services account that owns the network ACL.
OwnerId *string `locationName:"ownerId" type:"string"`
// Any tags assigned to the network ACL.
@@ -99840,12 +106729,19 @@ type NetworkInfo struct {
// The index of the default network card, starting at 0.
DefaultNetworkCardIndex *int64 `locationName:"defaultNetworkCardIndex" type:"integer"`
+ // Describes the Elastic Fabric Adapters for the instance type.
+ EfaInfo *EfaInfo `locationName:"efaInfo" type:"structure"`
+
// Indicates whether Elastic Fabric Adapter (EFA) is supported.
EfaSupported *bool `locationName:"efaSupported" type:"boolean"`
// Indicates whether Elastic Network Adapter (ENA) is supported.
EnaSupport *string `locationName:"enaSupport" type:"string" enum:"EnaSupport"`
+ // Indicates whether the instance type automatically encrypts in-transit traffic
+ // between instances.
+ EncryptionInTransitSupported *bool `locationName:"encryptionInTransitSupported" type:"boolean"`
+
// The maximum number of IPv4 addresses per network interface.
Ipv4AddressesPerInterface *int64 `locationName:"ipv4AddressesPerInterface" type:"integer"`
@@ -99885,6 +106781,12 @@ func (s *NetworkInfo) SetDefaultNetworkCardIndex(v int64) *NetworkInfo {
return s
}
+// SetEfaInfo sets the EfaInfo field's value.
+func (s *NetworkInfo) SetEfaInfo(v *EfaInfo) *NetworkInfo {
+ s.EfaInfo = v
+ return s
+}
+
// SetEfaSupported sets the EfaSupported field's value.
func (s *NetworkInfo) SetEfaSupported(v bool) *NetworkInfo {
s.EfaSupported = &v
@@ -99897,6 +106799,12 @@ func (s *NetworkInfo) SetEnaSupport(v string) *NetworkInfo {
return s
}
+// SetEncryptionInTransitSupported sets the EncryptionInTransitSupported field's value.
+func (s *NetworkInfo) SetEncryptionInTransitSupported(v bool) *NetworkInfo {
+ s.EncryptionInTransitSupported = &v
+ return s
+}
+
// SetIpv4AddressesPerInterface sets the Ipv4AddressesPerInterface field's value.
func (s *NetworkInfo) SetIpv4AddressesPerInterface(v int64) *NetworkInfo {
s.Ipv4AddressesPerInterface = &v
@@ -99950,7 +106858,8 @@ type NetworkInsightsAnalysis struct {
// codes (https://docs.aws.amazon.com/vpc/latest/reachability/explanation-codes.html).
Explanations []*Explanation `locationName:"explanationSet" locationNameList:"item" type:"list"`
- // The Amazon Resource Names (ARN) of the AWS resources that the path must traverse.
+ // The Amazon Resource Names (ARN) of the Amazon Web Services resources that
+ // the path must traverse.
FilterInArns []*string `locationName:"filterInArnSet" locationNameList:"item" type:"list"`
// The components in the path from source to destination.
@@ -100079,10 +106988,11 @@ type NetworkInsightsPath struct {
// The time stamp when the path was created.
CreatedDate *time.Time `locationName:"createdDate" type:"timestamp"`
- // The AWS resource that is the destination of the path.
+ // The Amazon Web Services resource that is the destination of the path.
Destination *string `locationName:"destination" type:"string"`
- // The IP address of the AWS resource that is the destination of the path.
+ // The IP address of the Amazon Web Services resource that is the destination
+ // of the path.
DestinationIp *string `locationName:"destinationIp" type:"string"`
// The destination port.
@@ -100097,10 +107007,11 @@ type NetworkInsightsPath struct {
// The protocol.
Protocol *string `locationName:"protocol" type:"string" enum:"Protocol"`
- // The AWS resource that is the source of the path.
+ // The Amazon Web Services resource that is the source of the path.
Source *string `locationName:"source" type:"string"`
- // The IP address of the AWS resource that is the source of the path.
+ // The IP address of the Amazon Web Services resource that is the source of
+ // the path.
SourceIp *string `locationName:"sourceIp" type:"string"`
// The tags associated with the path.
@@ -100200,9 +107111,15 @@ type NetworkInterface struct {
// The type of network interface.
InterfaceType *string `locationName:"interfaceType" type:"string" enum:"NetworkInterfaceType"`
+ // The IPv4 prefixes that are assigned to the network interface.
+ Ipv4Prefixes []*Ipv4PrefixSpecification `locationName:"ipv4PrefixSet" locationNameList:"item" type:"list"`
+
// The IPv6 addresses associated with the network interface.
Ipv6Addresses []*NetworkInterfaceIpv6Address `locationName:"ipv6AddressesSet" locationNameList:"item" type:"list"`
+ // The IPv6 prefixes that are assigned to the network interface.
+ Ipv6Prefixes []*Ipv6PrefixSpecification `locationName:"ipv6PrefixSet" locationNameList:"item" type:"list"`
+
// The MAC address.
MacAddress *string `locationName:"macAddress" type:"string"`
@@ -100212,7 +107129,7 @@ type NetworkInterface struct {
// The Amazon Resource Name (ARN) of the Outpost.
OutpostArn *string `locationName:"outpostArn" type:"string"`
- // The AWS account ID of the owner of the network interface.
+ // The Amazon Web Services account ID of the owner of the network interface.
OwnerId *string `locationName:"ownerId" type:"string"`
// The private DNS name.
@@ -100224,14 +107141,14 @@ type NetworkInterface struct {
// The private IPv4 addresses associated with the network interface.
PrivateIpAddresses []*NetworkInterfacePrivateIpAddress `locationName:"privateIpAddressesSet" locationNameList:"item" type:"list"`
- // The ID of the entity that launched the instance on your behalf (for example,
- // AWS Management Console or Auto Scaling).
+ // The alias or Amazon Web Services account ID of the principal or service that
+ // created the network interface.
RequesterId *string `locationName:"requesterId" type:"string"`
- // Indicates whether the network interface is being managed by AWS.
+ // Indicates whether the network interface is being managed by Amazon Web Services.
RequesterManaged *bool `locationName:"requesterManaged" type:"boolean"`
- // Indicates whether traffic to or from the instance is validated.
+ // Indicates whether source/destination checking is enabled.
SourceDestCheck *bool `locationName:"sourceDestCheck" type:"boolean"`
// The status of the network interface.
@@ -100293,12 +107210,24 @@ func (s *NetworkInterface) SetInterfaceType(v string) *NetworkInterface {
return s
}
+// SetIpv4Prefixes sets the Ipv4Prefixes field's value.
+func (s *NetworkInterface) SetIpv4Prefixes(v []*Ipv4PrefixSpecification) *NetworkInterface {
+ s.Ipv4Prefixes = v
+ return s
+}
+
// SetIpv6Addresses sets the Ipv6Addresses field's value.
func (s *NetworkInterface) SetIpv6Addresses(v []*NetworkInterfaceIpv6Address) *NetworkInterface {
s.Ipv6Addresses = v
return s
}
+// SetIpv6Prefixes sets the Ipv6Prefixes field's value.
+func (s *NetworkInterface) SetIpv6Prefixes(v []*Ipv6PrefixSpecification) *NetworkInterface {
+ s.Ipv6Prefixes = v
+ return s
+}
+
// SetMacAddress sets the MacAddress field's value.
func (s *NetworkInterface) SetMacAddress(v string) *NetworkInterface {
s.MacAddress = &v
@@ -100485,7 +107414,7 @@ type NetworkInterfaceAttachment struct {
// The ID of the instance.
InstanceId *string `locationName:"instanceId" type:"string"`
- // The AWS account ID of the owner of the instance.
+ // The Amazon Web Services account ID of the owner of the instance.
InstanceOwnerId *string `locationName:"instanceOwnerId" type:"string"`
// The index of the network card.
@@ -100614,10 +107543,10 @@ func (s *NetworkInterfaceIpv6Address) SetIpv6Address(v string) *NetworkInterface
type NetworkInterfacePermission struct {
_ struct{} `type:"structure"`
- // The AWS account ID.
+ // The Amazon Web Services account ID.
AwsAccountId *string `locationName:"awsAccountId" type:"string"`
- // The AWS service.
+ // The Amazon Web Service.
AwsService *string `locationName:"awsService" type:"string"`
// The ID of the network interface.
@@ -101246,7 +108175,7 @@ func (s *PeeringConnectionOptionsRequest) SetAllowEgressFromLocalVpcToRemoteClas
type PeeringTgwInfo struct {
_ struct{} `type:"structure"`
- // The AWS account ID of the owner of the transit gateway.
+ // The ID of the Amazon Web Services account that owns the transit gateway.
OwnerId *string `locationName:"ownerId" type:"string"`
// The Region of the transit gateway.
@@ -101862,11 +108791,11 @@ func (s *PortRange) SetTo(v int64) *PortRange {
return s
}
-// Describes prefixes for AWS services.
+// Describes prefixes for Amazon Web Services services.
type PrefixList struct {
_ struct{} `type:"structure"`
- // The IP address range of the AWS service.
+ // The IP address range of the Amazon Web Service.
Cidrs []*string `locationName:"cidrSet" locationNameList:"item" type:"list"`
// The ID of the prefix.
@@ -102410,6 +109339,9 @@ type ProvisionByoipCidrInput struct {
// it is UnauthorizedOperation.
DryRun *bool `type:"boolean"`
+ // Reserved.
+ MultiRegion *bool `type:"boolean"`
+
// The tags to apply to the address pool.
PoolTagSpecifications []*TagSpecification `locationName:"PoolTagSpecification" locationNameList:"item" type:"list"`
@@ -102472,6 +109404,12 @@ func (s *ProvisionByoipCidrInput) SetDryRun(v bool) *ProvisionByoipCidrInput {
return s
}
+// SetMultiRegion sets the MultiRegion field's value.
+func (s *ProvisionByoipCidrInput) SetMultiRegion(v bool) *ProvisionByoipCidrInput {
+ s.MultiRegion = &v
+ return s
+}
+
// SetPoolTagSpecifications sets the PoolTagSpecifications field's value.
func (s *ProvisionByoipCidrInput) SetPoolTagSpecifications(v []*TagSpecification) *ProvisionByoipCidrInput {
s.PoolTagSpecifications = v
@@ -102579,6 +109517,48 @@ func (s *ProvisionedBandwidth) SetStatus(v string) *ProvisionedBandwidth {
return s
}
+// The status of an updated pointer (PTR) record for an Elastic IP address.
+type PtrUpdateStatus struct {
+ _ struct{} `type:"structure"`
+
+ // The reason for the PTR record update.
+ Reason *string `locationName:"reason" type:"string"`
+
+ // The status of the PTR record update.
+ Status *string `locationName:"status" type:"string"`
+
+ // The value for the PTR record update.
+ Value *string `locationName:"value" type:"string"`
+}
+
+// String returns the string representation
+func (s PtrUpdateStatus) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PtrUpdateStatus) GoString() string {
+ return s.String()
+}
+
+// SetReason sets the Reason field's value.
+func (s *PtrUpdateStatus) SetReason(v string) *PtrUpdateStatus {
+ s.Reason = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *PtrUpdateStatus) SetStatus(v string) *PtrUpdateStatus {
+ s.Status = &v
+ return s
+}
+
+// SetValue sets the Value field's value.
+func (s *PtrUpdateStatus) SetValue(v string) *PtrUpdateStatus {
+ s.Value = &v
+ return s
+}
+
// Describes an IPv4 address pool.
type PublicIpv4Pool struct {
_ struct{} `type:"structure"`
@@ -102588,7 +109568,7 @@ type PublicIpv4Pool struct {
// The name of the location from which the address pool is advertised. A network
// border group is a unique set of Availability Zones or Local Zones from where
- // AWS advertises public IP addresses.
+ // Amazon Web Services advertises public IP addresses.
NetworkBorderGroup *string `locationName:"networkBorderGroup" type:"string"`
// The address ranges.
@@ -102803,7 +109783,7 @@ type PurchaseHostReservationInput struct {
_ struct{} `type:"structure"`
// Unique, case-sensitive identifier that you provide to ensure the idempotency
- // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
+ // of the request. For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
ClientToken *string `type:"string"`
// The currency in which the totalUpfrontPrice, LimitPrice, and totalHourlyPrice
@@ -102898,7 +109878,7 @@ type PurchaseHostReservationOutput struct {
_ struct{} `type:"structure"`
// Unique, case-sensitive identifier that you provide to ensure the idempotency
- // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
+ // of the request. For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
ClientToken *string `locationName:"clientToken" type:"string"`
// The currency in which the totalUpfrontPrice and totalHourlyPrice amounts
@@ -103098,7 +110078,10 @@ func (s *PurchaseReservedInstancesOfferingInput) SetReservedInstancesOfferingId(
type PurchaseReservedInstancesOfferingOutput struct {
_ struct{} `type:"structure"`
- // The IDs of the purchased Reserved Instances.
+ // The IDs of the purchased Reserved Instances. If your purchase crosses into
+ // a discounted pricing tier, the final Reserved Instances IDs might change.
+ // For more information, see Crossing pricing tiers (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/concepts-reserved-instances-application.html#crossing-pricing-tiers)
+ // in the Amazon Elastic Compute Cloud User Guide.
ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"`
}
@@ -103313,6 +110296,66 @@ func (s *RecurringCharge) SetFrequency(v string) *RecurringCharge {
return s
}
+// Describes the security group that is referenced in the security group rule.
+type ReferencedSecurityGroup struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the security group.
+ GroupId *string `locationName:"groupId" type:"string"`
+
+ // The status of a VPC peering connection, if applicable.
+ PeeringStatus *string `locationName:"peeringStatus" type:"string"`
+
+ // The Amazon Web Services account ID.
+ UserId *string `locationName:"userId" type:"string"`
+
+ // The ID of the VPC.
+ VpcId *string `locationName:"vpcId" type:"string"`
+
+ // The ID of the VPC peering connection.
+ VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"`
+}
+
+// String returns the string representation
+func (s ReferencedSecurityGroup) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReferencedSecurityGroup) GoString() string {
+ return s.String()
+}
+
+// SetGroupId sets the GroupId field's value.
+func (s *ReferencedSecurityGroup) SetGroupId(v string) *ReferencedSecurityGroup {
+ s.GroupId = &v
+ return s
+}
+
+// SetPeeringStatus sets the PeeringStatus field's value.
+func (s *ReferencedSecurityGroup) SetPeeringStatus(v string) *ReferencedSecurityGroup {
+ s.PeeringStatus = &v
+ return s
+}
+
+// SetUserId sets the UserId field's value.
+func (s *ReferencedSecurityGroup) SetUserId(v string) *ReferencedSecurityGroup {
+ s.UserId = &v
+ return s
+}
+
+// SetVpcId sets the VpcId field's value.
+func (s *ReferencedSecurityGroup) SetVpcId(v string) *ReferencedSecurityGroup {
+ s.VpcId = &v
+ return s
+}
+
+// SetVpcPeeringConnectionId sets the VpcPeeringConnectionId field's value.
+func (s *ReferencedSecurityGroup) SetVpcPeeringConnectionId(v string) *ReferencedSecurityGroup {
+ s.VpcPeeringConnectionId = &v
+ return s
+}
+
// Describes a Region.
type Region struct {
_ struct{} `type:"structure"`
@@ -103372,8 +110415,21 @@ type RegisterImageInput struct {
BillingProducts []*string `locationName:"BillingProduct" locationNameList:"item" type:"list"`
// The block device mapping entries.
+ //
+ // If you specify an EBS volume using the ID of an EBS snapshot, you can't specify
+ // the encryption state of the volume.
+ //
+ // If you create an AMI on an Outpost, then all backing snapshots must be on
+ // the same Outpost or in the Region of that Outpost. AMIs on an Outpost that
+ // include local snapshots can be used to launch instances on the same Outpost
+ // only. For more information, Amazon EBS local snapshots on Outposts (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snapshots-outposts.html#ami)
+ // in the Amazon Elastic Compute Cloud User Guide.
BlockDeviceMappings []*BlockDeviceMapping `locationName:"BlockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"`
+ // The boot mode of the AMI. For more information, see Boot modes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-boot.html)
+ // in the Amazon Elastic Compute Cloud User Guide.
+ BootMode *string `type:"string" enum:"BootModeValues"`
+
// A description for your AMI.
Description *string `locationName:"description" type:"string"`
@@ -103471,6 +110527,12 @@ func (s *RegisterImageInput) SetBlockDeviceMappings(v []*BlockDeviceMapping) *Re
return s
}
+// SetBootMode sets the BootMode field's value.
+func (s *RegisterImageInput) SetBootMode(v string) *RegisterImageInput {
+ s.BootMode = &v
+ return s
+}
+
// SetDescription sets the Description field's value.
func (s *RegisterImageInput) SetDescription(v string) *RegisterImageInput {
s.Description = &v
@@ -104700,6 +111762,94 @@ func (s ReplaceNetworkAclEntryOutput) GoString() string {
return s.String()
}
+// Information about a root volume replacement task.
+type ReplaceRootVolumeTask struct {
+ _ struct{} `type:"structure"`
+
+ // The time the task completed.
+ CompleteTime *string `locationName:"completeTime" type:"string"`
+
+ // The ID of the instance for which the root volume replacement task was created.
+ InstanceId *string `locationName:"instanceId" type:"string"`
+
+ // The ID of the root volume replacement task.
+ ReplaceRootVolumeTaskId *string `locationName:"replaceRootVolumeTaskId" type:"string"`
+
+ // The time the task was started.
+ StartTime *string `locationName:"startTime" type:"string"`
+
+ // The tags assigned to the task.
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
+
+ // The state of the task. The task can be in one of the following states:
+ //
+ // * pending - the replacement volume is being created.
+ //
+ // * in-progress - the original volume is being detached and the replacement
+ // volume is being attached.
+ //
+ // * succeeded - the replacement volume has been successfully attached to
+ // the instance and the instance is available.
+ //
+ // * failing - the replacement task is in the process of failing.
+ //
+ // * failed - the replacement task has failed but the original root volume
+ // is still attached.
+ //
+ // * failing-detached - the replacement task is in the process of failing.
+ // The instance might have no root volume attached.
+ //
+ // * failed-detached - the replacement task has failed and the instance has
+ // no root volume attached.
+ TaskState *string `locationName:"taskState" type:"string" enum:"ReplaceRootVolumeTaskState"`
+}
+
+// String returns the string representation
+func (s ReplaceRootVolumeTask) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplaceRootVolumeTask) GoString() string {
+ return s.String()
+}
+
+// SetCompleteTime sets the CompleteTime field's value.
+func (s *ReplaceRootVolumeTask) SetCompleteTime(v string) *ReplaceRootVolumeTask {
+ s.CompleteTime = &v
+ return s
+}
+
+// SetInstanceId sets the InstanceId field's value.
+func (s *ReplaceRootVolumeTask) SetInstanceId(v string) *ReplaceRootVolumeTask {
+ s.InstanceId = &v
+ return s
+}
+
+// SetReplaceRootVolumeTaskId sets the ReplaceRootVolumeTaskId field's value.
+func (s *ReplaceRootVolumeTask) SetReplaceRootVolumeTaskId(v string) *ReplaceRootVolumeTask {
+ s.ReplaceRootVolumeTaskId = &v
+ return s
+}
+
+// SetStartTime sets the StartTime field's value.
+func (s *ReplaceRootVolumeTask) SetStartTime(v string) *ReplaceRootVolumeTask {
+ s.StartTime = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *ReplaceRootVolumeTask) SetTags(v []*Tag) *ReplaceRootVolumeTask {
+ s.Tags = v
+ return s
+}
+
+// SetTaskState sets the TaskState field's value.
+func (s *ReplaceRootVolumeTask) SetTaskState(v string) *ReplaceRootVolumeTask {
+ s.TaskState = &v
+ return s
+}
+
type ReplaceRouteInput struct {
_ struct{} `type:"structure"`
@@ -105276,20 +112426,21 @@ type RequestLaunchTemplateData struct {
// The elastic inference accelerator for the instance.
ElasticInferenceAccelerators []*LaunchTemplateElasticInferenceAccelerator `locationName:"ElasticInferenceAccelerator" locationNameList:"item" type:"list"`
- // Indicates whether the instance is enabled for AWS Nitro Enclaves. For more
- // information, see What is AWS Nitro Enclaves? (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html)
- // in the AWS Nitro Enclaves User Guide.
+ // Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves.
+ // For more information, see What is Amazon Web Services Nitro Enclaves? (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html)
+ // in the Amazon Web Services Nitro Enclaves User Guide.
//
- // You can't enable AWS Nitro Enclaves and hibernation on the same instance.
+ // You can't enable Amazon Web Services Nitro Enclaves and hibernation on the
+ // same instance.
EnclaveOptions *LaunchTemplateEnclaveOptionsRequest `type:"structure"`
// Indicates whether an instance is enabled for hibernation. This parameter
// is valid only if the instance meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html#hibernating-prerequisites).
- // For more information, see Hibernate Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html)
+ // For more information, see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html)
// in the Amazon Elastic Compute Cloud User Guide.
HibernationOptions *LaunchTemplateHibernationOptionsRequest `type:"structure"`
- // The IAM instance profile.
+ // The name or Amazon Resource Name (ARN) of an IAM instance profile.
IamInstanceProfile *LaunchTemplateIamInstanceProfileSpecificationRequest `type:"structure"`
// The ID of the AMI.
@@ -105326,7 +112477,7 @@ type RequestLaunchTemplateData struct {
LicenseSpecifications []*LaunchTemplateLicenseConfigurationRequest `locationName:"LicenseSpecification" locationNameList:"item" type:"list"`
// The metadata options for the instance. For more information, see Instance
- // Metadata and User Data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html)
+ // metadata and user data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html)
// in the Amazon Elastic Compute Cloud User Guide.
MetadataOptions *LaunchTemplateInstanceMetadataOptionsRequest `type:"structure"`
@@ -105364,10 +112515,16 @@ type RequestLaunchTemplateData struct {
// created, see CreateTags (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html).
TagSpecifications []*LaunchTemplateTagSpecificationRequest `locationName:"TagSpecification" locationNameList:"LaunchTemplateTagSpecificationRequest" type:"list"`
- // The Base64-encoded user data to make available to the instance. For more
- // information, see Running Commands on Your Linux Instance at Launch (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html)
- // (Linux) and Adding User Data (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-instance-metadata.html#instancedata-add-user-data)
+ // The user data to make available to the instance. You must provide base64-encoded
+ // text. User data is limited to 16 KB. For more information, see Running Commands
+ // on Your Linux Instance at Launch (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html)
+ // (Linux) or Adding User Data (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-instance-metadata.html#instancedata-add-user-data)
// (Windows).
+ //
+ // If you are creating the launch template for use with Batch, the user data
+ // must be provided in the MIME multi-part archive format (https://cloudinit.readthedocs.io/en/latest/topics/format.html#mime-multi-part-archive).
+ // For more information, see Amazon EC2 user data in launch templates (https://docs.aws.amazon.com/batch/latest/userguide/launch-templates.html)
+ // in the Batch User Guide.
UserData *string `type:"string"`
}
@@ -105683,20 +112840,7 @@ type RequestSpotInstancesInput struct {
// Default: Instances are launched in any available Availability Zone.
AvailabilityZoneGroup *string `locationName:"availabilityZoneGroup" type:"string"`
- // The required duration for the Spot Instances (also known as Spot blocks),
- // in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300,
- // or 360).
- //
- // The duration period starts as soon as your Spot Instance receives its instance
- // ID. At the end of the duration period, Amazon EC2 marks the Spot Instance
- // for termination and provides a Spot Instance termination notice, which gives
- // the instance a two-minute warning before it terminates.
- //
- // You can't specify an Availability Zone group or a launch group if you specify
- // a duration.
- //
- // New accounts or accounts with no previous billing history with AWS are not
- // eligible for Spot Instances with a defined duration (also known as Spot blocks).
+ // Deprecated.
BlockDurationMinutes *int64 `locationName:"blockDurationMinutes" type:"integer"`
// Unique, case-sensitive identifier that you provide to ensure the idempotency
@@ -105953,9 +113097,7 @@ type RequestSpotLaunchSpecification struct {
// you can specify the names or the IDs of the security groups.
SecurityGroups []*string `locationName:"SecurityGroup" locationNameList:"item" type:"list"`
- // The IDs of the subnets in which to launch the instance. To specify multiple
- // subnets, separate them using commas; for example, "subnet-1234abcdeexample1,
- // subnet-0987cdef6example2".
+ // The ID of the subnet in which to launch the instance.
SubnetId *string `locationName:"subnetId" type:"string"`
// The Base64-encoded user data for the instance. User data is limited to 16
@@ -106096,11 +113238,11 @@ type Reservation struct {
// The instances.
Instances []*Instance `locationName:"instancesSet" locationNameList:"item" type:"list"`
- // The ID of the AWS account that owns the reservation.
+ // The ID of the Amazon Web Services account that owns the reservation.
OwnerId *string `locationName:"ownerId" type:"string"`
// The ID of the requester that launched the instances on your behalf (for example,
- // AWS Management Console or Auto Scaling).
+ // Amazon Web Services Management Console or Auto Scaling).
RequesterId *string `locationName:"requesterId" type:"string"`
// The ID of the reservation.
@@ -106789,8 +113931,8 @@ type ReservedInstancesOffering struct {
InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"`
// Indicates whether the offering is available through the Reserved Instance
- // Marketplace (resale) or AWS. If it's a Reserved Instance Marketplace offering,
- // this is true.
+ // Marketplace (resale) or Amazon Web Services. If it's a Reserved Instance
+ // Marketplace offering, this is true.
Marketplace *bool `locationName:"marketplace" type:"boolean"`
// If convertible it can be exchanged for Reserved Instances of the same or
@@ -106922,6 +114064,93 @@ func (s *ReservedInstancesOffering) SetUsagePrice(v float64) *ReservedInstancesO
return s
}
+type ResetAddressAttributeInput struct {
+ _ struct{} `type:"structure"`
+
+ // [EC2-VPC] The allocation ID.
+ //
+ // AllocationId is a required field
+ AllocationId *string `type:"string" required:"true"`
+
+ // The attribute of the IP address.
+ //
+ // Attribute is a required field
+ Attribute *string `type:"string" required:"true" enum:"AddressAttributeName"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s ResetAddressAttributeInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ResetAddressAttributeInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ResetAddressAttributeInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ResetAddressAttributeInput"}
+ if s.AllocationId == nil {
+ invalidParams.Add(request.NewErrParamRequired("AllocationId"))
+ }
+ if s.Attribute == nil {
+ invalidParams.Add(request.NewErrParamRequired("Attribute"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAllocationId sets the AllocationId field's value.
+func (s *ResetAddressAttributeInput) SetAllocationId(v string) *ResetAddressAttributeInput {
+ s.AllocationId = &v
+ return s
+}
+
+// SetAttribute sets the Attribute field's value.
+func (s *ResetAddressAttributeInput) SetAttribute(v string) *ResetAddressAttributeInput {
+ s.Attribute = &v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *ResetAddressAttributeInput) SetDryRun(v bool) *ResetAddressAttributeInput {
+ s.DryRun = &v
+ return s
+}
+
+type ResetAddressAttributeOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the IP address.
+ Address *AddressAttribute `locationName:"address" type:"structure"`
+}
+
+// String returns the string representation
+func (s ResetAddressAttributeOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ResetAddressAttributeOutput) GoString() string {
+ return s.String()
+}
+
+// SetAddress sets the Address field's value.
+func (s *ResetAddressAttributeOutput) SetAddress(v *AddressAttribute) *ResetAddressAttributeOutput {
+ s.Address = v
+ return s
+}
+
type ResetEbsDefaultKmsKeyIdInput struct {
_ struct{} `type:"structure"`
@@ -106951,7 +114180,8 @@ func (s *ResetEbsDefaultKmsKeyIdInput) SetDryRun(v bool) *ResetEbsDefaultKmsKeyI
type ResetEbsDefaultKmsKeyIdOutput struct {
_ struct{} `type:"structure"`
- // The Amazon Resource Name (ARN) of the default CMK for EBS encryption by default.
+ // The Amazon Resource Name (ARN) of the default KMS key for EBS encryption
+ // by default.
KmsKeyId *string `locationName:"kmsKeyId" type:"string"`
}
@@ -107412,7 +114642,7 @@ type ResponseLaunchTemplateData struct {
CapacityReservationSpecification *LaunchTemplateCapacityReservationSpecificationResponse `locationName:"capacityReservationSpecification" type:"structure"`
// The CPU options for the instance. For more information, see Optimizing CPU
- // Options (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html)
+ // options (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html)
// in the Amazon Elastic Compute Cloud User Guide.
CpuOptions *LaunchTemplateCpuOptions `locationName:"cpuOptions" type:"structure"`
@@ -107432,11 +114662,11 @@ type ResponseLaunchTemplateData struct {
// The elastic inference accelerator for the instance.
ElasticInferenceAccelerators []*LaunchTemplateElasticInferenceAcceleratorResponse `locationName:"elasticInferenceAcceleratorSet" locationNameList:"item" type:"list"`
- // Indicates whether the instance is enabled for AWS Nitro Enclaves.
+ // Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves.
EnclaveOptions *LaunchTemplateEnclaveOptions `locationName:"enclaveOptions" type:"structure"`
// Indicates whether an instance is configured for hibernation. For more information,
- // see Hibernate Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html)
+ // see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html)
// in the Amazon Elastic Compute Cloud User Guide.
HibernationOptions *LaunchTemplateHibernationOptions `locationName:"hibernationOptions" type:"structure"`
@@ -107466,7 +114696,7 @@ type ResponseLaunchTemplateData struct {
LicenseSpecifications []*LaunchTemplateLicenseConfiguration `locationName:"licenseSet" locationNameList:"item" type:"list"`
// The metadata options for the instance. For more information, see Instance
- // Metadata and User Data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html)
+ // metadata and user data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html)
// in the Amazon Elastic Compute Cloud User Guide.
MetadataOptions *LaunchTemplateInstanceMetadataOptions `locationName:"metadataOptions" type:"structure"`
@@ -107984,6 +115214,9 @@ type RevokeSecurityGroupEgressInput struct {
// number.
IpProtocol *string `locationName:"ipProtocol" type:"string"`
+ // The IDs of the security group rules.
+ SecurityGroupRuleIds []*string `locationName:"SecurityGroupRuleId" locationNameList:"item" type:"list"`
+
// Not supported. Use a set of IP permissions to specify a destination security
// group.
SourceSecurityGroupName *string `locationName:"sourceSecurityGroupName" type:"string"`
@@ -108055,6 +115288,12 @@ func (s *RevokeSecurityGroupEgressInput) SetIpProtocol(v string) *RevokeSecurity
return s
}
+// SetSecurityGroupRuleIds sets the SecurityGroupRuleIds field's value.
+func (s *RevokeSecurityGroupEgressInput) SetSecurityGroupRuleIds(v []*string) *RevokeSecurityGroupEgressInput {
+ s.SecurityGroupRuleIds = v
+ return s
+}
+
// SetSourceSecurityGroupName sets the SourceSecurityGroupName field's value.
func (s *RevokeSecurityGroupEgressInput) SetSourceSecurityGroupName(v string) *RevokeSecurityGroupEgressInput {
s.SourceSecurityGroupName = &v
@@ -108140,6 +115379,9 @@ type RevokeSecurityGroupIngressInput struct {
// Use -1 to specify all.
IpProtocol *string `type:"string"`
+ // The IDs of the security group rules.
+ SecurityGroupRuleIds []*string `locationName:"SecurityGroupRuleId" locationNameList:"item" type:"list"`
+
// [EC2-Classic, default VPC] The name of the source security group. You can't
// specify this parameter in combination with the following parameters: the
// CIDR IP address range, the start of the port range, the IP protocol, and
@@ -108148,12 +115390,12 @@ type RevokeSecurityGroupIngressInput struct {
// use a set of IP permissions instead.
SourceSecurityGroupName *string `type:"string"`
- // [EC2-Classic] The AWS account ID of the source security group, if the source
- // security group is in a different account. You can't specify this parameter
- // in combination with the following parameters: the CIDR IP address range,
- // the IP protocol, the start of the port range, and the end of the port range.
- // To revoke a specific rule for an IP protocol and port range, use a set of
- // IP permissions instead.
+ // [EC2-Classic] The Amazon Web Services account ID of the source security group,
+ // if the source security group is in a different account. You can't specify
+ // this parameter in combination with the following parameters: the CIDR IP
+ // address range, the IP protocol, the start of the port range, and the end
+ // of the port range. To revoke a specific rule for an IP protocol and port
+ // range, use a set of IP permissions instead.
SourceSecurityGroupOwnerId *string `type:"string"`
// The end of port range for the TCP and UDP protocols, or an ICMP code number.
@@ -108213,6 +115455,12 @@ func (s *RevokeSecurityGroupIngressInput) SetIpProtocol(v string) *RevokeSecurit
return s
}
+// SetSecurityGroupRuleIds sets the SecurityGroupRuleIds field's value.
+func (s *RevokeSecurityGroupIngressInput) SetSecurityGroupRuleIds(v []*string) *RevokeSecurityGroupIngressInput {
+ s.SecurityGroupRuleIds = v
+ return s
+}
+
// SetSourceSecurityGroupName sets the SourceSecurityGroupName field's value.
func (s *RevokeSecurityGroupIngressInput) SetSourceSecurityGroupName(v string) *RevokeSecurityGroupIngressInput {
s.SourceSecurityGroupName = &v
@@ -108277,7 +115525,7 @@ type Route struct {
// The IPv6 CIDR block used for the destination match.
DestinationIpv6CidrBlock *string `locationName:"destinationIpv6CidrBlock" type:"string"`
- // The prefix of the AWS service.
+ // The prefix of the Amazon Web Service.
DestinationPrefixListId *string `locationName:"destinationPrefixListId" type:"string"`
// The ID of the egress-only internet gateway.
@@ -108289,7 +115537,7 @@ type Route struct {
// The ID of a NAT instance in your VPC.
InstanceId *string `locationName:"instanceId" type:"string"`
- // The AWS account ID of the owner of the instance.
+ // The ID of the Amazon Web Services account that owns the instance.
InstanceOwnerId *string `locationName:"instanceOwnerId" type:"string"`
// The ID of the local gateway.
@@ -108430,7 +115678,7 @@ type RouteTable struct {
// The associations between the route table and one or more subnets or a gateway.
Associations []*RouteTableAssociation `locationName:"associationSet" locationNameList:"item" type:"list"`
- // The ID of the AWS account that owns the route table.
+ // The ID of the Amazon Web Services account that owns the route table.
OwnerId *string `locationName:"ownerId" type:"string"`
// Any virtual private gateway (VGW) propagating routes.
@@ -108610,7 +115858,10 @@ type RunInstancesInput struct {
// Reserved.
AdditionalInfo *string `locationName:"additionalInfo" type:"string"`
- // The block device mapping entries.
+ // The block device mapping, which defines the EBS volumes and instance store
+ // volumes to attach to the instance at launch. For more information, see Block
+ // device mappings (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html)
+ // in the Amazon EC2 User Guide.
BlockDeviceMappings []*BlockDeviceMapping `locationName:"BlockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"`
// Information about the Capacity Reservation targeting option. If you do not
@@ -108630,14 +115881,14 @@ type RunInstancesInput struct {
// The CPU options for the instance. For more information, see Optimizing CPU
// options (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // in the Amazon EC2 User Guide.
CpuOptions *CpuOptionsRequest `type:"structure"`
// The credit option for CPU usage of the burstable performance instance. Valid
// values are standard and unlimited. To change this attribute after launch,
// use ModifyInstanceCreditSpecification (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyInstanceCreditSpecification.html).
// For more information, see Burstable performance instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // in the Amazon EC2 User Guide.
//
// Default: standard (T2 instances) or unlimited (T3/T3a instances)
CreditSpecification *CreditSpecificationRequest `type:"structure"`
@@ -108669,7 +115920,7 @@ type RunInstancesInput struct {
// An elastic GPU to associate with the instance. An Elastic GPU is a GPU resource
// that you can attach to your Windows instance to accelerate the graphics performance
// of your applications. For more information, see Amazon EC2 Elastic GPUs (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/elastic-graphics.html)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // in the Amazon EC2 User Guide.
ElasticGpuSpecification []*ElasticGpuSpecification `locationNameList:"item" type:"list"`
// An elastic inference accelerator to associate with the instance. Elastic
@@ -108679,21 +115930,23 @@ type RunInstancesInput struct {
// You cannot specify accelerators from different generations in the same request.
ElasticInferenceAccelerators []*ElasticInferenceAccelerator `locationName:"ElasticInferenceAccelerator" locationNameList:"item" type:"list"`
- // Indicates whether the instance is enabled for AWS Nitro Enclaves. For more
- // information, see What is AWS Nitro Enclaves? (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html)
- // in the AWS Nitro Enclaves User Guide.
+ // Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves.
+ // For more information, see What is Amazon Web Services Nitro Enclaves? (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html)
+ // in the Amazon Web Services Nitro Enclaves User Guide.
//
- // You can't enable AWS Nitro Enclaves and hibernation on the same instance.
+ // You can't enable Amazon Web Services Nitro Enclaves and hibernation on the
+ // same instance.
EnclaveOptions *EnclaveOptionsRequest `type:"structure"`
// Indicates whether an instance is enabled for hibernation. For more information,
// see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // in the Amazon EC2 User Guide.
//
- // You can't enable hibernation and AWS Nitro Enclaves on the same instance.
+ // You can't enable hibernation and Amazon Web Services Nitro Enclaves on the
+ // same instance.
HibernationOptions *HibernationOptionsRequest `type:"structure"`
- // The IAM instance profile.
+ // The name or Amazon Resource Name (ARN) of an IAM instance profile.
IamInstanceProfile *IamInstanceProfileSpecification `locationName:"iamInstanceProfile" type:"structure"`
// The ID of the AMI. An AMI ID is required to launch an instance and must be
@@ -108713,7 +115966,7 @@ type RunInstancesInput struct {
InstanceMarketOptions *InstanceMarketOptionsRequest `type:"structure"`
// The instance type. For more information, see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // in the Amazon EC2 User Guide.
//
// Default: m1.small
InstanceType *string `type:"string" enum:"InstanceType"`
@@ -108741,7 +115994,7 @@ type RunInstancesInput struct {
//
// We recommend that you use PV-GRUB instead of kernels and RAM disks. For more
// information, see PV-GRUB (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // in the Amazon EC2 User Guide.
KernelId *string `type:"string"`
// The name of the key pair. You can create a key pair using CreateKeyPair (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateKeyPair.html)
@@ -108812,12 +116065,12 @@ type RunInstancesInput struct {
// The ID of the RAM disk to select. Some kernels require additional drivers
// at launch. Check the kernel requirements for information about whether you
- // need to specify a RAM disk. To find kernel requirements, go to the AWS Resource
- // Center and search for the kernel ID.
+ // need to specify a RAM disk. To find kernel requirements, go to the Amazon
+ // Web Services Resource Center and search for the kernel ID.
//
// We recommend that you use PV-GRUB instead of kernels and RAM disks. For more
// information, see PV-GRUB (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // in the Amazon EC2 User Guide.
RamdiskId *string `type:"string"`
// The IDs of the security groups. You can create a security group using CreateSecurityGroup
@@ -109291,6 +116544,47 @@ func (s *RunScheduledInstancesOutput) SetInstanceIdSet(v []*string) *RunSchedule
return s
}
+// The tags to apply to the AMI object that will be stored in the S3 bucket.
+// For more information, see Categorizing your storage using tags (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html)
+// in the Amazon Simple Storage Service User Guide.
+type S3ObjectTag struct {
+ _ struct{} `type:"structure"`
+
+ // The key of the tag.
+ //
+ // Constraints: Tag keys are case-sensitive and can be up to 128 Unicode characters
+ // in length. May not begin with aws:.
+ Key *string `type:"string"`
+
+ // The value of the tag.
+ //
+ // Constraints: Tag values are case-sensitive and can be up to 256 Unicode characters
+ // in length.
+ Value *string `type:"string"`
+}
+
+// String returns the string representation
+func (s S3ObjectTag) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s S3ObjectTag) GoString() string {
+ return s.String()
+}
+
+// SetKey sets the Key field's value.
+func (s *S3ObjectTag) SetKey(v string) *S3ObjectTag {
+ s.Key = &v
+ return s
+}
+
+// SetValue sets the Value field's value.
+func (s *S3ObjectTag) SetValue(v string) *S3ObjectTag {
+ s.Value = &v
+ return s
+}
+
// Describes the storage parameters for S3 and S3 buckets for an instance store-backed
// AMI.
type S3Storage struct {
@@ -109784,8 +117078,7 @@ type ScheduledInstancesBlockDeviceMapping struct {
// launched.
Ebs *ScheduledInstancesEbs `type:"structure"`
- // Suppresses the specified device included in the block device mapping of the
- // AMI.
+ // To omit the device from the block device mapping, specify an empty string.
NoDevice *string `type:"string"`
// The virtual device name (ephemeralN). Instance store volumes are numbered
@@ -109849,10 +117142,11 @@ type ScheduledInstancesEbs struct {
// The number of I/O operations per second (IOPS) to provision for an io1 or
// io2 volume, with a maximum ratio of 50 IOPS/GiB for io1, and 500 IOPS/GiB
// for io2. Range is 100 to 64,000 IOPS for volumes in most Regions. Maximum
- // IOPS of 64,000 is guaranteed only on Nitro-based instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances).
+ // IOPS of 64,000 is guaranteed only on instances built on the Nitro System
+ // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances).
// Other instance families guarantee performance up to 32,000 IOPS. For more
- // information, see Amazon EBS Volume Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // information, see Amazon EBS volume types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html)
+ // in the Amazon EC2 User Guide.
//
// This parameter is valid only for Provisioned IOPS SSD (io1 and io2) volumes.
Iops *int64 `type:"integer"`
@@ -110506,9 +117800,6 @@ type SearchTransitGatewayMulticastGroupsInput struct {
//
// * source-type - The source type. Valid values are igmp | static.
//
- // * state - The state of the subnet association. Valid values are associated
- // | associated | disassociated | disassociating.
- //
// * subnet-id - The ID of the subnet.
//
// * transit-gateway-attachment-id - The id of the transit gateway attachment.
@@ -110746,7 +118037,7 @@ func (s *SearchTransitGatewayRoutesOutput) SetRoutes(v []*TransitGatewayRoute) *
return s
}
-// Describes a security group
+// Describes a security group.
type SecurityGroup struct {
_ struct{} `type:"structure"`
@@ -110765,7 +118056,7 @@ type SecurityGroup struct {
// [VPC only] The outbound rules associated with the security group.
IpPermissionsEgress []*IpPermission `locationName:"ipPermissionsEgress" locationNameList:"item" type:"list"`
- // The AWS account ID of the owner of the security group.
+ // The Amazon Web Services account ID of the owner of the security group.
OwnerId *string `locationName:"ownerId" type:"string"`
// Any tags assigned to the security group.
@@ -110908,6 +118199,325 @@ func (s *SecurityGroupReference) SetVpcPeeringConnectionId(v string) *SecurityGr
return s
}
+// Describes a security group rule.
+type SecurityGroupRule struct {
+ _ struct{} `type:"structure"`
+
+ // The IPv4 CIDR range.
+ CidrIpv4 *string `locationName:"cidrIpv4" type:"string"`
+
+ // The IPv6 CIDR range.
+ CidrIpv6 *string `locationName:"cidrIpv6" type:"string"`
+
+ // The security group rule description.
+ Description *string `locationName:"description" type:"string"`
+
+ // The start of port range for the TCP and UDP protocols, or an ICMP/ICMPv6
+ // type. A value of -1 indicates all ICMP/ICMPv6 types. If you specify all ICMP/ICMPv6
+ // types, you must specify all codes.
+ FromPort *int64 `locationName:"fromPort" type:"integer"`
+
+ // The ID of the security group.
+ GroupId *string `locationName:"groupId" type:"string"`
+
+ // The ID of the Amazon Web Services account that owns the security group.
+ GroupOwnerId *string `locationName:"groupOwnerId" type:"string"`
+
+ // The IP protocol name (tcp, udp, icmp, icmpv6) or number (see Protocol Numbers
+ // (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)).
+ //
+ // Use -1 to specify all protocols.
+ IpProtocol *string `locationName:"ipProtocol" type:"string"`
+
+ // Indicates whether the security group rule is an outbound rule.
+ IsEgress *bool `locationName:"isEgress" type:"boolean"`
+
+ // The ID of the prefix list.
+ PrefixListId *string `locationName:"prefixListId" type:"string"`
+
+ // Describes the security group that is referenced in the rule.
+ ReferencedGroupInfo *ReferencedSecurityGroup `locationName:"referencedGroupInfo" type:"structure"`
+
+ // The ID of the security group rule.
+ SecurityGroupRuleId *string `locationName:"securityGroupRuleId" type:"string"`
+
+ // The tags applied to the security group rule.
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
+
+ // The end of port range for the TCP and UDP protocols, or an ICMP/ICMPv6 code.
+ // A value of -1 indicates all ICMP/ICMPv6 codes. If you specify all ICMP/ICMPv6
+ // types, you must specify all codes.
+ ToPort *int64 `locationName:"toPort" type:"integer"`
+}
+
+// String returns the string representation
+func (s SecurityGroupRule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SecurityGroupRule) GoString() string {
+ return s.String()
+}
+
+// SetCidrIpv4 sets the CidrIpv4 field's value.
+func (s *SecurityGroupRule) SetCidrIpv4(v string) *SecurityGroupRule {
+ s.CidrIpv4 = &v
+ return s
+}
+
+// SetCidrIpv6 sets the CidrIpv6 field's value.
+func (s *SecurityGroupRule) SetCidrIpv6(v string) *SecurityGroupRule {
+ s.CidrIpv6 = &v
+ return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *SecurityGroupRule) SetDescription(v string) *SecurityGroupRule {
+ s.Description = &v
+ return s
+}
+
+// SetFromPort sets the FromPort field's value.
+func (s *SecurityGroupRule) SetFromPort(v int64) *SecurityGroupRule {
+ s.FromPort = &v
+ return s
+}
+
+// SetGroupId sets the GroupId field's value.
+func (s *SecurityGroupRule) SetGroupId(v string) *SecurityGroupRule {
+ s.GroupId = &v
+ return s
+}
+
+// SetGroupOwnerId sets the GroupOwnerId field's value.
+func (s *SecurityGroupRule) SetGroupOwnerId(v string) *SecurityGroupRule {
+ s.GroupOwnerId = &v
+ return s
+}
+
+// SetIpProtocol sets the IpProtocol field's value.
+func (s *SecurityGroupRule) SetIpProtocol(v string) *SecurityGroupRule {
+ s.IpProtocol = &v
+ return s
+}
+
+// SetIsEgress sets the IsEgress field's value.
+func (s *SecurityGroupRule) SetIsEgress(v bool) *SecurityGroupRule {
+ s.IsEgress = &v
+ return s
+}
+
+// SetPrefixListId sets the PrefixListId field's value.
+func (s *SecurityGroupRule) SetPrefixListId(v string) *SecurityGroupRule {
+ s.PrefixListId = &v
+ return s
+}
+
+// SetReferencedGroupInfo sets the ReferencedGroupInfo field's value.
+func (s *SecurityGroupRule) SetReferencedGroupInfo(v *ReferencedSecurityGroup) *SecurityGroupRule {
+ s.ReferencedGroupInfo = v
+ return s
+}
+
+// SetSecurityGroupRuleId sets the SecurityGroupRuleId field's value.
+func (s *SecurityGroupRule) SetSecurityGroupRuleId(v string) *SecurityGroupRule {
+ s.SecurityGroupRuleId = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *SecurityGroupRule) SetTags(v []*Tag) *SecurityGroupRule {
+ s.Tags = v
+ return s
+}
+
+// SetToPort sets the ToPort field's value.
+func (s *SecurityGroupRule) SetToPort(v int64) *SecurityGroupRule {
+ s.ToPort = &v
+ return s
+}
+
+// Describes the description of a security group rule.
+//
+// You can use this when you want to update the security group rule description
+// for either an inbound or outbound rule.
+type SecurityGroupRuleDescription struct {
+ _ struct{} `type:"structure"`
+
+ // The description of the security group rule.
+ Description *string `type:"string"`
+
+ // The ID of the security group rule.
+ SecurityGroupRuleId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s SecurityGroupRuleDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SecurityGroupRuleDescription) GoString() string {
+ return s.String()
+}
+
+// SetDescription sets the Description field's value.
+func (s *SecurityGroupRuleDescription) SetDescription(v string) *SecurityGroupRuleDescription {
+ s.Description = &v
+ return s
+}
+
+// SetSecurityGroupRuleId sets the SecurityGroupRuleId field's value.
+func (s *SecurityGroupRuleDescription) SetSecurityGroupRuleId(v string) *SecurityGroupRuleDescription {
+ s.SecurityGroupRuleId = &v
+ return s
+}
+
+// Describes a security group rule.
+//
+// You must specify exactly one of the following parameters, based on the rule
+// type:
+//
+// * CidrIpv4
+//
+// * CidrIpv6
+//
+// * PrefixListId
+//
+// * ReferencedGroupId
+//
+// When you modify a rule, you cannot change the rule type. For example, if
+// the rule uses an IPv4 address range, you must use CidrIpv4 to specify a new
+// IPv4 address range.
+type SecurityGroupRuleRequest struct {
+ _ struct{} `type:"structure"`
+
+ // The IPv4 CIDR range. To specify a single IPv4 address, use the /32 prefix
+ // length.
+ CidrIpv4 *string `type:"string"`
+
+ // The IPv6 CIDR range. To specify a single IPv6 address, use the /128 prefix
+ // length.
+ CidrIpv6 *string `type:"string"`
+
+ // The description of the security group rule.
+ Description *string `type:"string"`
+
+ // The start of port range for the TCP and UDP protocols, or an ICMP/ICMPv6
+ // type. A value of -1 indicates all ICMP/ICMPv6 types. If you specify all ICMP/ICMPv6
+ // types, you must specify all codes.
+ FromPort *int64 `type:"integer"`
+
+ // The IP protocol name (tcp, udp, icmp, icmpv6) or number (see Protocol Numbers
+ // (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)).
+ //
+ // Use -1 to specify all protocols.
+ IpProtocol *string `type:"string"`
+
+ // The ID of the prefix list.
+ PrefixListId *string `type:"string"`
+
+ // The ID of the security group that is referenced in the security group rule.
+ ReferencedGroupId *string `type:"string"`
+
+ // The end of port range for the TCP and UDP protocols, or an ICMP/ICMPv6 code.
+ // A value of -1 indicates all ICMP/ICMPv6 codes. If you specify all ICMP/ICMPv6
+ // types, you must specify all codes.
+ ToPort *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s SecurityGroupRuleRequest) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SecurityGroupRuleRequest) GoString() string {
+ return s.String()
+}
+
+// SetCidrIpv4 sets the CidrIpv4 field's value.
+func (s *SecurityGroupRuleRequest) SetCidrIpv4(v string) *SecurityGroupRuleRequest {
+ s.CidrIpv4 = &v
+ return s
+}
+
+// SetCidrIpv6 sets the CidrIpv6 field's value.
+func (s *SecurityGroupRuleRequest) SetCidrIpv6(v string) *SecurityGroupRuleRequest {
+ s.CidrIpv6 = &v
+ return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *SecurityGroupRuleRequest) SetDescription(v string) *SecurityGroupRuleRequest {
+ s.Description = &v
+ return s
+}
+
+// SetFromPort sets the FromPort field's value.
+func (s *SecurityGroupRuleRequest) SetFromPort(v int64) *SecurityGroupRuleRequest {
+ s.FromPort = &v
+ return s
+}
+
+// SetIpProtocol sets the IpProtocol field's value.
+func (s *SecurityGroupRuleRequest) SetIpProtocol(v string) *SecurityGroupRuleRequest {
+ s.IpProtocol = &v
+ return s
+}
+
+// SetPrefixListId sets the PrefixListId field's value.
+func (s *SecurityGroupRuleRequest) SetPrefixListId(v string) *SecurityGroupRuleRequest {
+ s.PrefixListId = &v
+ return s
+}
+
+// SetReferencedGroupId sets the ReferencedGroupId field's value.
+func (s *SecurityGroupRuleRequest) SetReferencedGroupId(v string) *SecurityGroupRuleRequest {
+ s.ReferencedGroupId = &v
+ return s
+}
+
+// SetToPort sets the ToPort field's value.
+func (s *SecurityGroupRuleRequest) SetToPort(v int64) *SecurityGroupRuleRequest {
+ s.ToPort = &v
+ return s
+}
+
+// Describes an update to a security group rule.
+type SecurityGroupRuleUpdate struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the security group rule.
+ SecurityGroupRule *SecurityGroupRuleRequest `type:"structure"`
+
+ // The ID of the security group rule.
+ SecurityGroupRuleId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s SecurityGroupRuleUpdate) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SecurityGroupRuleUpdate) GoString() string {
+ return s.String()
+}
+
+// SetSecurityGroupRule sets the SecurityGroupRule field's value.
+func (s *SecurityGroupRuleUpdate) SetSecurityGroupRule(v *SecurityGroupRuleRequest) *SecurityGroupRuleUpdate {
+ s.SecurityGroupRule = v
+ return s
+}
+
+// SetSecurityGroupRuleId sets the SecurityGroupRuleId field's value.
+func (s *SecurityGroupRuleUpdate) SetSecurityGroupRuleId(v string) *SecurityGroupRuleUpdate {
+ s.SecurityGroupRuleId = &v
+ return s
+}
+
type SendDiagnosticInterruptInput struct {
_ struct{} `type:"structure"`
@@ -111374,16 +118984,21 @@ type Snapshot struct {
// Indicates whether the snapshot is encrypted.
Encrypted *bool `locationName:"encrypted" type:"boolean"`
- // The Amazon Resource Name (ARN) of the AWS Key Management Service (AWS KMS)
- // customer master key (CMK) that was used to protect the volume encryption
- // key for the parent volume.
+ // The Amazon Resource Name (ARN) of the Key Management Service (KMS) KMS key
+ // that was used to protect the volume encryption key for the parent volume.
KmsKeyId *string `locationName:"kmsKeyId" type:"string"`
- // The AWS owner alias, from an Amazon-maintained list (amazon). This is not
- // the user-configured AWS account alias set using the IAM console.
+ // The ARN of the Outpost on which the snapshot is stored. For more information,
+ // see Amazon EBS local snapshots on Outposts (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snapshots-outposts.html)
+ // in the Amazon Elastic Compute Cloud User Guide.
+ OutpostArn *string `locationName:"outpostArn" type:"string"`
+
+ // The Amazon Web Services owner alias, from an Amazon-maintained list (amazon).
+ // This is not the user-configured Amazon Web Services account alias set using
+ // the IAM console.
OwnerAlias *string `locationName:"ownerAlias" type:"string"`
- // The AWS account ID of the EBS snapshot owner.
+ // The ID of the Amazon Web Services account that owns the EBS snapshot.
OwnerId *string `locationName:"ownerId" type:"string"`
// The progress of the snapshot, as a percentage.
@@ -111400,9 +119015,9 @@ type Snapshot struct {
State *string `locationName:"status" type:"string" enum:"SnapshotState"`
// Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy
- // operation fails (for example, if the proper AWS Key Management Service (AWS
- // KMS) permissions are not obtained) this field displays error state details
- // to help you diagnose why the error occurred. This parameter is only returned
+ // operation fails (for example, if the proper Key Management Service (KMS)
+ // permissions are not obtained) this field displays error state details to
+ // help you diagnose why the error occurred. This parameter is only returned
// by DescribeSnapshots.
StateMessage *string `locationName:"statusMessage" type:"string"`
@@ -111452,6 +119067,12 @@ func (s *Snapshot) SetKmsKeyId(v string) *Snapshot {
return s
}
+// SetOutpostArn sets the OutpostArn field's value.
+func (s *Snapshot) SetOutpostArn(v string) *Snapshot {
+ s.OutpostArn = &v
+ return s
+}
+
// SetOwnerAlias sets the OwnerAlias field's value.
func (s *Snapshot) SetOwnerAlias(v string) *Snapshot {
s.OwnerAlias = &v
@@ -111626,7 +119247,7 @@ type SnapshotDiskContainer struct {
// The format of the disk image being imported.
//
- // Valid values: VHD | VMDK
+ // Valid values: VHD | VMDK | RAW
Format *string `type:"string"`
// The URL to the Amazon S3-based disk image being imported. It can either be
@@ -111682,6 +119303,11 @@ type SnapshotInfo struct {
// Indicates whether the snapshot is encrypted.
Encrypted *bool `locationName:"encrypted" type:"boolean"`
+ // The ARN of the Outpost on which the snapshot is stored. For more information,
+ // see Amazon EBS local snapshots on Outposts (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snapshots-outposts.html)
+ // in the Amazon Elastic Compute Cloud User Guide.
+ OutpostArn *string `locationName:"outpostArn" type:"string"`
+
// Account id used when creating this snapshot.
OwnerId *string `locationName:"ownerId" type:"string"`
@@ -111730,6 +119356,12 @@ func (s *SnapshotInfo) SetEncrypted(v bool) *SnapshotInfo {
return s
}
+// SetOutpostArn sets the OutpostArn field's value.
+func (s *SnapshotInfo) SetOutpostArn(v string) *SnapshotInfo {
+ s.OutpostArn = &v
+ return s
+}
+
// SetOwnerId sets the OwnerId field's value.
func (s *SnapshotInfo) SetOwnerId(v string) *SnapshotInfo {
s.OwnerId = &v
@@ -111794,8 +119426,7 @@ type SnapshotTaskDetail struct {
// The format of the disk image from which the snapshot is created.
Format *string `locationName:"format" type:"string"`
- // The identifier for the AWS Key Management Service (AWS KMS) customer master
- // key (CMK) that was used to create the encrypted snapshot.
+ // The identifier for the KMS key that was used to create the encrypted snapshot.
KmsKeyId *string `locationName:"kmsKeyId" type:"string"`
// The percentage of completion for the import snapshot task.
@@ -111939,7 +119570,7 @@ type SpotDatafeedSubscription struct {
// The fault codes for the Spot Instance request, if any.
Fault *SpotInstanceStateFault `locationName:"fault" type:"structure"`
- // The AWS account ID of the account.
+ // The Amazon Web Services account ID of the account.
OwnerId *string `locationName:"ownerId" type:"string"`
// The prefix for the data feed files.
@@ -112045,8 +119676,8 @@ type SpotFleetLaunchSpecification struct {
// The ID of the RAM disk. Some kernels require additional drivers at launch.
// Check the kernel requirements for information about whether you need to specify
- // a RAM disk. To find kernel requirements, refer to the AWS Resource Center
- // and search for the kernel ID.
+ // a RAM disk. To find kernel requirements, refer to the Amazon Web Services
+ // Resource Center and search for the kernel ID.
RamdiskId *string `locationName:"ramdiskId" type:"string"`
// One or more security groups. When requesting instances in a VPC, you must
@@ -112312,9 +119943,16 @@ type SpotFleetRequestConfigData struct {
// If the allocation strategy is diversified, Spot Fleet launches instances
// from all the Spot Instance pools that you specify.
//
- // If the allocation strategy is capacityOptimized, Spot Fleet launches instances
- // from Spot Instance pools with optimal capacity for the number of instances
- // that are launching.
+ // If the allocation strategy is capacityOptimized (recommended), Spot Fleet
+ // launches instances from Spot Instance pools with optimal capacity for the
+ // number of instances that are launching. To give certain instance types a
+ // higher chance of launching first, use capacityOptimizedPrioritized. Set a
+ // priority for each instance type by using the Priority parameter for LaunchTemplateOverrides.
+ // You can assign the same priority to different LaunchTemplateOverrides. EC2
+ // implements the priorities on a best-effort basis, but optimizes for capacity
+ // first. capacityOptimizedPrioritized is supported only if your Spot Fleet
+ // uses a launch template. Note that if the OnDemandAllocationStrategy is set
+ // to prioritized, the same priority is applied when fulfilling On-Demand capacity.
AllocationStrategy *string `locationName:"allocationStrategy" type:"string" enum:"AllocationStrategy"`
// A unique, case-sensitive identifier that you provide to ensure the idempotency
@@ -112322,6 +119960,9 @@ type SpotFleetRequestConfigData struct {
// see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
ClientToken *string `locationName:"clientToken" type:"string"`
+ // Reserved.
+ Context *string `locationName:"context" type:"string"`
+
// Indicates whether running Spot Instances should be terminated if you decrease
// the target capacity of the Spot Fleet request below the current size of the
// Spot Fleet.
@@ -112331,7 +119972,7 @@ type SpotFleetRequestConfigData struct {
// capacity. You cannot set this value.
FulfilledCapacity *float64 `locationName:"fulfilledCapacity" type:"double"`
- // The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM)
+ // The Amazon Resource Name (ARN) of an Identity and Access Management (IAM)
// role that grants the Spot Fleet the permission to request, launch, terminate,
// and tag instances on your behalf. For more information, see Spot Fleet prerequisites
// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-requests.html#spot-fleet-prerequisites)
@@ -112350,6 +119991,15 @@ type SpotFleetRequestConfigData struct {
// Valid only when Spot AllocationStrategy is set to lowest-price. Spot Fleet
// selects the cheapest Spot pools and evenly allocates your target Spot capacity
// across the number of Spot pools that you specify.
+ //
+ // Note that Spot Fleet attempts to draw Spot Instances from the number of pools
+ // that you specify on a best effort basis. If a pool runs out of Spot capacity
+ // before fulfilling your target capacity, Spot Fleet will continue to fulfill
+ // your request by drawing from the next cheapest pool. To ensure that your
+ // target capacity is met, you might receive Spot Instances from more than the
+ // number of pools that you specified. Similarly, if most of the pools have
+ // no Spot capacity, you might receive your full target capacity from fewer
+ // than the number of pools that you specified.
InstancePoolsToUseCount *int64 `locationName:"instancePoolsToUseCount" type:"integer"`
// The launch specifications for the Spot Fleet request. If you specify LaunchSpecifications,
@@ -112518,6 +120168,12 @@ func (s *SpotFleetRequestConfigData) SetClientToken(v string) *SpotFleetRequestC
return s
}
+// SetContext sets the Context field's value.
+func (s *SpotFleetRequestConfigData) SetContext(v string) *SpotFleetRequestConfigData {
+ s.Context = &v
+ return s
+}
+
// SetExcessCapacityTerminationPolicy sets the ExcessCapacityTerminationPolicy field's value.
func (s *SpotFleetRequestConfigData) SetExcessCapacityTerminationPolicy(v string) *SpotFleetRequestConfigData {
s.ExcessCapacityTerminationPolicy = &v
@@ -112689,8 +120345,7 @@ func (s *SpotFleetTagSpecification) SetTags(v []*Tag) *SpotFleetTagSpecification
type SpotInstanceRequest struct {
_ struct{} `type:"structure"`
- // If you specified a duration and your Spot Instance request was fulfilled,
- // this is the fixed hourly price in effect for the Spot Instance while it runs.
+ // Deprecated.
ActualBlockHourlyPrice *string `locationName:"actualBlockHourlyPrice" type:"string"`
// The Availability Zone group. If you specify the same Availability Zone group
@@ -112698,7 +120353,7 @@ type SpotInstanceRequest struct {
// Availability Zone.
AvailabilityZoneGroup *string `locationName:"availabilityZoneGroup" type:"string"`
- // The duration for the Spot Instance, in minutes.
+ // Deprecated.
BlockDurationMinutes *int64 `locationName:"blockDurationMinutes" type:"integer"`
// The date and time when the Spot Instance request was created, in UTC format
@@ -112996,20 +120651,7 @@ func (s *SpotMaintenanceStrategies) SetCapacityRebalance(v *SpotCapacityRebalanc
type SpotMarketOptions struct {
_ struct{} `type:"structure"`
- // The required duration for the Spot Instances (also known as Spot blocks),
- // in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300,
- // or 360).
- //
- // The duration period starts as soon as your Spot Instance receives its instance
- // ID. At the end of the duration period, Amazon EC2 marks the Spot Instance
- // for termination and provides a Spot Instance termination notice, which gives
- // the instance a two-minute warning before it terminates.
- //
- // You can't specify an Availability Zone group or a launch group if you specify
- // a duration.
- //
- // New accounts or accounts with no previous billing history with AWS are not
- // eligible for Spot Instances with a defined duration (also known as Spot blocks).
+ // Deprecated.
BlockDurationMinutes *int64 `type:"integer"`
// The behavior when a Spot Instance is interrupted. The default is terminate.
@@ -113020,8 +120662,8 @@ type SpotMarketOptions struct {
MaxPrice *string `type:"string"`
// The Spot Instance request type. For RunInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances),
- // persistent Spot Instance requests are only supported when InstanceInterruptionBehavior
- // is set to either hibernate or stop.
+ // persistent Spot Instance requests are only supported when the instance interruption
+ // behavior is either hibernate or stop.
SpotInstanceType *string `type:"string" enum:"SpotInstanceType"`
// The end date of the request, in UTC format (YYYY-MM-DDTHH:MM:SSZ). Supported
@@ -113090,9 +120732,16 @@ type SpotOptions struct {
// If the allocation strategy is diversified, EC2 Fleet launches instances from
// all of the Spot Instance pools that you specify.
//
- // If the allocation strategy is capacity-optimized, EC2 Fleet launches instances
- // from Spot Instance pools with optimal capacity for the number of instances
- // that are launching.
+ // If the allocation strategy is capacity-optimized (recommended), EC2 Fleet
+ // launches instances from Spot Instance pools with optimal capacity for the
+ // number of instances that are launching. To give certain instance types a
+ // higher chance of launching first, use capacity-optimized-prioritized. Set
+ // a priority for each instance type by using the Priority parameter for LaunchTemplateOverrides.
+ // You can assign the same priority to different LaunchTemplateOverrides. EC2
+ // implements the priorities on a best-effort basis, but optimizes for capacity
+ // first. capacity-optimized-prioritized is supported only if your fleet uses
+ // a launch template. Note that if the On-Demand AllocationStrategy is set to
+ // prioritized, the same priority is applied when fulfilling On-Demand capacity.
AllocationStrategy *string `locationName:"allocationStrategy" type:"string" enum:"SpotAllocationStrategy"`
// The behavior when a Spot Instance is interrupted. The default is terminate.
@@ -113102,6 +120751,15 @@ type SpotOptions struct {
// Valid only when AllocationStrategy is set to lowest-price. EC2 Fleet selects
// the cheapest Spot pools and evenly allocates your target Spot capacity across
// the number of Spot pools that you specify.
+ //
+ // Note that EC2 Fleet attempts to draw Spot Instances from the number of pools
+ // that you specify on a best effort basis. If a pool runs out of Spot capacity
+ // before fulfilling your target capacity, EC2 Fleet will continue to fulfill
+ // your request by drawing from the next cheapest pool. To ensure that your
+ // target capacity is met, you might receive Spot Instances from more than the
+ // number of pools that you specified. Similarly, if most of the pools have
+ // no Spot capacity, you might receive your full target capacity from fewer
+ // than the number of pools that you specified.
InstancePoolsToUseCount *int64 `locationName:"instancePoolsToUseCount" type:"integer"`
// The strategies for managing your workloads on your Spot Instances that will
@@ -113196,9 +120854,16 @@ type SpotOptionsRequest struct {
// If the allocation strategy is diversified, EC2 Fleet launches instances from
// all of the Spot Instance pools that you specify.
//
- // If the allocation strategy is capacity-optimized, EC2 Fleet launches instances
- // from Spot Instance pools with optimal capacity for the number of instances
- // that are launching.
+ // If the allocation strategy is capacity-optimized (recommended), EC2 Fleet
+ // launches instances from Spot Instance pools with optimal capacity for the
+ // number of instances that are launching. To give certain instance types a
+ // higher chance of launching first, use capacity-optimized-prioritized. Set
+ // a priority for each instance type by using the Priority parameter for LaunchTemplateOverrides.
+ // You can assign the same priority to different LaunchTemplateOverrides. EC2
+ // implements the priorities on a best-effort basis, but optimizes for capacity
+ // first. capacity-optimized-prioritized is supported only if your fleet uses
+ // a launch template. Note that if the On-Demand AllocationStrategy is set to
+ // prioritized, the same priority is applied when fulfilling On-Demand capacity.
AllocationStrategy *string `type:"string" enum:"SpotAllocationStrategy"`
// The behavior when a Spot Instance is interrupted. The default is terminate.
@@ -113208,6 +120873,15 @@ type SpotOptionsRequest struct {
// Valid only when Spot AllocationStrategy is set to lowest-price. EC2 Fleet
// selects the cheapest Spot pools and evenly allocates your target Spot capacity
// across the number of Spot pools that you specify.
+ //
+ // Note that EC2 Fleet attempts to draw Spot Instances from the number of pools
+ // that you specify on a best effort basis. If a pool runs out of Spot capacity
+ // before fulfilling your target capacity, EC2 Fleet will continue to fulfill
+ // your request by drawing from the next cheapest pool. To ensure that your
+ // target capacity is met, you might receive Spot Instances from more than the
+ // number of pools that you specified. Similarly, if most of the pools have
+ // no Spot capacity, you might receive your full target capacity from fewer
+ // than the number of pools that you specified.
InstancePoolsToUseCount *int64 `type:"integer"`
// The strategies for managing your Spot Instances that are at an elevated risk
@@ -113624,7 +121298,7 @@ type StartNetworkInsightsAnalysisInput struct {
_ struct{} `type:"structure"`
// Unique, case-sensitive identifier that you provide to ensure the idempotency
- // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
+ // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
ClientToken *string `type:"string" idempotencyToken:"true"`
// Checks whether you have the required permissions for the action, without
@@ -113885,7 +121559,7 @@ type StopInstancesInput struct {
// Hibernates the instance if the instance was enabled for hibernation at launch.
// If the instance cannot hibernate successfully, a normal shutdown occurs.
// For more information, see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // in the Amazon EC2 User Guide.
//
// Default: false
Hibernate *bool `type:"boolean"`
@@ -114023,6 +121697,85 @@ func (s *StorageLocation) SetKey(v string) *StorageLocation {
return s
}
+// The information about the AMI store task, including the progress of the task.
+type StoreImageTaskResult struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the AMI that is being stored.
+ AmiId *string `locationName:"amiId" type:"string"`
+
+ // The name of the S3 bucket that contains the stored AMI object.
+ Bucket *string `locationName:"bucket" type:"string"`
+
+ // The progress of the task as a percentage.
+ ProgressPercentage *int64 `locationName:"progressPercentage" type:"integer"`
+
+ // The name of the stored AMI object in the bucket.
+ S3objectKey *string `locationName:"s3objectKey" type:"string"`
+
+ // If the tasks fails, the reason for the failure is returned. If the task succeeds,
+ // null is returned.
+ StoreTaskFailureReason *string `locationName:"storeTaskFailureReason" type:"string"`
+
+ // The state of the store task (InProgress, Completed, or Failed).
+ StoreTaskState *string `locationName:"storeTaskState" type:"string"`
+
+ // The time the task started.
+ TaskStartTime *time.Time `locationName:"taskStartTime" type:"timestamp"`
+}
+
+// String returns the string representation
+func (s StoreImageTaskResult) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StoreImageTaskResult) GoString() string {
+ return s.String()
+}
+
+// SetAmiId sets the AmiId field's value.
+func (s *StoreImageTaskResult) SetAmiId(v string) *StoreImageTaskResult {
+ s.AmiId = &v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *StoreImageTaskResult) SetBucket(v string) *StoreImageTaskResult {
+ s.Bucket = &v
+ return s
+}
+
+// SetProgressPercentage sets the ProgressPercentage field's value.
+func (s *StoreImageTaskResult) SetProgressPercentage(v int64) *StoreImageTaskResult {
+ s.ProgressPercentage = &v
+ return s
+}
+
+// SetS3objectKey sets the S3objectKey field's value.
+func (s *StoreImageTaskResult) SetS3objectKey(v string) *StoreImageTaskResult {
+ s.S3objectKey = &v
+ return s
+}
+
+// SetStoreTaskFailureReason sets the StoreTaskFailureReason field's value.
+func (s *StoreImageTaskResult) SetStoreTaskFailureReason(v string) *StoreImageTaskResult {
+ s.StoreTaskFailureReason = &v
+ return s
+}
+
+// SetStoreTaskState sets the StoreTaskState field's value.
+func (s *StoreImageTaskResult) SetStoreTaskState(v string) *StoreImageTaskResult {
+ s.StoreTaskState = &v
+ return s
+}
+
+// SetTaskStartTime sets the TaskStartTime field's value.
+func (s *StoreImageTaskResult) SetTaskStartTime(v time.Time) *StoreImageTaskResult {
+ s.TaskStartTime = &v
+ return s
+}
+
// Describes a subnet.
type Subnet struct {
_ struct{} `type:"structure"`
@@ -114065,7 +121818,7 @@ type Subnet struct {
// The Amazon Resource Name (ARN) of the Outpost.
OutpostArn *string `locationName:"outpostArn" type:"string"`
- // The ID of the AWS account that owns the subnet.
+ // The ID of the Amazon Web Services account that owns the subnet.
OwnerId *string `locationName:"ownerId" type:"string"`
// The current state of the subnet.
@@ -114262,6 +122015,84 @@ func (s *SubnetCidrBlockState) SetStatusMessage(v string) *SubnetCidrBlockState
return s
}
+// Describes a subnet CIDR reservation.
+type SubnetCidrReservation struct {
+ _ struct{} `type:"structure"`
+
+ // The CIDR that has been reserved.
+ Cidr *string `locationName:"cidr" type:"string"`
+
+ // The description assigned to the subnet CIDR reservation.
+ Description *string `locationName:"description" type:"string"`
+
+ // The ID of the account that owns the subnet CIDR reservation.
+ OwnerId *string `locationName:"ownerId" type:"string"`
+
+ // The type of reservation.
+ ReservationType *string `locationName:"reservationType" type:"string" enum:"SubnetCidrReservationType"`
+
+ // The ID of the subnet CIDR reservation.
+ SubnetCidrReservationId *string `locationName:"subnetCidrReservationId" type:"string"`
+
+ // The ID of the subnet.
+ SubnetId *string `locationName:"subnetId" type:"string"`
+
+ // The tags assigned to the subnet CIDR reservation.
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s SubnetCidrReservation) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SubnetCidrReservation) GoString() string {
+ return s.String()
+}
+
+// SetCidr sets the Cidr field's value.
+func (s *SubnetCidrReservation) SetCidr(v string) *SubnetCidrReservation {
+ s.Cidr = &v
+ return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *SubnetCidrReservation) SetDescription(v string) *SubnetCidrReservation {
+ s.Description = &v
+ return s
+}
+
+// SetOwnerId sets the OwnerId field's value.
+func (s *SubnetCidrReservation) SetOwnerId(v string) *SubnetCidrReservation {
+ s.OwnerId = &v
+ return s
+}
+
+// SetReservationType sets the ReservationType field's value.
+func (s *SubnetCidrReservation) SetReservationType(v string) *SubnetCidrReservation {
+ s.ReservationType = &v
+ return s
+}
+
+// SetSubnetCidrReservationId sets the SubnetCidrReservationId field's value.
+func (s *SubnetCidrReservation) SetSubnetCidrReservationId(v string) *SubnetCidrReservation {
+ s.SubnetCidrReservationId = &v
+ return s
+}
+
+// SetSubnetId sets the SubnetId field's value.
+func (s *SubnetCidrReservation) SetSubnetId(v string) *SubnetCidrReservation {
+ s.SubnetId = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *SubnetCidrReservation) SetTags(v []*Tag) *SubnetCidrReservation {
+ s.Tags = v
+ return s
+}
+
// Describes an IPv6 CIDR block associated with a subnet.
type SubnetIpv6CidrBlockAssociation struct {
_ struct{} `type:"structure"`
@@ -114452,15 +122283,16 @@ type TagSpecification struct {
// | customer-gateway | dedicated-host | dhcp-options | egress-only-internet-gateway
// | elastic-ip | elastic-gpu | export-image-task | export-instance-task | fleet
// | fpga-image | host-reservation | image| import-image-task | import-snapshot-task
- // | instance | internet-gateway | ipv4pool-ec2 | ipv6pool-ec2 | key-pair |
- // launch-template | local-gateway-route-table-vpc-association | placement-group
- // | prefix-list | natgateway | network-acl | network-interface | reserved-instances
- // |route-table | security-group| snapshot | spot-fleet-request | spot-instances-request
- // | snapshot | subnet | traffic-mirror-filter | traffic-mirror-session | traffic-mirror-target
- // | transit-gateway | transit-gateway-attachment | transit-gateway-multicast-domain
- // | transit-gateway-route-table | volume |vpc | vpc-peering-connection | vpc-endpoint
- // (for interface and gateway endpoints) | vpc-endpoint-service (for AWS PrivateLink)
- // | vpc-flow-log | vpn-connection | vpn-gateway.
+ // | instance | instance-event-window | internet-gateway | ipv4pool-ec2 | ipv6pool-ec2
+ // | key-pair | launch-template | local-gateway-route-table-vpc-association
+ // | placement-group | prefix-list | natgateway | network-acl | network-interface
+ // | reserved-instances |route-table | security-group| snapshot | spot-fleet-request
+ // | spot-instances-request | snapshot | subnet | traffic-mirror-filter | traffic-mirror-session
+ // | traffic-mirror-target | transit-gateway | transit-gateway-attachment |
+ // transit-gateway-multicast-domain | transit-gateway-route-table | volume |vpc
+ // | vpc-peering-connection | vpc-endpoint (for interface and gateway endpoints)
+ // | vpc-endpoint-service (for Amazon Web Services PrivateLink) | vpc-flow-log
+ // | vpn-connection | vpn-gateway.
//
// To tag a resource after it has been created, see CreateTags (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html).
ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"`
@@ -114505,7 +122337,7 @@ func (s *TagSpecification) SetTags(v []*Tag) *TagSpecification {
// you're willing to pay is reached, the fleet stops launching instances even
// if it hasn’t met the target capacity. The MaxTotalPrice parameters are
// located in OnDemandOptions (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_OnDemandOptions.html)
-// and SpotOptions (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_SpotOptions)
+// and SpotOptions (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_SpotOptions).
type TargetCapacitySpecification struct {
_ struct{} `type:"structure"`
@@ -115576,7 +123408,7 @@ type TransitGateway struct {
// The transit gateway options.
Options *TransitGatewayOptions `locationName:"options" type:"structure"`
- // The ID of the AWS account ID that owns the transit gateway.
+ // The ID of the Amazon Web Services account that owns the transit gateway.
OwnerId *string `locationName:"ownerId" type:"string"`
// The state of the transit gateway.
@@ -115724,7 +123556,7 @@ type TransitGatewayAttachment struct {
// The ID of the resource.
ResourceId *string `locationName:"resourceId" type:"string"`
- // The ID of the AWS account that owns the resource.
+ // The ID of the Amazon Web Services account that owns the resource.
ResourceOwnerId *string `locationName:"resourceOwnerId" type:"string"`
// The resource type. Note that the tgw-peering resource type has been deprecated.
@@ -115742,7 +123574,7 @@ type TransitGatewayAttachment struct {
// The ID of the transit gateway.
TransitGatewayId *string `locationName:"transitGatewayId" type:"string"`
- // The ID of the AWS account that owns the transit gateway.
+ // The ID of the Amazon Web Services account that owns the transit gateway.
TransitGatewayOwnerId *string `locationName:"transitGatewayOwnerId" type:"string"`
}
@@ -116291,7 +124123,8 @@ type TransitGatewayMulticastDomain struct {
// The options for the transit gateway multicast domain.
Options *TransitGatewayMulticastDomainOptions `locationName:"options" type:"structure"`
- // The ID of the AWS account that owns the transit gateway multiicast domain.
+ // The ID of the Amazon Web Services account that owns the transit gateway multicast
+ // domain.
OwnerId *string `locationName:"ownerId" type:"string"`
// The state of the transit gateway multicast domain.
@@ -116375,8 +124208,8 @@ type TransitGatewayMulticastDomainAssociation struct {
// The ID of the resource.
ResourceId *string `locationName:"resourceId" type:"string"`
- // The ID of the AWS account that owns the transit gateway multicast domain
- // association resource.
+ // The ID of the Amazon Web Services account that owns the transit gateway multicast
+ // domain association resource.
ResourceOwnerId *string `locationName:"resourceOwnerId" type:"string"`
// The type of resource, for example a VPC attachment.
@@ -116436,7 +124269,7 @@ type TransitGatewayMulticastDomainAssociations struct {
// The ID of the resource.
ResourceId *string `locationName:"resourceId" type:"string"`
- // The ID of the AWS account that owns the resource.
+ // The ID of the Amazon Web Services account that owns the resource.
ResourceOwnerId *string `locationName:"resourceOwnerId" type:"string"`
// The type of resource, for example a VPC attachment.
@@ -116565,8 +124398,8 @@ type TransitGatewayMulticastGroup struct {
// The ID of the resource.
ResourceId *string `locationName:"resourceId" type:"string"`
- // The ID of the AWS account that owns the transit gateway multicast domain
- // group resource.
+ // The ID of the Amazon Web Services account that owns the transit gateway multicast
+ // domain group resource.
ResourceOwnerId *string `locationName:"resourceOwnerId" type:"string"`
// The type of resource, for example a VPC attachment.
@@ -117509,7 +125342,7 @@ type TransitGatewayVpcAttachment struct {
// The ID of the VPC.
VpcId *string `locationName:"vpcId" type:"string"`
- // The ID of the AWS account that owns the VPC.
+ // The ID of the Amazon Web Services account that owns the VPC.
VpcOwnerId *string `locationName:"vpcOwnerId" type:"string"`
}
@@ -117619,6 +125452,89 @@ func (s *TransitGatewayVpcAttachmentOptions) SetIpv6Support(v string) *TransitGa
return s
}
+//
+// Currently available in limited preview only. If you are interested in using
+// this feature, contact your account manager.
+//
+// Information about an association between a branch network interface with
+// a trunk network interface.
+type TrunkInterfaceAssociation struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the association.
+ AssociationId *string `locationName:"associationId" type:"string"`
+
+ // The ID of the branch network interface.
+ BranchInterfaceId *string `locationName:"branchInterfaceId" type:"string"`
+
+ // The application key when you use the GRE protocol.
+ GreKey *int64 `locationName:"greKey" type:"integer"`
+
+ // The interface protocol. Valid values are VLAN and GRE.
+ InterfaceProtocol *string `locationName:"interfaceProtocol" type:"string" enum:"InterfaceProtocolType"`
+
+ // The tags for the trunk interface association.
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
+
+ // The ID of the trunk network interface.
+ TrunkInterfaceId *string `locationName:"trunkInterfaceId" type:"string"`
+
+ // The ID of the VLAN when you use the VLAN protocol.
+ VlanId *int64 `locationName:"vlanId" type:"integer"`
+}
+
+// String returns the string representation
+func (s TrunkInterfaceAssociation) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TrunkInterfaceAssociation) GoString() string {
+ return s.String()
+}
+
+// SetAssociationId sets the AssociationId field's value.
+func (s *TrunkInterfaceAssociation) SetAssociationId(v string) *TrunkInterfaceAssociation {
+ s.AssociationId = &v
+ return s
+}
+
+// SetBranchInterfaceId sets the BranchInterfaceId field's value.
+func (s *TrunkInterfaceAssociation) SetBranchInterfaceId(v string) *TrunkInterfaceAssociation {
+ s.BranchInterfaceId = &v
+ return s
+}
+
+// SetGreKey sets the GreKey field's value.
+func (s *TrunkInterfaceAssociation) SetGreKey(v int64) *TrunkInterfaceAssociation {
+ s.GreKey = &v
+ return s
+}
+
+// SetInterfaceProtocol sets the InterfaceProtocol field's value.
+func (s *TrunkInterfaceAssociation) SetInterfaceProtocol(v string) *TrunkInterfaceAssociation {
+ s.InterfaceProtocol = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *TrunkInterfaceAssociation) SetTags(v []*Tag) *TrunkInterfaceAssociation {
+ s.Tags = v
+ return s
+}
+
+// SetTrunkInterfaceId sets the TrunkInterfaceId field's value.
+func (s *TrunkInterfaceAssociation) SetTrunkInterfaceId(v string) *TrunkInterfaceAssociation {
+ s.TrunkInterfaceId = &v
+ return s
+}
+
+// SetVlanId sets the VlanId field's value.
+func (s *TrunkInterfaceAssociation) SetVlanId(v int64) *TrunkInterfaceAssociation {
+ s.VlanId = &v
+ return s
+}
+
// The VPN tunnel options.
type TunnelOption struct {
_ struct{} `type:"structure"`
@@ -117814,9 +125730,10 @@ type UnassignIpv6AddressesInput struct {
_ struct{} `type:"structure"`
// The IPv6 addresses to unassign from the network interface.
- //
- // Ipv6Addresses is a required field
- Ipv6Addresses []*string `locationName:"ipv6Addresses" locationNameList:"item" type:"list" required:"true"`
+ Ipv6Addresses []*string `locationName:"ipv6Addresses" locationNameList:"item" type:"list"`
+
+ // One or more IPv6 prefixes to unassign from the network interface.
+ Ipv6Prefixes []*string `locationName:"Ipv6Prefix" locationNameList:"item" type:"list"`
// The ID of the network interface.
//
@@ -117837,9 +125754,6 @@ func (s UnassignIpv6AddressesInput) GoString() string {
// Validate inspects the fields of the type to determine if they are valid.
func (s *UnassignIpv6AddressesInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "UnassignIpv6AddressesInput"}
- if s.Ipv6Addresses == nil {
- invalidParams.Add(request.NewErrParamRequired("Ipv6Addresses"))
- }
if s.NetworkInterfaceId == nil {
invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceId"))
}
@@ -117856,6 +125770,12 @@ func (s *UnassignIpv6AddressesInput) SetIpv6Addresses(v []*string) *UnassignIpv6
return s
}
+// SetIpv6Prefixes sets the Ipv6Prefixes field's value.
+func (s *UnassignIpv6AddressesInput) SetIpv6Prefixes(v []*string) *UnassignIpv6AddressesInput {
+ s.Ipv6Prefixes = v
+ return s
+}
+
// SetNetworkInterfaceId sets the NetworkInterfaceId field's value.
func (s *UnassignIpv6AddressesInput) SetNetworkInterfaceId(v string) *UnassignIpv6AddressesInput {
s.NetworkInterfaceId = &v
@@ -117870,6 +125790,9 @@ type UnassignIpv6AddressesOutput struct {
// The IPv6 addresses that have been unassigned from the network interface.
UnassignedIpv6Addresses []*string `locationName:"unassignedIpv6Addresses" locationNameList:"item" type:"list"`
+
+ // The IPv4 prefixes that have been unassigned from the network interface.
+ UnassignedIpv6Prefixes []*string `locationName:"unassignedIpv6PrefixSet" locationNameList:"item" type:"list"`
}
// String returns the string representation
@@ -117894,10 +125817,19 @@ func (s *UnassignIpv6AddressesOutput) SetUnassignedIpv6Addresses(v []*string) *U
return s
}
+// SetUnassignedIpv6Prefixes sets the UnassignedIpv6Prefixes field's value.
+func (s *UnassignIpv6AddressesOutput) SetUnassignedIpv6Prefixes(v []*string) *UnassignIpv6AddressesOutput {
+ s.UnassignedIpv6Prefixes = v
+ return s
+}
+
// Contains the parameters for UnassignPrivateIpAddresses.
type UnassignPrivateIpAddressesInput struct {
_ struct{} `type:"structure"`
+ // The IPv4 prefixes to unassign from the network interface.
+ Ipv4Prefixes []*string `locationName:"Ipv4Prefix" locationNameList:"item" type:"list"`
+
// The ID of the network interface.
//
// NetworkInterfaceId is a required field
@@ -117905,9 +125837,7 @@ type UnassignPrivateIpAddressesInput struct {
// The secondary private IP addresses to unassign from the network interface.
// You can specify this option multiple times to unassign more than one IP address.
- //
- // PrivateIpAddresses is a required field
- PrivateIpAddresses []*string `locationName:"privateIpAddress" locationNameList:"PrivateIpAddress" type:"list" required:"true"`
+ PrivateIpAddresses []*string `locationName:"privateIpAddress" locationNameList:"PrivateIpAddress" type:"list"`
}
// String returns the string representation
@@ -117926,9 +125856,6 @@ func (s *UnassignPrivateIpAddressesInput) Validate() error {
if s.NetworkInterfaceId == nil {
invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceId"))
}
- if s.PrivateIpAddresses == nil {
- invalidParams.Add(request.NewErrParamRequired("PrivateIpAddresses"))
- }
if invalidParams.Len() > 0 {
return invalidParams
@@ -117936,6 +125863,12 @@ func (s *UnassignPrivateIpAddressesInput) Validate() error {
return nil
}
+// SetIpv4Prefixes sets the Ipv4Prefixes field's value.
+func (s *UnassignPrivateIpAddressesInput) SetIpv4Prefixes(v []*string) *UnassignPrivateIpAddressesInput {
+ s.Ipv4Prefixes = v
+ return s
+}
+
// SetNetworkInterfaceId sets the NetworkInterfaceId field's value.
func (s *UnassignPrivateIpAddressesInput) SetNetworkInterfaceId(v string) *UnassignPrivateIpAddressesInput {
s.NetworkInterfaceId = &v
@@ -118138,7 +126071,7 @@ func (s *UnsuccessfulItem) SetResourceId(v string) *UnsuccessfulItem {
}
// Information about the error that occurred. For more information about errors,
-// see Error Codes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html).
+// see Error codes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html).
type UnsuccessfulItemError struct {
_ struct{} `type:"structure"`
@@ -118189,10 +126122,13 @@ type UpdateSecurityGroupRuleDescriptionsEgressInput struct {
// security group ID or the security group name in the request.
GroupName *string `type:"string"`
- // The IP permissions for the security group rule.
- //
- // IpPermissions is a required field
- IpPermissions []*IpPermission `locationNameList:"item" type:"list" required:"true"`
+ // The IP permissions for the security group rule. You must specify either the
+ // IP permissions or the description.
+ IpPermissions []*IpPermission `locationNameList:"item" type:"list"`
+
+ // The description for the egress security group rules. You must specify either
+ // the description or the IP permissions.
+ SecurityGroupRuleDescriptions []*SecurityGroupRuleDescription `locationName:"SecurityGroupRuleDescription" locationNameList:"item" type:"list"`
}
// String returns the string representation
@@ -118205,19 +126141,6 @@ func (s UpdateSecurityGroupRuleDescriptionsEgressInput) GoString() string {
return s.String()
}
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *UpdateSecurityGroupRuleDescriptionsEgressInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "UpdateSecurityGroupRuleDescriptionsEgressInput"}
- if s.IpPermissions == nil {
- invalidParams.Add(request.NewErrParamRequired("IpPermissions"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
// SetDryRun sets the DryRun field's value.
func (s *UpdateSecurityGroupRuleDescriptionsEgressInput) SetDryRun(v bool) *UpdateSecurityGroupRuleDescriptionsEgressInput {
s.DryRun = &v
@@ -118242,6 +126165,12 @@ func (s *UpdateSecurityGroupRuleDescriptionsEgressInput) SetIpPermissions(v []*I
return s
}
+// SetSecurityGroupRuleDescriptions sets the SecurityGroupRuleDescriptions field's value.
+func (s *UpdateSecurityGroupRuleDescriptionsEgressInput) SetSecurityGroupRuleDescriptions(v []*SecurityGroupRuleDescription) *UpdateSecurityGroupRuleDescriptionsEgressInput {
+ s.SecurityGroupRuleDescriptions = v
+ return s
+}
+
type UpdateSecurityGroupRuleDescriptionsEgressOutput struct {
_ struct{} `type:"structure"`
@@ -118283,10 +126212,13 @@ type UpdateSecurityGroupRuleDescriptionsIngressInput struct {
// either the security group ID or the security group name in the request.
GroupName *string `type:"string"`
- // The IP permissions for the security group rule.
- //
- // IpPermissions is a required field
- IpPermissions []*IpPermission `locationNameList:"item" type:"list" required:"true"`
+ // The IP permissions for the security group rule. You must specify either IP
+ // permissions or a description.
+ IpPermissions []*IpPermission `locationNameList:"item" type:"list"`
+
+ // [VPC only] The description for the ingress security group rules. You must
+ // specify either a description or IP permissions.
+ SecurityGroupRuleDescriptions []*SecurityGroupRuleDescription `locationName:"SecurityGroupRuleDescription" locationNameList:"item" type:"list"`
}
// String returns the string representation
@@ -118299,19 +126231,6 @@ func (s UpdateSecurityGroupRuleDescriptionsIngressInput) GoString() string {
return s.String()
}
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *UpdateSecurityGroupRuleDescriptionsIngressInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "UpdateSecurityGroupRuleDescriptionsIngressInput"}
- if s.IpPermissions == nil {
- invalidParams.Add(request.NewErrParamRequired("IpPermissions"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
// SetDryRun sets the DryRun field's value.
func (s *UpdateSecurityGroupRuleDescriptionsIngressInput) SetDryRun(v bool) *UpdateSecurityGroupRuleDescriptionsIngressInput {
s.DryRun = &v
@@ -118336,6 +126255,12 @@ func (s *UpdateSecurityGroupRuleDescriptionsIngressInput) SetIpPermissions(v []*
return s
}
+// SetSecurityGroupRuleDescriptions sets the SecurityGroupRuleDescriptions field's value.
+func (s *UpdateSecurityGroupRuleDescriptionsIngressInput) SetSecurityGroupRuleDescriptions(v []*SecurityGroupRuleDescription) *UpdateSecurityGroupRuleDescriptionsIngressInput {
+ s.SecurityGroupRuleDescriptions = v
+ return s
+}
+
type UpdateSecurityGroupRuleDescriptionsIngressOutput struct {
_ struct{} `type:"structure"`
@@ -118429,9 +126354,9 @@ func (s *UserBucketDetails) SetS3Key(v string) *UserBucketDetails {
type UserData struct {
_ struct{} `type:"structure" sensitive:"true"`
- // The user data. If you are using an AWS SDK or command line tool, Base64-encoding
- // is performed for you, and you can load the text from a file. Otherwise, you
- // must provide Base64-encoded text.
+ // The user data. If you are using an Amazon Web Services SDK or command line
+ // tool, Base64-encoding is performed for you, and you can load the text from
+ // a file. Otherwise, you must provide Base64-encoded text.
Data *string `locationName:"data" type:"string"`
}
@@ -118451,7 +126376,7 @@ func (s *UserData) SetData(v string) *UserData {
return s
}
-// Describes a security group and AWS account ID pair.
+// Describes a security group and Amazon Web Services account ID pair.
type UserIdGroupPair struct {
_ struct{} `type:"structure"`
@@ -118476,14 +126401,14 @@ type UserIdGroupPair struct {
// The status of a VPC peering connection, if applicable.
PeeringStatus *string `locationName:"peeringStatus" type:"string"`
- // The ID of an AWS account.
+ // The ID of an Amazon Web Services account.
//
// For a referenced security group in another VPC, the account ID of the referenced
// security group is returned in the response. If the referenced security group
// is deleted, this value is not returned.
//
// [EC2-Classic] Required when adding or removing rules that reference a security
- // group in another AWS account.
+ // group in another Amazon Web Services account.
UserId *string `locationName:"userId" type:"string"`
// The ID of the VPC for the referenced security group, if applicable.
@@ -118765,9 +126690,8 @@ type Volume struct {
// rate at which the volume accumulates I/O credits for bursting.
Iops *int64 `locationName:"iops" type:"integer"`
- // The Amazon Resource Name (ARN) of the AWS Key Management Service (AWS KMS)
- // customer master key (CMK) that was used to protect the volume encryption
- // key for the volume.
+ // The Amazon Resource Name (ARN) of the Key Management Service (KMS) KMS key
+ // that was used to protect the volume encryption key for the volume.
KmsKeyId *string `locationName:"kmsKeyId" type:"string"`
// Indicates whether Amazon EBS Multi-Attach is enabled.
@@ -119493,7 +127417,7 @@ type Vpc struct {
// Indicates whether the VPC is the default VPC.
IsDefault *bool `locationName:"isDefault" type:"boolean"`
- // The ID of the AWS account that owns the VPC.
+ // The ID of the Amazon Web Services account that owns the VPC.
OwnerId *string `locationName:"ownerId" type:"string"`
// The current state of the VPC.
@@ -120206,7 +128130,7 @@ type VpcPeeringConnectionVpcInfo struct {
// The IPv6 CIDR block for the VPC.
Ipv6CidrBlockSet []*Ipv6CidrBlock `locationName:"ipv6CidrBlockSet" locationNameList:"item" type:"list"`
- // The AWS account ID of the VPC owner.
+ // The ID of the Amazon Web Services account that owns the VPC.
OwnerId *string `locationName:"ownerId" type:"string"`
// Information about the VPC peering connection options for the accepter or
@@ -121087,6 +129011,18 @@ func ActivityStatus_Values() []string {
}
}
+const (
+ // AddressAttributeNameDomainName is a AddressAttributeName enum value
+ AddressAttributeNameDomainName = "domain-name"
+)
+
+// AddressAttributeName_Values returns all elements of the AddressAttributeName enum
+func AddressAttributeName_Values() []string {
+ return []string{
+ AddressAttributeNameDomainName,
+ }
+}
+
const (
// AffinityDefault is a Affinity enum value
AffinityDefault = "default"
@@ -121144,6 +129080,9 @@ const (
// AllocationStrategyCapacityOptimized is a AllocationStrategy enum value
AllocationStrategyCapacityOptimized = "capacityOptimized"
+
+ // AllocationStrategyCapacityOptimizedPrioritized is a AllocationStrategy enum value
+ AllocationStrategyCapacityOptimizedPrioritized = "capacityOptimizedPrioritized"
)
// AllocationStrategy_Values returns all elements of the AllocationStrategy enum
@@ -121152,6 +129091,7 @@ func AllocationStrategy_Values() []string {
AllocationStrategyLowestPrice,
AllocationStrategyDiversified,
AllocationStrategyCapacityOptimized,
+ AllocationStrategyCapacityOptimizedPrioritized,
}
}
@@ -121455,6 +129395,38 @@ func BgpStatus_Values() []string {
}
}
+const (
+ // BootModeTypeLegacyBios is a BootModeType enum value
+ BootModeTypeLegacyBios = "legacy-bios"
+
+ // BootModeTypeUefi is a BootModeType enum value
+ BootModeTypeUefi = "uefi"
+)
+
+// BootModeType_Values returns all elements of the BootModeType enum
+func BootModeType_Values() []string {
+ return []string{
+ BootModeTypeLegacyBios,
+ BootModeTypeUefi,
+ }
+}
+
+const (
+ // BootModeValuesLegacyBios is a BootModeValues enum value
+ BootModeValuesLegacyBios = "legacy-bios"
+
+ // BootModeValuesUefi is a BootModeValues enum value
+ BootModeValuesUefi = "uefi"
+)
+
+// BootModeValues_Values returns all elements of the BootModeValues enum
+func BootModeValues_Values() []string {
+ return []string{
+ BootModeValuesLegacyBios,
+ BootModeValuesUefi,
+ }
+}
+
const (
// BundleTaskStatePending is a BundleTaskState enum value
BundleTaskStatePending = "pending"
@@ -121895,6 +129867,22 @@ func ConnectionNotificationType_Values() []string {
}
}
+const (
+ // ConnectivityTypePrivate is a ConnectivityType enum value
+ ConnectivityTypePrivate = "private"
+
+ // ConnectivityTypePublic is a ConnectivityType enum value
+ ConnectivityTypePublic = "public"
+)
+
+// ConnectivityType_Values returns all elements of the ConnectivityType enum
+func ConnectivityType_Values() []string {
+ return []string{
+ ConnectivityTypePrivate,
+ ConnectivityTypePublic,
+ }
+}
+
const (
// ContainerFormatOva is a ContainerFormat enum value
ContainerFormatOva = "ova"
@@ -122808,6 +130796,9 @@ const (
// ImageAttributeNameSriovNetSupport is a ImageAttributeName enum value
ImageAttributeNameSriovNetSupport = "sriovNetSupport"
+
+ // ImageAttributeNameBootMode is a ImageAttributeName enum value
+ ImageAttributeNameBootMode = "bootMode"
)
// ImageAttributeName_Values returns all elements of the ImageAttributeName enum
@@ -122820,6 +130811,7 @@ func ImageAttributeName_Values() []string {
ImageAttributeNameProductCodes,
ImageAttributeNameBlockDeviceMapping,
ImageAttributeNameSriovNetSupport,
+ ImageAttributeNameBootMode,
}
}
@@ -122947,6 +130939,30 @@ func InstanceAttributeName_Values() []string {
}
}
+const (
+ // InstanceEventWindowStateCreating is a InstanceEventWindowState enum value
+ InstanceEventWindowStateCreating = "creating"
+
+ // InstanceEventWindowStateDeleting is a InstanceEventWindowState enum value
+ InstanceEventWindowStateDeleting = "deleting"
+
+ // InstanceEventWindowStateActive is a InstanceEventWindowState enum value
+ InstanceEventWindowStateActive = "active"
+
+ // InstanceEventWindowStateDeleted is a InstanceEventWindowState enum value
+ InstanceEventWindowStateDeleted = "deleted"
+)
+
+// InstanceEventWindowState_Values returns all elements of the InstanceEventWindowState enum
+func InstanceEventWindowState_Values() []string {
+ return []string{
+ InstanceEventWindowStateCreating,
+ InstanceEventWindowStateDeleting,
+ InstanceEventWindowStateActive,
+ InstanceEventWindowStateDeleted,
+ }
+}
+
const (
// InstanceHealthStatusHealthy is a InstanceHealthStatus enum value
InstanceHealthStatusHealthy = "healthy"
@@ -123063,6 +131079,22 @@ func InstanceMetadataOptionsState_Values() []string {
}
}
+const (
+ // InstanceMetadataProtocolStateDisabled is a InstanceMetadataProtocolState enum value
+ InstanceMetadataProtocolStateDisabled = "disabled"
+
+ // InstanceMetadataProtocolStateEnabled is a InstanceMetadataProtocolState enum value
+ InstanceMetadataProtocolStateEnabled = "enabled"
+)
+
+// InstanceMetadataProtocolState_Values returns all elements of the InstanceMetadataProtocolState enum
+func InstanceMetadataProtocolState_Values() []string {
+ return []string{
+ InstanceMetadataProtocolStateDisabled,
+ InstanceMetadataProtocolStateEnabled,
+ }
+}
+
const (
// InstanceStateNamePending is a InstanceStateName enum value
InstanceStateNamePending = "pending"
@@ -123801,6 +131833,12 @@ const (
// InstanceTypeG3sXlarge is a InstanceType enum value
InstanceTypeG3sXlarge = "g3s.xlarge"
+ // InstanceTypeG4adXlarge is a InstanceType enum value
+ InstanceTypeG4adXlarge = "g4ad.xlarge"
+
+ // InstanceTypeG4ad2xlarge is a InstanceType enum value
+ InstanceTypeG4ad2xlarge = "g4ad.2xlarge"
+
// InstanceTypeG4ad4xlarge is a InstanceType enum value
InstanceTypeG4ad4xlarge = "g4ad.4xlarge"
@@ -124065,6 +132103,18 @@ const (
// InstanceTypeZ1dMetal is a InstanceType enum value
InstanceTypeZ1dMetal = "z1d.metal"
+ // InstanceTypeU6tb156xlarge is a InstanceType enum value
+ InstanceTypeU6tb156xlarge = "u-6tb1.56xlarge"
+
+ // InstanceTypeU6tb1112xlarge is a InstanceType enum value
+ InstanceTypeU6tb1112xlarge = "u-6tb1.112xlarge"
+
+ // InstanceTypeU9tb1112xlarge is a InstanceType enum value
+ InstanceTypeU9tb1112xlarge = "u-9tb1.112xlarge"
+
+ // InstanceTypeU12tb1112xlarge is a InstanceType enum value
+ InstanceTypeU12tb1112xlarge = "u-12tb1.112xlarge"
+
// InstanceTypeU6tb1Metal is a InstanceType enum value
InstanceTypeU6tb1Metal = "u-6tb1.metal"
@@ -124122,6 +132172,9 @@ const (
// InstanceTypeM5dn24xlarge is a InstanceType enum value
InstanceTypeM5dn24xlarge = "m5dn.24xlarge"
+ // InstanceTypeM5dnMetal is a InstanceType enum value
+ InstanceTypeM5dnMetal = "m5dn.metal"
+
// InstanceTypeM5nLarge is a InstanceType enum value
InstanceTypeM5nLarge = "m5n.large"
@@ -124146,6 +132199,9 @@ const (
// InstanceTypeM5n24xlarge is a InstanceType enum value
InstanceTypeM5n24xlarge = "m5n.24xlarge"
+ // InstanceTypeM5nMetal is a InstanceType enum value
+ InstanceTypeM5nMetal = "m5n.metal"
+
// InstanceTypeR5dnLarge is a InstanceType enum value
InstanceTypeR5dnLarge = "r5dn.large"
@@ -124170,6 +132226,9 @@ const (
// InstanceTypeR5dn24xlarge is a InstanceType enum value
InstanceTypeR5dn24xlarge = "r5dn.24xlarge"
+ // InstanceTypeR5dnMetal is a InstanceType enum value
+ InstanceTypeR5dnMetal = "r5dn.metal"
+
// InstanceTypeR5nLarge is a InstanceType enum value
InstanceTypeR5nLarge = "r5n.large"
@@ -124194,6 +132253,9 @@ const (
// InstanceTypeR5n24xlarge is a InstanceType enum value
InstanceTypeR5n24xlarge = "r5n.24xlarge"
+ // InstanceTypeR5nMetal is a InstanceType enum value
+ InstanceTypeR5nMetal = "r5n.metal"
+
// InstanceTypeInf1Xlarge is a InstanceType enum value
InstanceTypeInf1Xlarge = "inf1.xlarge"
@@ -124260,8 +132322,62 @@ const (
// InstanceTypeM6gd16xlarge is a InstanceType enum value
InstanceTypeM6gd16xlarge = "m6gd.16xlarge"
+ // InstanceTypeM6iLarge is a InstanceType enum value
+ InstanceTypeM6iLarge = "m6i.large"
+
+ // InstanceTypeM6iXlarge is a InstanceType enum value
+ InstanceTypeM6iXlarge = "m6i.xlarge"
+
+ // InstanceTypeM6i2xlarge is a InstanceType enum value
+ InstanceTypeM6i2xlarge = "m6i.2xlarge"
+
+ // InstanceTypeM6i4xlarge is a InstanceType enum value
+ InstanceTypeM6i4xlarge = "m6i.4xlarge"
+
+ // InstanceTypeM6i8xlarge is a InstanceType enum value
+ InstanceTypeM6i8xlarge = "m6i.8xlarge"
+
+ // InstanceTypeM6i12xlarge is a InstanceType enum value
+ InstanceTypeM6i12xlarge = "m6i.12xlarge"
+
+ // InstanceTypeM6i16xlarge is a InstanceType enum value
+ InstanceTypeM6i16xlarge = "m6i.16xlarge"
+
+ // InstanceTypeM6i24xlarge is a InstanceType enum value
+ InstanceTypeM6i24xlarge = "m6i.24xlarge"
+
+ // InstanceTypeM6i32xlarge is a InstanceType enum value
+ InstanceTypeM6i32xlarge = "m6i.32xlarge"
+
// InstanceTypeMac1Metal is a InstanceType enum value
InstanceTypeMac1Metal = "mac1.metal"
+
+ // InstanceTypeX2gdMedium is a InstanceType enum value
+ InstanceTypeX2gdMedium = "x2gd.medium"
+
+ // InstanceTypeX2gdLarge is a InstanceType enum value
+ InstanceTypeX2gdLarge = "x2gd.large"
+
+ // InstanceTypeX2gdXlarge is a InstanceType enum value
+ InstanceTypeX2gdXlarge = "x2gd.xlarge"
+
+ // InstanceTypeX2gd2xlarge is a InstanceType enum value
+ InstanceTypeX2gd2xlarge = "x2gd.2xlarge"
+
+ // InstanceTypeX2gd4xlarge is a InstanceType enum value
+ InstanceTypeX2gd4xlarge = "x2gd.4xlarge"
+
+ // InstanceTypeX2gd8xlarge is a InstanceType enum value
+ InstanceTypeX2gd8xlarge = "x2gd.8xlarge"
+
+ // InstanceTypeX2gd12xlarge is a InstanceType enum value
+ InstanceTypeX2gd12xlarge = "x2gd.12xlarge"
+
+ // InstanceTypeX2gd16xlarge is a InstanceType enum value
+ InstanceTypeX2gd16xlarge = "x2gd.16xlarge"
+
+ // InstanceTypeX2gdMetal is a InstanceType enum value
+ InstanceTypeX2gdMetal = "x2gd.metal"
)
// InstanceType_Values returns all elements of the InstanceType enum
@@ -124502,6 +132618,8 @@ func InstanceType_Values() []string {
InstanceTypeG38xlarge,
InstanceTypeG316xlarge,
InstanceTypeG3sXlarge,
+ InstanceTypeG4adXlarge,
+ InstanceTypeG4ad2xlarge,
InstanceTypeG4ad4xlarge,
InstanceTypeG4ad8xlarge,
InstanceTypeG4ad16xlarge,
@@ -124590,6 +132708,10 @@ func InstanceType_Values() []string {
InstanceTypeZ1d6xlarge,
InstanceTypeZ1d12xlarge,
InstanceTypeZ1dMetal,
+ InstanceTypeU6tb156xlarge,
+ InstanceTypeU6tb1112xlarge,
+ InstanceTypeU9tb1112xlarge,
+ InstanceTypeU12tb1112xlarge,
InstanceTypeU6tb1Metal,
InstanceTypeU9tb1Metal,
InstanceTypeU12tb1Metal,
@@ -124609,6 +132731,7 @@ func InstanceType_Values() []string {
InstanceTypeM5dn12xlarge,
InstanceTypeM5dn16xlarge,
InstanceTypeM5dn24xlarge,
+ InstanceTypeM5dnMetal,
InstanceTypeM5nLarge,
InstanceTypeM5nXlarge,
InstanceTypeM5n2xlarge,
@@ -124617,6 +132740,7 @@ func InstanceType_Values() []string {
InstanceTypeM5n12xlarge,
InstanceTypeM5n16xlarge,
InstanceTypeM5n24xlarge,
+ InstanceTypeM5nMetal,
InstanceTypeR5dnLarge,
InstanceTypeR5dnXlarge,
InstanceTypeR5dn2xlarge,
@@ -124625,6 +132749,7 @@ func InstanceType_Values() []string {
InstanceTypeR5dn12xlarge,
InstanceTypeR5dn16xlarge,
InstanceTypeR5dn24xlarge,
+ InstanceTypeR5dnMetal,
InstanceTypeR5nLarge,
InstanceTypeR5nXlarge,
InstanceTypeR5n2xlarge,
@@ -124633,6 +132758,7 @@ func InstanceType_Values() []string {
InstanceTypeR5n12xlarge,
InstanceTypeR5n16xlarge,
InstanceTypeR5n24xlarge,
+ InstanceTypeR5nMetal,
InstanceTypeInf1Xlarge,
InstanceTypeInf12xlarge,
InstanceTypeInf16xlarge,
@@ -124655,7 +132781,25 @@ func InstanceType_Values() []string {
InstanceTypeM6gd8xlarge,
InstanceTypeM6gd12xlarge,
InstanceTypeM6gd16xlarge,
+ InstanceTypeM6iLarge,
+ InstanceTypeM6iXlarge,
+ InstanceTypeM6i2xlarge,
+ InstanceTypeM6i4xlarge,
+ InstanceTypeM6i8xlarge,
+ InstanceTypeM6i12xlarge,
+ InstanceTypeM6i16xlarge,
+ InstanceTypeM6i24xlarge,
+ InstanceTypeM6i32xlarge,
InstanceTypeMac1Metal,
+ InstanceTypeX2gdMedium,
+ InstanceTypeX2gdLarge,
+ InstanceTypeX2gdXlarge,
+ InstanceTypeX2gd2xlarge,
+ InstanceTypeX2gd4xlarge,
+ InstanceTypeX2gd8xlarge,
+ InstanceTypeX2gd12xlarge,
+ InstanceTypeX2gd16xlarge,
+ InstanceTypeX2gdMetal,
}
}
@@ -124691,6 +132835,22 @@ func InterfacePermissionType_Values() []string {
}
}
+const (
+ // InterfaceProtocolTypeVlan is a InterfaceProtocolType enum value
+ InterfaceProtocolTypeVlan = "VLAN"
+
+ // InterfaceProtocolTypeGre is a InterfaceProtocolType enum value
+ InterfaceProtocolTypeGre = "GRE"
+)
+
+// InterfaceProtocolType_Values returns all elements of the InterfaceProtocolType enum
+func InterfaceProtocolType_Values() []string {
+ return []string{
+ InterfaceProtocolTypeVlan,
+ InterfaceProtocolTypeGre,
+ }
+}
+
const (
// Ipv6SupportValueEnable is a Ipv6SupportValue enum value
Ipv6SupportValueEnable = "enable"
@@ -124707,6 +132867,22 @@ func Ipv6SupportValue_Values() []string {
}
}
+const (
+ // KeyTypeRsa is a KeyType enum value
+ KeyTypeRsa = "rsa"
+
+ // KeyTypeEd25519 is a KeyType enum value
+ KeyTypeEd25519 = "ed25519"
+)
+
+// KeyType_Values returns all elements of the KeyType enum
+func KeyType_Values() []string {
+ return []string{
+ KeyTypeRsa,
+ KeyTypeEd25519,
+ }
+}
+
const (
// LaunchTemplateErrorCodeLaunchTemplateIdDoesNotExist is a LaunchTemplateErrorCode enum value
LaunchTemplateErrorCodeLaunchTemplateIdDoesNotExist = "launchTemplateIdDoesNotExist"
@@ -124787,6 +132963,22 @@ func LaunchTemplateInstanceMetadataOptionsState_Values() []string {
}
}
+const (
+ // LaunchTemplateInstanceMetadataProtocolIpv6Disabled is a LaunchTemplateInstanceMetadataProtocolIpv6 enum value
+ LaunchTemplateInstanceMetadataProtocolIpv6Disabled = "disabled"
+
+ // LaunchTemplateInstanceMetadataProtocolIpv6Enabled is a LaunchTemplateInstanceMetadataProtocolIpv6 enum value
+ LaunchTemplateInstanceMetadataProtocolIpv6Enabled = "enabled"
+)
+
+// LaunchTemplateInstanceMetadataProtocolIpv6_Values returns all elements of the LaunchTemplateInstanceMetadataProtocolIpv6 enum
+func LaunchTemplateInstanceMetadataProtocolIpv6_Values() []string {
+ return []string{
+ LaunchTemplateInstanceMetadataProtocolIpv6Disabled,
+ LaunchTemplateInstanceMetadataProtocolIpv6Enabled,
+ }
+}
+
const (
// ListingStateAvailable is a ListingState enum value
ListingStateAvailable = "available"
@@ -125070,12 +133262,20 @@ func NetworkInterfaceAttribute_Values() []string {
const (
// NetworkInterfaceCreationTypeEfa is a NetworkInterfaceCreationType enum value
NetworkInterfaceCreationTypeEfa = "efa"
+
+ // NetworkInterfaceCreationTypeBranch is a NetworkInterfaceCreationType enum value
+ NetworkInterfaceCreationTypeBranch = "branch"
+
+ // NetworkInterfaceCreationTypeTrunk is a NetworkInterfaceCreationType enum value
+ NetworkInterfaceCreationTypeTrunk = "trunk"
)
// NetworkInterfaceCreationType_Values returns all elements of the NetworkInterfaceCreationType enum
func NetworkInterfaceCreationType_Values() []string {
return []string{
NetworkInterfaceCreationTypeEfa,
+ NetworkInterfaceCreationTypeBranch,
+ NetworkInterfaceCreationTypeTrunk,
}
}
@@ -125140,6 +133340,9 @@ const (
// NetworkInterfaceTypeEfa is a NetworkInterfaceType enum value
NetworkInterfaceTypeEfa = "efa"
+
+ // NetworkInterfaceTypeTrunk is a NetworkInterfaceType enum value
+ NetworkInterfaceTypeTrunk = "trunk"
)
// NetworkInterfaceType_Values returns all elements of the NetworkInterfaceType enum
@@ -125148,6 +133351,7 @@ func NetworkInterfaceType_Values() []string {
NetworkInterfaceTypeInterface,
NetworkInterfaceTypeNatGateway,
NetworkInterfaceTypeEfa,
+ NetworkInterfaceTypeTrunk,
}
}
@@ -125231,6 +133435,30 @@ func OperationType_Values() []string {
}
}
+const (
+ // PartitionLoadFrequencyNone is a PartitionLoadFrequency enum value
+ PartitionLoadFrequencyNone = "none"
+
+ // PartitionLoadFrequencyDaily is a PartitionLoadFrequency enum value
+ PartitionLoadFrequencyDaily = "daily"
+
+ // PartitionLoadFrequencyWeekly is a PartitionLoadFrequency enum value
+ PartitionLoadFrequencyWeekly = "weekly"
+
+ // PartitionLoadFrequencyMonthly is a PartitionLoadFrequency enum value
+ PartitionLoadFrequencyMonthly = "monthly"
+)
+
+// PartitionLoadFrequency_Values returns all elements of the PartitionLoadFrequency enum
+func PartitionLoadFrequency_Values() []string {
+ return []string{
+ PartitionLoadFrequencyNone,
+ PartitionLoadFrequencyDaily,
+ PartitionLoadFrequencyWeekly,
+ PartitionLoadFrequencyMonthly,
+ }
+}
+
const (
// PaymentOptionAllUpfront is a PaymentOption enum value
PaymentOptionAllUpfront = "AllUpfront"
@@ -125507,6 +133735,38 @@ func RecurringChargeFrequency_Values() []string {
}
}
+const (
+ // ReplaceRootVolumeTaskStatePending is a ReplaceRootVolumeTaskState enum value
+ ReplaceRootVolumeTaskStatePending = "pending"
+
+ // ReplaceRootVolumeTaskStateInProgress is a ReplaceRootVolumeTaskState enum value
+ ReplaceRootVolumeTaskStateInProgress = "in-progress"
+
+ // ReplaceRootVolumeTaskStateFailing is a ReplaceRootVolumeTaskState enum value
+ ReplaceRootVolumeTaskStateFailing = "failing"
+
+ // ReplaceRootVolumeTaskStateSucceeded is a ReplaceRootVolumeTaskState enum value
+ ReplaceRootVolumeTaskStateSucceeded = "succeeded"
+
+ // ReplaceRootVolumeTaskStateFailed is a ReplaceRootVolumeTaskState enum value
+ ReplaceRootVolumeTaskStateFailed = "failed"
+
+ // ReplaceRootVolumeTaskStateFailedDetached is a ReplaceRootVolumeTaskState enum value
+ ReplaceRootVolumeTaskStateFailedDetached = "failed-detached"
+)
+
+// ReplaceRootVolumeTaskState_Values returns all elements of the ReplaceRootVolumeTaskState enum
+func ReplaceRootVolumeTaskState_Values() []string {
+ return []string{
+ ReplaceRootVolumeTaskStatePending,
+ ReplaceRootVolumeTaskStateInProgress,
+ ReplaceRootVolumeTaskStateFailing,
+ ReplaceRootVolumeTaskStateSucceeded,
+ ReplaceRootVolumeTaskStateFailed,
+ ReplaceRootVolumeTaskStateFailedDetached,
+ }
+}
+
const (
// ReplacementStrategyLaunch is a ReplacementStrategy enum value
ReplacementStrategyLaunch = "launch"
@@ -125708,6 +133968,9 @@ const (
// ResourceTypeInstance is a ResourceType enum value
ResourceTypeInstance = "instance"
+ // ResourceTypeInstanceEventWindow is a ResourceType enum value
+ ResourceTypeInstanceEventWindow = "instance-event-window"
+
// ResourceTypeInternetGateway is a ResourceType enum value
ResourceTypeInternetGateway = "internet-gateway"
@@ -125747,6 +134010,9 @@ const (
// ResourceTypeSecurityGroup is a ResourceType enum value
ResourceTypeSecurityGroup = "security-group"
+ // ResourceTypeSecurityGroupRule is a ResourceType enum value
+ ResourceTypeSecurityGroupRule = "security-group-rule"
+
// ResourceTypeSnapshot is a ResourceType enum value
ResourceTypeSnapshot = "snapshot"
@@ -125821,6 +134087,7 @@ func ResourceType_Values() []string {
ResourceTypeImportImageTask,
ResourceTypeImportSnapshotTask,
ResourceTypeInstance,
+ ResourceTypeInstanceEventWindow,
ResourceTypeInternetGateway,
ResourceTypeKeyPair,
ResourceTypeLaunchTemplate,
@@ -125834,6 +134101,7 @@ func ResourceType_Values() []string {
ResourceTypeReservedInstances,
ResourceTypeRouteTable,
ResourceTypeSecurityGroup,
+ ResourceTypeSecurityGroupRule,
ResourceTypeSnapshot,
ResourceTypeSpotFleetRequest,
ResourceTypeSpotInstancesRequest,
@@ -126092,6 +134360,9 @@ const (
// SpotAllocationStrategyCapacityOptimized is a SpotAllocationStrategy enum value
SpotAllocationStrategyCapacityOptimized = "capacity-optimized"
+
+ // SpotAllocationStrategyCapacityOptimizedPrioritized is a SpotAllocationStrategy enum value
+ SpotAllocationStrategyCapacityOptimizedPrioritized = "capacity-optimized-prioritized"
)
// SpotAllocationStrategy_Values returns all elements of the SpotAllocationStrategy enum
@@ -126100,6 +134371,7 @@ func SpotAllocationStrategy_Values() []string {
SpotAllocationStrategyLowestPrice,
SpotAllocationStrategyDiversified,
SpotAllocationStrategyCapacityOptimized,
+ SpotAllocationStrategyCapacityOptimizedPrioritized,
}
}
@@ -126311,6 +134583,22 @@ func SubnetCidrBlockStateCode_Values() []string {
}
}
+const (
+ // SubnetCidrReservationTypePrefix is a SubnetCidrReservationType enum value
+ SubnetCidrReservationTypePrefix = "prefix"
+
+ // SubnetCidrReservationTypeExplicit is a SubnetCidrReservationType enum value
+ SubnetCidrReservationTypeExplicit = "explicit"
+)
+
+// SubnetCidrReservationType_Values returns all elements of the SubnetCidrReservationType enum
+func SubnetCidrReservationType_Values() []string {
+ return []string{
+ SubnetCidrReservationTypePrefix,
+ SubnetCidrReservationTypeExplicit,
+ }
+}
+
const (
// SubnetStatePending is a SubnetState enum value
SubnetStatePending = "pending"
@@ -127346,3 +135634,39 @@ func VpnStaticRouteSource_Values() []string {
VpnStaticRouteSourceStatic,
}
}
+
+const (
+ // WeekDaySunday is a WeekDay enum value
+ WeekDaySunday = "sunday"
+
+ // WeekDayMonday is a WeekDay enum value
+ WeekDayMonday = "monday"
+
+ // WeekDayTuesday is a WeekDay enum value
+ WeekDayTuesday = "tuesday"
+
+ // WeekDayWednesday is a WeekDay enum value
+ WeekDayWednesday = "wednesday"
+
+ // WeekDayThursday is a WeekDay enum value
+ WeekDayThursday = "thursday"
+
+ // WeekDayFriday is a WeekDay enum value
+ WeekDayFriday = "friday"
+
+ // WeekDaySaturday is a WeekDay enum value
+ WeekDaySaturday = "saturday"
+)
+
+// WeekDay_Values returns all elements of the WeekDay enum
+func WeekDay_Values() []string {
+ return []string{
+ WeekDaySunday,
+ WeekDayMonday,
+ WeekDayTuesday,
+ WeekDayWednesday,
+ WeekDayThursday,
+ WeekDayFriday,
+ WeekDaySaturday,
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go
index 31c314e0e..47c44cc9d 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go
@@ -4,8 +4,14 @@
// requests to Amazon Elastic Compute Cloud.
//
// Amazon Elastic Compute Cloud (Amazon EC2) provides secure and resizable computing
-// capacity in the AWS cloud. Using Amazon EC2 eliminates the need to invest
+// capacity in the AWS Cloud. Using Amazon EC2 eliminates the need to invest
// in hardware up front, so you can develop and deploy applications faster.
+// Amazon Virtual Private Cloud (Amazon VPC) enables you to provision a logically
+// isolated section of the AWS Cloud where you can launch AWS resources in a
+// virtual network that you've defined. Amazon Elastic Block Store (Amazon EBS)
+// provides block level storage volumes for use with EC2 instances. EBS volumes
+// are highly available and reliable storage volumes that can be attached to
+// any running instance and used like a hard drive.
//
// To learn more, see the following resources:
//
@@ -13,7 +19,7 @@
// EC2 documentation (http://aws.amazon.com/documentation/ec2)
//
// * Amazon EBS: Amazon EBS product page (http://aws.amazon.com/ebs), Amazon
-// EBS documentation (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AmazonEBS.html)
+// EBS documentation (http://aws.amazon.com/documentation/ebs)
//
// * Amazon VPC: Amazon VPC product page (http://aws.amazon.com/vpc), Amazon
// VPC documentation (http://aws.amazon.com/documentation/vpc)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go
index b9bdbde15..15b26e741 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go
@@ -982,7 +982,7 @@ func (c *EC2) WaitUntilSecurityGroupExistsWithContext(ctx aws.Context, input *De
{
State: request.RetryWaiterState,
Matcher: request.ErrorWaiterMatch,
- Expected: "InvalidGroupNotFound",
+ Expected: "InvalidGroup.NotFound",
},
},
Logger: c.Config.Logger,
diff --git a/vendor/github.com/aws/aws-sdk-go/service/lightsail/api.go b/vendor/github.com/aws/aws-sdk-go/service/lightsail/api.go
new file mode 100644
index 000000000..d52b59dcf
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/lightsail/api.go
@@ -0,0 +1,40925 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package lightsail
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol"
+ "github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+const opAllocateStaticIp = "AllocateStaticIp"
+
+// AllocateStaticIpRequest generates a "aws/request.Request" representing the
+// client's request for the AllocateStaticIp operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See AllocateStaticIp for more information on using the AllocateStaticIp
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the AllocateStaticIpRequest method.
+// req, resp := client.AllocateStaticIpRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/AllocateStaticIp
+func (c *Lightsail) AllocateStaticIpRequest(input *AllocateStaticIpInput) (req *request.Request, output *AllocateStaticIpOutput) {
+ op := &request.Operation{
+ Name: opAllocateStaticIp,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &AllocateStaticIpInput{}
+ }
+
+ output = &AllocateStaticIpOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// AllocateStaticIp API operation for Amazon Lightsail.
+//
+// Allocates a static IP address.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation AllocateStaticIp for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/AllocateStaticIp
+func (c *Lightsail) AllocateStaticIp(input *AllocateStaticIpInput) (*AllocateStaticIpOutput, error) {
+ req, out := c.AllocateStaticIpRequest(input)
+ return out, req.Send()
+}
+
+// AllocateStaticIpWithContext is the same as AllocateStaticIp with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AllocateStaticIp for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) AllocateStaticIpWithContext(ctx aws.Context, input *AllocateStaticIpInput, opts ...request.Option) (*AllocateStaticIpOutput, error) {
+ req, out := c.AllocateStaticIpRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opAttachCertificateToDistribution = "AttachCertificateToDistribution"
+
+// AttachCertificateToDistributionRequest generates a "aws/request.Request" representing the
+// client's request for the AttachCertificateToDistribution operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See AttachCertificateToDistribution for more information on using the AttachCertificateToDistribution
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the AttachCertificateToDistributionRequest method.
+// req, resp := client.AttachCertificateToDistributionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/AttachCertificateToDistribution
+func (c *Lightsail) AttachCertificateToDistributionRequest(input *AttachCertificateToDistributionInput) (req *request.Request, output *AttachCertificateToDistributionOutput) {
+ op := &request.Operation{
+ Name: opAttachCertificateToDistribution,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &AttachCertificateToDistributionInput{}
+ }
+
+ output = &AttachCertificateToDistributionOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// AttachCertificateToDistribution API operation for Amazon Lightsail.
+//
+// Attaches an SSL/TLS certificate to your Amazon Lightsail content delivery
+// network (CDN) distribution.
+//
+// After the certificate is attached, your distribution accepts HTTPS traffic
+// for all of the domains that are associated with the certificate.
+//
+// Use the CreateCertificate action to create a certificate that you can attach
+// to your distribution.
+//
+// Only certificates created in the us-east-1 AWS Region can be attached to
+// Lightsail distributions. Lightsail distributions are global resources that
+// can reference an origin in any AWS Region, and distribute its content globally.
+// However, all distributions are located in the us-east-1 Region.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation AttachCertificateToDistribution for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/AttachCertificateToDistribution
+func (c *Lightsail) AttachCertificateToDistribution(input *AttachCertificateToDistributionInput) (*AttachCertificateToDistributionOutput, error) {
+ req, out := c.AttachCertificateToDistributionRequest(input)
+ return out, req.Send()
+}
+
+// AttachCertificateToDistributionWithContext is the same as AttachCertificateToDistribution with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AttachCertificateToDistribution for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) AttachCertificateToDistributionWithContext(ctx aws.Context, input *AttachCertificateToDistributionInput, opts ...request.Option) (*AttachCertificateToDistributionOutput, error) {
+ req, out := c.AttachCertificateToDistributionRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opAttachDisk = "AttachDisk"
+
+// AttachDiskRequest generates a "aws/request.Request" representing the
+// client's request for the AttachDisk operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See AttachDisk for more information on using the AttachDisk
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the AttachDiskRequest method.
+// req, resp := client.AttachDiskRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/AttachDisk
+func (c *Lightsail) AttachDiskRequest(input *AttachDiskInput) (req *request.Request, output *AttachDiskOutput) {
+ op := &request.Operation{
+ Name: opAttachDisk,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &AttachDiskInput{}
+ }
+
+ output = &AttachDiskOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// AttachDisk API operation for Amazon Lightsail.
+//
+// Attaches a block storage disk to a running or stopped Lightsail instance
+// and exposes it to the instance with the specified disk name.
+//
+// The attach disk operation supports tag-based access control via resource
+// tags applied to the resource identified by disk name. For more information,
+// see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation AttachDisk for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/AttachDisk
+func (c *Lightsail) AttachDisk(input *AttachDiskInput) (*AttachDiskOutput, error) {
+ req, out := c.AttachDiskRequest(input)
+ return out, req.Send()
+}
+
+// AttachDiskWithContext is the same as AttachDisk with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AttachDisk for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) AttachDiskWithContext(ctx aws.Context, input *AttachDiskInput, opts ...request.Option) (*AttachDiskOutput, error) {
+ req, out := c.AttachDiskRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opAttachInstancesToLoadBalancer = "AttachInstancesToLoadBalancer"
+
+// AttachInstancesToLoadBalancerRequest generates a "aws/request.Request" representing the
+// client's request for the AttachInstancesToLoadBalancer operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See AttachInstancesToLoadBalancer for more information on using the AttachInstancesToLoadBalancer
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the AttachInstancesToLoadBalancerRequest method.
+// req, resp := client.AttachInstancesToLoadBalancerRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/AttachInstancesToLoadBalancer
+func (c *Lightsail) AttachInstancesToLoadBalancerRequest(input *AttachInstancesToLoadBalancerInput) (req *request.Request, output *AttachInstancesToLoadBalancerOutput) {
+ op := &request.Operation{
+ Name: opAttachInstancesToLoadBalancer,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &AttachInstancesToLoadBalancerInput{}
+ }
+
+ output = &AttachInstancesToLoadBalancerOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// AttachInstancesToLoadBalancer API operation for Amazon Lightsail.
+//
+// Attaches one or more Lightsail instances to a load balancer.
+//
+// After some time, the instances are attached to the load balancer and the
+// health check status is available.
+//
+// The attach instances to load balancer operation supports tag-based access
+// control via resource tags applied to the resource identified by load balancer
+// name. For more information, see the Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation AttachInstancesToLoadBalancer for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/AttachInstancesToLoadBalancer
+func (c *Lightsail) AttachInstancesToLoadBalancer(input *AttachInstancesToLoadBalancerInput) (*AttachInstancesToLoadBalancerOutput, error) {
+ req, out := c.AttachInstancesToLoadBalancerRequest(input)
+ return out, req.Send()
+}
+
+// AttachInstancesToLoadBalancerWithContext is the same as AttachInstancesToLoadBalancer with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AttachInstancesToLoadBalancer for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) AttachInstancesToLoadBalancerWithContext(ctx aws.Context, input *AttachInstancesToLoadBalancerInput, opts ...request.Option) (*AttachInstancesToLoadBalancerOutput, error) {
+ req, out := c.AttachInstancesToLoadBalancerRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opAttachLoadBalancerTlsCertificate = "AttachLoadBalancerTlsCertificate"
+
+// AttachLoadBalancerTlsCertificateRequest generates a "aws/request.Request" representing the
+// client's request for the AttachLoadBalancerTlsCertificate operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See AttachLoadBalancerTlsCertificate for more information on using the AttachLoadBalancerTlsCertificate
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the AttachLoadBalancerTlsCertificateRequest method.
+// req, resp := client.AttachLoadBalancerTlsCertificateRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/AttachLoadBalancerTlsCertificate
+func (c *Lightsail) AttachLoadBalancerTlsCertificateRequest(input *AttachLoadBalancerTlsCertificateInput) (req *request.Request, output *AttachLoadBalancerTlsCertificateOutput) {
+ op := &request.Operation{
+ Name: opAttachLoadBalancerTlsCertificate,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &AttachLoadBalancerTlsCertificateInput{}
+ }
+
+ output = &AttachLoadBalancerTlsCertificateOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// AttachLoadBalancerTlsCertificate API operation for Amazon Lightsail.
+//
+// Attaches a Transport Layer Security (TLS) certificate to your load balancer.
+// TLS is just an updated, more secure version of Secure Socket Layer (SSL).
+//
+// Once you create and validate your certificate, you can attach it to your
+// load balancer. You can also use this API to rotate the certificates on your
+// account. Use the AttachLoadBalancerTlsCertificate action with the non-attached
+// certificate, and it will replace the existing one and become the attached
+// certificate.
+//
+// The AttachLoadBalancerTlsCertificate operation supports tag-based access
+// control via resource tags applied to the resource identified by load balancer
+// name. For more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation AttachLoadBalancerTlsCertificate for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/AttachLoadBalancerTlsCertificate
+func (c *Lightsail) AttachLoadBalancerTlsCertificate(input *AttachLoadBalancerTlsCertificateInput) (*AttachLoadBalancerTlsCertificateOutput, error) {
+ req, out := c.AttachLoadBalancerTlsCertificateRequest(input)
+ return out, req.Send()
+}
+
+// AttachLoadBalancerTlsCertificateWithContext is the same as AttachLoadBalancerTlsCertificate with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AttachLoadBalancerTlsCertificate for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) AttachLoadBalancerTlsCertificateWithContext(ctx aws.Context, input *AttachLoadBalancerTlsCertificateInput, opts ...request.Option) (*AttachLoadBalancerTlsCertificateOutput, error) {
+ req, out := c.AttachLoadBalancerTlsCertificateRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opAttachStaticIp = "AttachStaticIp"
+
+// AttachStaticIpRequest generates a "aws/request.Request" representing the
+// client's request for the AttachStaticIp operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See AttachStaticIp for more information on using the AttachStaticIp
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the AttachStaticIpRequest method.
+// req, resp := client.AttachStaticIpRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/AttachStaticIp
+func (c *Lightsail) AttachStaticIpRequest(input *AttachStaticIpInput) (req *request.Request, output *AttachStaticIpOutput) {
+ op := &request.Operation{
+ Name: opAttachStaticIp,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &AttachStaticIpInput{}
+ }
+
+ output = &AttachStaticIpOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// AttachStaticIp API operation for Amazon Lightsail.
+//
+// Attaches a static IP address to a specific Amazon Lightsail instance.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation AttachStaticIp for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/AttachStaticIp
+func (c *Lightsail) AttachStaticIp(input *AttachStaticIpInput) (*AttachStaticIpOutput, error) {
+ req, out := c.AttachStaticIpRequest(input)
+ return out, req.Send()
+}
+
+// AttachStaticIpWithContext is the same as AttachStaticIp with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AttachStaticIp for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) AttachStaticIpWithContext(ctx aws.Context, input *AttachStaticIpInput, opts ...request.Option) (*AttachStaticIpOutput, error) {
+ req, out := c.AttachStaticIpRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opCloseInstancePublicPorts = "CloseInstancePublicPorts"
+
+// CloseInstancePublicPortsRequest generates a "aws/request.Request" representing the
+// client's request for the CloseInstancePublicPorts operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CloseInstancePublicPorts for more information on using the CloseInstancePublicPorts
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the CloseInstancePublicPortsRequest method.
+// req, resp := client.CloseInstancePublicPortsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CloseInstancePublicPorts
+func (c *Lightsail) CloseInstancePublicPortsRequest(input *CloseInstancePublicPortsInput) (req *request.Request, output *CloseInstancePublicPortsOutput) {
+ op := &request.Operation{
+ Name: opCloseInstancePublicPorts,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CloseInstancePublicPortsInput{}
+ }
+
+ output = &CloseInstancePublicPortsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CloseInstancePublicPorts API operation for Amazon Lightsail.
+//
+// Closes ports for a specific Amazon Lightsail instance.
+//
+// The CloseInstancePublicPorts action supports tag-based access control via
+// resource tags applied to the resource identified by instanceName. For more
+// information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation CloseInstancePublicPorts for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CloseInstancePublicPorts
+func (c *Lightsail) CloseInstancePublicPorts(input *CloseInstancePublicPortsInput) (*CloseInstancePublicPortsOutput, error) {
+ req, out := c.CloseInstancePublicPortsRequest(input)
+ return out, req.Send()
+}
+
+// CloseInstancePublicPortsWithContext is the same as CloseInstancePublicPorts with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CloseInstancePublicPorts for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) CloseInstancePublicPortsWithContext(ctx aws.Context, input *CloseInstancePublicPortsInput, opts ...request.Option) (*CloseInstancePublicPortsOutput, error) {
+ req, out := c.CloseInstancePublicPortsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opCopySnapshot = "CopySnapshot"
+
+// CopySnapshotRequest generates a "aws/request.Request" representing the
+// client's request for the CopySnapshot operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CopySnapshot for more information on using the CopySnapshot
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the CopySnapshotRequest method.
+// req, resp := client.CopySnapshotRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CopySnapshot
+func (c *Lightsail) CopySnapshotRequest(input *CopySnapshotInput) (req *request.Request, output *CopySnapshotOutput) {
+ op := &request.Operation{
+ Name: opCopySnapshot,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CopySnapshotInput{}
+ }
+
+ output = &CopySnapshotOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CopySnapshot API operation for Amazon Lightsail.
+//
+// Copies a manual snapshot of an instance or disk as another manual snapshot,
+// or copies an automatic snapshot of an instance or disk as a manual snapshot.
+// This operation can also be used to copy a manual or automatic snapshot of
+// an instance or a disk from one AWS Region to another in Amazon Lightsail.
+//
+// When copying a manual snapshot, be sure to define the source region, source
+// snapshot name, and target snapshot name parameters.
+//
+// When copying an automatic snapshot, be sure to define the source region,
+// source resource name, target snapshot name, and either the restore date or
+// the use latest restorable auto snapshot parameters.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation CopySnapshot for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CopySnapshot
+func (c *Lightsail) CopySnapshot(input *CopySnapshotInput) (*CopySnapshotOutput, error) {
+ req, out := c.CopySnapshotRequest(input)
+ return out, req.Send()
+}
+
+// CopySnapshotWithContext is the same as CopySnapshot with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CopySnapshot for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) CopySnapshotWithContext(ctx aws.Context, input *CopySnapshotInput, opts ...request.Option) (*CopySnapshotOutput, error) {
+ req, out := c.CopySnapshotRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opCreateBucket = "CreateBucket"
+
+// CreateBucketRequest generates a "aws/request.Request" representing the
+// client's request for the CreateBucket operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateBucket for more information on using the CreateBucket
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the CreateBucketRequest method.
+// req, resp := client.CreateBucketRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateBucket
+func (c *Lightsail) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) {
+ op := &request.Operation{
+ Name: opCreateBucket,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateBucketInput{}
+ }
+
+ output = &CreateBucketOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateBucket API operation for Amazon Lightsail.
+//
+// Creates an Amazon Lightsail bucket.
+//
+// A bucket is a cloud storage resource available in the Lightsail object storage
+// service. Use buckets to store objects such as data and its descriptive metadata.
+// For more information about buckets, see Buckets in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/buckets-in-amazon-lightsail)
+// in the Amazon Lightsail Developer Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation CreateBucket for usage and error information.
+//
+// Returned Error Types:
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * ServiceException
+// A general service exception.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateBucket
+func (c *Lightsail) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) {
+ req, out := c.CreateBucketRequest(input)
+ return out, req.Send()
+}
+
+// CreateBucketWithContext is the same as CreateBucket with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateBucket for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) CreateBucketWithContext(ctx aws.Context, input *CreateBucketInput, opts ...request.Option) (*CreateBucketOutput, error) {
+ req, out := c.CreateBucketRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opCreateBucketAccessKey = "CreateBucketAccessKey"
+
+// CreateBucketAccessKeyRequest generates a "aws/request.Request" representing the
+// client's request for the CreateBucketAccessKey operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateBucketAccessKey for more information on using the CreateBucketAccessKey
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the CreateBucketAccessKeyRequest method.
+// req, resp := client.CreateBucketAccessKeyRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateBucketAccessKey
+func (c *Lightsail) CreateBucketAccessKeyRequest(input *CreateBucketAccessKeyInput) (req *request.Request, output *CreateBucketAccessKeyOutput) {
+ op := &request.Operation{
+ Name: opCreateBucketAccessKey,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateBucketAccessKeyInput{}
+ }
+
+ output = &CreateBucketAccessKeyOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateBucketAccessKey API operation for Amazon Lightsail.
+//
+// Creates a new access key for the specified Amazon Lightsail bucket. Access
+// keys consist of an access key ID and corresponding secret access key.
+//
+// Access keys grant full programmatic access to the specified bucket and its
+// objects. You can have a maximum of two access keys per bucket. Use the GetBucketAccessKeys
+// action to get a list of current access keys for a specific bucket. For more
+// information about access keys, see Creating access keys for a bucket in Amazon
+// Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-creating-bucket-access-keys)
+// in the Amazon Lightsail Developer Guide.
+//
+// The secretAccessKey value is returned only in response to the CreateBucketAccessKey
+// action. You can get a secret access key only when you first create an access
+// key; you cannot get the secret access key later. If you lose the secret access
+// key, you must create a new access key.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation CreateBucketAccessKey for usage and error information.
+//
+// Returned Error Types:
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * ServiceException
+// A general service exception.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateBucketAccessKey
+func (c *Lightsail) CreateBucketAccessKey(input *CreateBucketAccessKeyInput) (*CreateBucketAccessKeyOutput, error) {
+ req, out := c.CreateBucketAccessKeyRequest(input)
+ return out, req.Send()
+}
+
+// CreateBucketAccessKeyWithContext is the same as CreateBucketAccessKey with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateBucketAccessKey for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) CreateBucketAccessKeyWithContext(ctx aws.Context, input *CreateBucketAccessKeyInput, opts ...request.Option) (*CreateBucketAccessKeyOutput, error) {
+ req, out := c.CreateBucketAccessKeyRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opCreateCertificate = "CreateCertificate"
+
+// CreateCertificateRequest generates a "aws/request.Request" representing the
+// client's request for the CreateCertificate operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateCertificate for more information on using the CreateCertificate
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the CreateCertificateRequest method.
+// req, resp := client.CreateCertificateRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateCertificate
+func (c *Lightsail) CreateCertificateRequest(input *CreateCertificateInput) (req *request.Request, output *CreateCertificateOutput) {
+ op := &request.Operation{
+ Name: opCreateCertificate,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateCertificateInput{}
+ }
+
+ output = &CreateCertificateOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateCertificate API operation for Amazon Lightsail.
+//
+// Creates an SSL/TLS certificate for an Amazon Lightsail content delivery network
+// (CDN) distribution and a container service.
+//
+// After the certificate is valid, use the AttachCertificateToDistribution action
+// to use the certificate and its domains with your distribution. Or use the
+// UpdateContainerService action to use the certificate and its domains with
+// your container service.
+//
+// Only certificates created in the us-east-1 AWS Region can be attached to
+// Lightsail distributions. Lightsail distributions are global resources that
+// can reference an origin in any AWS Region, and distribute its content globally.
+// However, all distributions are located in the us-east-1 Region.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation CreateCertificate for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateCertificate
+func (c *Lightsail) CreateCertificate(input *CreateCertificateInput) (*CreateCertificateOutput, error) {
+	// Build the request and execute it synchronously.
+	r, out := c.CreateCertificateRequest(input)
+	err := r.Send()
+	return out, err
+}
+
+// CreateCertificateWithContext is the same as CreateCertificate with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateCertificate for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) CreateCertificateWithContext(ctx aws.Context, input *CreateCertificateInput, opts ...request.Option) (*CreateCertificateOutput, error) {
+	// Build the request, attach the caller's context and options, then send.
+	r, out := c.CreateCertificateRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	err := r.Send()
+	return out, err
+}
+
+const opCreateCloudFormationStack = "CreateCloudFormationStack"
+
+// CreateCloudFormationStackRequest generates a "aws/request.Request" representing the
+// client's request for the CreateCloudFormationStack operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateCloudFormationStack for more information on using the CreateCloudFormationStack
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the CreateCloudFormationStackRequest method.
+// req, resp := client.CreateCloudFormationStackRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateCloudFormationStack
+func (c *Lightsail) CreateCloudFormationStackRequest(input *CreateCloudFormationStackInput) (req *request.Request, output *CreateCloudFormationStackOutput) {
+	// A nil input is replaced with an empty one so the request is always valid.
+	if input == nil {
+		input = &CreateCloudFormationStackInput{}
+	}
+
+	op := &request.Operation{
+		Name:       opCreateCloudFormationStack,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	output = &CreateCloudFormationStackOutput{}
+	req = c.newRequest(op, input, output)
+	return req, output
+}
+
+// CreateCloudFormationStack API operation for Amazon Lightsail.
+//
+// Creates an AWS CloudFormation stack, which creates a new Amazon EC2 instance
+// from an exported Amazon Lightsail snapshot. This operation results in a CloudFormation
+// stack record that can be used to track the AWS CloudFormation stack created.
+// Use the get cloud formation stack records operation to get a list of the
+// CloudFormation stacks created.
+//
+// Wait until after your new Amazon EC2 instance is created before running the
+// create cloud formation stack operation again with the same export snapshot
+// record.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation CreateCloudFormationStack for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateCloudFormationStack
+func (c *Lightsail) CreateCloudFormationStack(input *CreateCloudFormationStackInput) (*CreateCloudFormationStackOutput, error) {
+	// Build the request and execute it synchronously.
+	r, out := c.CreateCloudFormationStackRequest(input)
+	err := r.Send()
+	return out, err
+}
+
+// CreateCloudFormationStackWithContext is the same as CreateCloudFormationStack with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateCloudFormationStack for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) CreateCloudFormationStackWithContext(ctx aws.Context, input *CreateCloudFormationStackInput, opts ...request.Option) (*CreateCloudFormationStackOutput, error) {
+	// Build the request, attach the caller's context and options, then send.
+	r, out := c.CreateCloudFormationStackRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	err := r.Send()
+	return out, err
+}
+
+const opCreateContactMethod = "CreateContactMethod"
+
+// CreateContactMethodRequest generates a "aws/request.Request" representing the
+// client's request for the CreateContactMethod operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateContactMethod for more information on using the CreateContactMethod
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the CreateContactMethodRequest method.
+// req, resp := client.CreateContactMethodRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateContactMethod
+func (c *Lightsail) CreateContactMethodRequest(input *CreateContactMethodInput) (req *request.Request, output *CreateContactMethodOutput) {
+	// A nil input is replaced with an empty one so the request is always valid.
+	if input == nil {
+		input = &CreateContactMethodInput{}
+	}
+
+	op := &request.Operation{
+		Name:       opCreateContactMethod,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	output = &CreateContactMethodOutput{}
+	req = c.newRequest(op, input, output)
+	return req, output
+}
+
+// CreateContactMethod API operation for Amazon Lightsail.
+//
+// Creates an email or SMS text message contact method.
+//
+// A contact method is used to send you notifications about your Amazon Lightsail
+// resources. You can add one email address and one mobile phone number contact
+// method in each AWS Region. However, SMS text messaging is not supported in
+// some AWS Regions, and SMS text messages cannot be sent to some countries/regions.
+// For more information, see Notifications in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-notifications).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation CreateContactMethod for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateContactMethod
+func (c *Lightsail) CreateContactMethod(input *CreateContactMethodInput) (*CreateContactMethodOutput, error) {
+	// Build the request and execute it synchronously.
+	r, out := c.CreateContactMethodRequest(input)
+	err := r.Send()
+	return out, err
+}
+
+// CreateContactMethodWithContext is the same as CreateContactMethod with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateContactMethod for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) CreateContactMethodWithContext(ctx aws.Context, input *CreateContactMethodInput, opts ...request.Option) (*CreateContactMethodOutput, error) {
+	// Build the request, attach the caller's context and options, then send.
+	r, out := c.CreateContactMethodRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	err := r.Send()
+	return out, err
+}
+
+const opCreateContainerService = "CreateContainerService"
+
+// CreateContainerServiceRequest generates a "aws/request.Request" representing the
+// client's request for the CreateContainerService operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateContainerService for more information on using the CreateContainerService
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the CreateContainerServiceRequest method.
+// req, resp := client.CreateContainerServiceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateContainerService
+func (c *Lightsail) CreateContainerServiceRequest(input *CreateContainerServiceInput) (req *request.Request, output *CreateContainerServiceOutput) {
+	// A nil input is replaced with an empty one so the request is always valid.
+	if input == nil {
+		input = &CreateContainerServiceInput{}
+	}
+
+	op := &request.Operation{
+		Name:       opCreateContainerService,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	output = &CreateContainerServiceOutput{}
+	req = c.newRequest(op, input, output)
+	return req, output
+}
+
+// CreateContainerService API operation for Amazon Lightsail.
+//
+// Creates an Amazon Lightsail container service.
+//
+// A Lightsail container service is a compute resource to which you can deploy
+// containers. For more information, see Container services in Amazon Lightsail
+// (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-container-services)
+// in the Lightsail Dev Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation CreateContainerService for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateContainerService
+func (c *Lightsail) CreateContainerService(input *CreateContainerServiceInput) (*CreateContainerServiceOutput, error) {
+	// Build the request and execute it synchronously.
+	r, out := c.CreateContainerServiceRequest(input)
+	err := r.Send()
+	return out, err
+}
+
+// CreateContainerServiceWithContext is the same as CreateContainerService with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateContainerService for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) CreateContainerServiceWithContext(ctx aws.Context, input *CreateContainerServiceInput, opts ...request.Option) (*CreateContainerServiceOutput, error) {
+	// Build the request, attach the caller's context and options, then send.
+	r, out := c.CreateContainerServiceRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	err := r.Send()
+	return out, err
+}
+
+const opCreateContainerServiceDeployment = "CreateContainerServiceDeployment"
+
+// CreateContainerServiceDeploymentRequest generates a "aws/request.Request" representing the
+// client's request for the CreateContainerServiceDeployment operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateContainerServiceDeployment for more information on using the CreateContainerServiceDeployment
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the CreateContainerServiceDeploymentRequest method.
+// req, resp := client.CreateContainerServiceDeploymentRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateContainerServiceDeployment
+func (c *Lightsail) CreateContainerServiceDeploymentRequest(input *CreateContainerServiceDeploymentInput) (req *request.Request, output *CreateContainerServiceDeploymentOutput) {
+	// A nil input is replaced with an empty one so the request is always valid.
+	if input == nil {
+		input = &CreateContainerServiceDeploymentInput{}
+	}
+
+	op := &request.Operation{
+		Name:       opCreateContainerServiceDeployment,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	output = &CreateContainerServiceDeploymentOutput{}
+	req = c.newRequest(op, input, output)
+	return req, output
+}
+
+// CreateContainerServiceDeployment API operation for Amazon Lightsail.
+//
+// Creates a deployment for your Amazon Lightsail container service.
+//
+// A deployment specifies the containers that will be launched on the container
+// service and their settings, such as the ports to open, the environment variables
+// to apply, and the launch command to run. It also specifies the container
+// that will serve as the public endpoint of the deployment and its settings,
+// such as the HTTP or HTTPS port to use, and the health check configuration.
+//
+// You can deploy containers to your container service using container images
+// from a public registry like Docker Hub, or from your local machine. For more
+// information, see Creating container images for your Amazon Lightsail container
+// services (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-creating-container-images)
+// in the Amazon Lightsail Developer Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation CreateContainerServiceDeployment for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateContainerServiceDeployment
+func (c *Lightsail) CreateContainerServiceDeployment(input *CreateContainerServiceDeploymentInput) (*CreateContainerServiceDeploymentOutput, error) {
+	// Build the request and execute it synchronously.
+	r, out := c.CreateContainerServiceDeploymentRequest(input)
+	err := r.Send()
+	return out, err
+}
+
+// CreateContainerServiceDeploymentWithContext is the same as CreateContainerServiceDeployment with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateContainerServiceDeployment for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) CreateContainerServiceDeploymentWithContext(ctx aws.Context, input *CreateContainerServiceDeploymentInput, opts ...request.Option) (*CreateContainerServiceDeploymentOutput, error) {
+	// Build the request, attach the caller's context and options, then send.
+	r, out := c.CreateContainerServiceDeploymentRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	err := r.Send()
+	return out, err
+}
+
+const opCreateContainerServiceRegistryLogin = "CreateContainerServiceRegistryLogin"
+
+// CreateContainerServiceRegistryLoginRequest generates a "aws/request.Request" representing the
+// client's request for the CreateContainerServiceRegistryLogin operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateContainerServiceRegistryLogin for more information on using the CreateContainerServiceRegistryLogin
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the CreateContainerServiceRegistryLoginRequest method.
+// req, resp := client.CreateContainerServiceRegistryLoginRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateContainerServiceRegistryLogin
+func (c *Lightsail) CreateContainerServiceRegistryLoginRequest(input *CreateContainerServiceRegistryLoginInput) (req *request.Request, output *CreateContainerServiceRegistryLoginOutput) {
+	// A nil input is replaced with an empty one so the request is always valid.
+	if input == nil {
+		input = &CreateContainerServiceRegistryLoginInput{}
+	}
+
+	op := &request.Operation{
+		Name:       opCreateContainerServiceRegistryLogin,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	output = &CreateContainerServiceRegistryLoginOutput{}
+	req = c.newRequest(op, input, output)
+	return req, output
+}
+
+// CreateContainerServiceRegistryLogin API operation for Amazon Lightsail.
+//
+// Creates a temporary set of log in credentials that you can use to log in
+// to the Docker process on your local machine. After you're logged in, you
+// can use the native Docker commands to push your local container images to
+// the container image registry of your Amazon Lightsail account so that you
+// can use them with your Lightsail container service. The log in credentials
+// expire 12 hours after they are created, at which point you will need to create
+// a new set of log in credentials.
+//
+// You can only push container images to the container service registry of your
+// Lightsail account. You cannot pull container images or perform any other
+// container image management actions on the container service registry.
+//
+// After you push your container images to the container image registry of your
+// Lightsail account, use the RegisterContainerImage action to register the
+// pushed images to a specific Lightsail container service.
+//
+// This action is not required if you install and use the Lightsail Control
+// (lightsailctl) plugin to push container images to your Lightsail container
+// service. For more information, see Pushing and managing container images
+// on your Amazon Lightsail container services (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-pushing-container-images)
+// in the Amazon Lightsail Developer Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation CreateContainerServiceRegistryLogin for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateContainerServiceRegistryLogin
+func (c *Lightsail) CreateContainerServiceRegistryLogin(input *CreateContainerServiceRegistryLoginInput) (*CreateContainerServiceRegistryLoginOutput, error) {
+	// Build the request and execute it synchronously.
+	r, out := c.CreateContainerServiceRegistryLoginRequest(input)
+	err := r.Send()
+	return out, err
+}
+
+// CreateContainerServiceRegistryLoginWithContext is the same as CreateContainerServiceRegistryLogin with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateContainerServiceRegistryLogin for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) CreateContainerServiceRegistryLoginWithContext(ctx aws.Context, input *CreateContainerServiceRegistryLoginInput, opts ...request.Option) (*CreateContainerServiceRegistryLoginOutput, error) {
+	// Build the request, attach the caller's context and options, then send.
+	r, out := c.CreateContainerServiceRegistryLoginRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	err := r.Send()
+	return out, err
+}
+
+const opCreateDisk = "CreateDisk"
+
+// CreateDiskRequest generates a "aws/request.Request" representing the
+// client's request for the CreateDisk operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateDisk for more information on using the CreateDisk
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the CreateDiskRequest method.
+// req, resp := client.CreateDiskRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateDisk
+func (c *Lightsail) CreateDiskRequest(input *CreateDiskInput) (req *request.Request, output *CreateDiskOutput) {
+	// A nil input is replaced with an empty one so the request is always valid.
+	if input == nil {
+		input = &CreateDiskInput{}
+	}
+
+	op := &request.Operation{
+		Name:       opCreateDisk,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	output = &CreateDiskOutput{}
+	req = c.newRequest(op, input, output)
+	return req, output
+}
+
+// CreateDisk API operation for Amazon Lightsail.
+//
+// Creates a block storage disk that can be attached to an Amazon Lightsail
+// instance in the same Availability Zone (e.g., us-east-2a).
+//
+// The create disk operation supports tag-based access control via request tags.
+// For more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation CreateDisk for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateDisk
+func (c *Lightsail) CreateDisk(input *CreateDiskInput) (*CreateDiskOutput, error) {
+	// Build the request and execute it synchronously.
+	r, out := c.CreateDiskRequest(input)
+	err := r.Send()
+	return out, err
+}
+
+// CreateDiskWithContext is the same as CreateDisk with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateDisk for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) CreateDiskWithContext(ctx aws.Context, input *CreateDiskInput, opts ...request.Option) (*CreateDiskOutput, error) {
+	// Build the request, attach the caller's context and options, then send.
+	r, out := c.CreateDiskRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	err := r.Send()
+	return out, err
+}
+
+const opCreateDiskFromSnapshot = "CreateDiskFromSnapshot"
+
+// CreateDiskFromSnapshotRequest generates a "aws/request.Request" representing the
+// client's request for the CreateDiskFromSnapshot operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateDiskFromSnapshot for more information on using the CreateDiskFromSnapshot
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the CreateDiskFromSnapshotRequest method.
+// req, resp := client.CreateDiskFromSnapshotRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateDiskFromSnapshot
+func (c *Lightsail) CreateDiskFromSnapshotRequest(input *CreateDiskFromSnapshotInput) (req *request.Request, output *CreateDiskFromSnapshotOutput) {
+	// A nil input is replaced with an empty one so the request is always valid.
+	if input == nil {
+		input = &CreateDiskFromSnapshotInput{}
+	}
+
+	op := &request.Operation{
+		Name:       opCreateDiskFromSnapshot,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	output = &CreateDiskFromSnapshotOutput{}
+	req = c.newRequest(op, input, output)
+	return req, output
+}
+
+// CreateDiskFromSnapshot API operation for Amazon Lightsail.
+//
+// Creates a block storage disk from a manual or automatic snapshot of a disk.
+// The resulting disk can be attached to an Amazon Lightsail instance in the
+// same Availability Zone (e.g., us-east-2a).
+//
+// The create disk from snapshot operation supports tag-based access control
+// via request tags and resource tags applied to the resource identified by
+// disk snapshot name. For more information, see the Amazon Lightsail Developer
+// Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type
+// assertions with awserr.Error's Code and Message methods to get detailed
+// information about the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation CreateDiskFromSnapshot for usage and error information.
+//
+// Returned Error Types: ServiceException (a general service exception),
+// InvalidInputException (input does not conform to validation rules; note
+// that domain and distribution APIs are only available in the N. Virginia,
+// us-east-1, AWS Region), NotFoundException (resource not found),
+// OperationFailureException (the operation failed to execute),
+// AccessDeniedException (the user cannot be authenticated or uses invalid
+// credentials), AccountSetupInProgressException (the account is still being
+// set up), and UnauthenticatedException (the user has not been
+// authenticated).
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateDiskFromSnapshot
+func (c *Lightsail) CreateDiskFromSnapshot(input *CreateDiskFromSnapshotInput) (*CreateDiskFromSnapshotOutput, error) {
+	r, out := c.CreateDiskFromSnapshotRequest(input)
+	err := r.Send()
+	return out, err
+}
+
+// CreateDiskFromSnapshotWithContext is the same as CreateDiskFromSnapshot
+// with the addition of the ability to pass a context and additional request
+// options.
+//
+// See CreateDiskFromSnapshot for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) CreateDiskFromSnapshotWithContext(ctx aws.Context, input *CreateDiskFromSnapshotInput, opts ...request.Option) (*CreateDiskFromSnapshotOutput, error) {
+	r, out := c.CreateDiskFromSnapshotRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	err := r.Send()
+	return out, err
+}
+
+const opCreateDiskSnapshot = "CreateDiskSnapshot"
+
+// CreateDiskSnapshotRequest generates a "aws/request.Request" representing
+// the client's request for the CreateDiskSnapshot operation without sending
+// it. The "output" return value is populated with the response only after the
+// request's Send method returns without error.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+// Otherwise prefer CreateDiskSnapshot or CreateDiskSnapshotWithContext.
+//
+//
+//    // Example sending a request using the CreateDiskSnapshotRequest method.
+//    req, resp := client.CreateDiskSnapshotRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateDiskSnapshot
+func (c *Lightsail) CreateDiskSnapshotRequest(input *CreateDiskSnapshotInput) (req *request.Request, output *CreateDiskSnapshotOutput) {
+	// A nil input is replaced with an empty request payload.
+	if input == nil {
+		input = &CreateDiskSnapshotInput{}
+	}
+
+	output = &CreateDiskSnapshotOutput{}
+	req = c.newRequest(&request.Operation{
+		Name:       opCreateDiskSnapshot,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}, input, output)
+	return req, output
+}
+
+// CreateDiskSnapshot API operation for Amazon Lightsail.
+//
+// Creates a snapshot of a block storage disk. You can use snapshots for
+// backups, to make copies of disks, and to save data before shutting down a
+// Lightsail instance.
+//
+// You can take a snapshot of an attached disk that is in use; however, the
+// snapshot captures only data already written to the disk when the command
+// is issued, which may exclude data cached by applications or the operating
+// system. If you can pause all file systems on the disk long enough to take
+// a snapshot, your snapshot should be complete. Otherwise, unmount the disk
+// from within the Lightsail instance, issue the create disk snapshot
+// command, and then remount the disk to ensure a consistent and complete
+// snapshot. You may remount and use your disk while the snapshot status is
+// pending.
+//
+// You can also use this operation to create a snapshot of an instance's
+// system volume, for example to recover data from the system volume of a
+// botched instance or to back up the system volume like a block storage
+// disk. To do so, define the instance name parameter when issuing the
+// snapshot command; a snapshot of the defined instance's system volume is
+// created. After the snapshot is available, you can create a block storage
+// disk from it and attach the disk to a running instance to access the data.
+//
+// The create disk snapshot operation supports tag-based access control via
+// request tags. For more information, see the Amazon Lightsail Developer
+// Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type
+// assertions with awserr.Error's Code and Message methods to get detailed
+// information about the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation CreateDiskSnapshot for usage and error information.
+//
+// Returned Error Types: ServiceException (a general service exception),
+// InvalidInputException (input does not conform to validation rules; note
+// that domain and distribution APIs are only available in the N. Virginia,
+// us-east-1, AWS Region), NotFoundException (resource not found),
+// OperationFailureException (the operation failed to execute),
+// AccessDeniedException (the user cannot be authenticated or uses invalid
+// credentials), AccountSetupInProgressException (the account is still being
+// set up), and UnauthenticatedException (the user has not been
+// authenticated).
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateDiskSnapshot
+func (c *Lightsail) CreateDiskSnapshot(input *CreateDiskSnapshotInput) (*CreateDiskSnapshotOutput, error) {
+	r, out := c.CreateDiskSnapshotRequest(input)
+	err := r.Send()
+	return out, err
+}
+
+// CreateDiskSnapshotWithContext is the same as CreateDiskSnapshot with the
+// addition of the ability to pass a context and additional request options.
+//
+// See CreateDiskSnapshot for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) CreateDiskSnapshotWithContext(ctx aws.Context, input *CreateDiskSnapshotInput, opts ...request.Option) (*CreateDiskSnapshotOutput, error) {
+	r, out := c.CreateDiskSnapshotRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	err := r.Send()
+	return out, err
+}
+
+const opCreateDistribution = "CreateDistribution"
+
+// CreateDistributionRequest generates a "aws/request.Request" representing
+// the client's request for the CreateDistribution operation without sending
+// it. The "output" return value is populated with the response only after the
+// request's Send method returns without error.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+// Otherwise prefer CreateDistribution or CreateDistributionWithContext.
+//
+//
+//    // Example sending a request using the CreateDistributionRequest method.
+//    req, resp := client.CreateDistributionRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateDistribution
+func (c *Lightsail) CreateDistributionRequest(input *CreateDistributionInput) (req *request.Request, output *CreateDistributionOutput) {
+	// A nil input is replaced with an empty request payload.
+	if input == nil {
+		input = &CreateDistributionInput{}
+	}
+
+	output = &CreateDistributionOutput{}
+	req = c.newRequest(&request.Operation{
+		Name:       opCreateDistribution,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}, input, output)
+	return req, output
+}
+
+// CreateDistribution API operation for Amazon Lightsail.
+//
+// Creates an Amazon Lightsail content delivery network (CDN) distribution.
+//
+// A distribution is a globally distributed network of caching servers that
+// improve the performance of your website or web application hosted on a
+// Lightsail instance. For more information, see Content delivery networks
+// in Amazon Lightsail
+// (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-content-delivery-network-distributions).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type
+// assertions with awserr.Error's Code and Message methods to get detailed
+// information about the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation CreateDistribution for usage and error information.
+//
+// Returned Error Types: ServiceException (a general service exception),
+// InvalidInputException (input does not conform to validation rules; note
+// that domain and distribution APIs are only available in the N. Virginia,
+// us-east-1, AWS Region), NotFoundException (resource not found),
+// OperationFailureException (the operation failed to execute),
+// AccessDeniedException (the user cannot be authenticated or uses invalid
+// credentials), and UnauthenticatedException (the user has not been
+// authenticated).
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateDistribution
+func (c *Lightsail) CreateDistribution(input *CreateDistributionInput) (*CreateDistributionOutput, error) {
+	r, out := c.CreateDistributionRequest(input)
+	err := r.Send()
+	return out, err
+}
+
+// CreateDistributionWithContext is the same as CreateDistribution with the
+// addition of the ability to pass a context and additional request options.
+//
+// See CreateDistribution for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) CreateDistributionWithContext(ctx aws.Context, input *CreateDistributionInput, opts ...request.Option) (*CreateDistributionOutput, error) {
+	r, out := c.CreateDistributionRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	err := r.Send()
+	return out, err
+}
+
+const opCreateDomain = "CreateDomain"
+
+// CreateDomainRequest generates a "aws/request.Request" representing the
+// client's request for the CreateDomain operation without sending it. The
+// "output" return value is populated with the response only after the
+// request's Send method returns without error.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+// Otherwise prefer CreateDomain or CreateDomainWithContext.
+//
+//
+//    // Example sending a request using the CreateDomainRequest method.
+//    req, resp := client.CreateDomainRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateDomain
+func (c *Lightsail) CreateDomainRequest(input *CreateDomainInput) (req *request.Request, output *CreateDomainOutput) {
+	// A nil input is replaced with an empty request payload.
+	if input == nil {
+		input = &CreateDomainInput{}
+	}
+
+	output = &CreateDomainOutput{}
+	req = c.newRequest(&request.Operation{
+		Name:       opCreateDomain,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}, input, output)
+	return req, output
+}
+
+// CreateDomain API operation for Amazon Lightsail.
+//
+// Creates a domain resource for the specified domain (e.g., example.com).
+//
+// The create domain operation supports tag-based access control via request
+// tags. For more information, see the Amazon Lightsail Developer Guide
+// (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type
+// assertions with awserr.Error's Code and Message methods to get detailed
+// information about the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation CreateDomain for usage and error information.
+//
+// Returned Error Types: ServiceException (a general service exception),
+// InvalidInputException (input does not conform to validation rules; note
+// that domain and distribution APIs are only available in the N. Virginia,
+// us-east-1, AWS Region), NotFoundException (resource not found),
+// OperationFailureException (the operation failed to execute),
+// AccessDeniedException (the user cannot be authenticated or uses invalid
+// credentials), AccountSetupInProgressException (the account is still being
+// set up), and UnauthenticatedException (the user has not been
+// authenticated).
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateDomain
+func (c *Lightsail) CreateDomain(input *CreateDomainInput) (*CreateDomainOutput, error) {
+	r, out := c.CreateDomainRequest(input)
+	err := r.Send()
+	return out, err
+}
+
+// CreateDomainWithContext is the same as CreateDomain with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateDomain for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) CreateDomainWithContext(ctx aws.Context, input *CreateDomainInput, opts ...request.Option) (*CreateDomainOutput, error) {
+	r, out := c.CreateDomainRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	err := r.Send()
+	return out, err
+}
+
+const opCreateDomainEntry = "CreateDomainEntry"
+
+// CreateDomainEntryRequest generates a "aws/request.Request" representing
+// the client's request for the CreateDomainEntry operation without sending
+// it. The "output" return value is populated with the response only after the
+// request's Send method returns without error.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+// Otherwise prefer CreateDomainEntry or CreateDomainEntryWithContext.
+//
+//
+//    // Example sending a request using the CreateDomainEntryRequest method.
+//    req, resp := client.CreateDomainEntryRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateDomainEntry
+func (c *Lightsail) CreateDomainEntryRequest(input *CreateDomainEntryInput) (req *request.Request, output *CreateDomainEntryOutput) {
+	// A nil input is replaced with an empty request payload.
+	if input == nil {
+		input = &CreateDomainEntryInput{}
+	}
+
+	output = &CreateDomainEntryOutput{}
+	req = c.newRequest(&request.Operation{
+		Name:       opCreateDomainEntry,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}, input, output)
+	return req, output
+}
+
+// CreateDomainEntry API operation for Amazon Lightsail.
+//
+// Creates one of the following domain name system (DNS) records in a domain
+// DNS zone: Address (A), canonical name (CNAME), mail exchanger (MX), name
+// server (NS), start of authority (SOA), service locator (SRV), or text
+// (TXT).
+//
+// The create domain entry operation supports tag-based access control via
+// resource tags applied to the resource identified by domain name. For more
+// information, see the Amazon Lightsail Developer Guide
+// (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type
+// assertions with awserr.Error's Code and Message methods to get detailed
+// information about the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation CreateDomainEntry for usage and error information.
+//
+// Returned Error Types: ServiceException (a general service exception),
+// InvalidInputException (input does not conform to validation rules; note
+// that domain and distribution APIs are only available in the N. Virginia,
+// us-east-1, AWS Region), NotFoundException (resource not found),
+// OperationFailureException (the operation failed to execute),
+// AccessDeniedException (the user cannot be authenticated or uses invalid
+// credentials), AccountSetupInProgressException (the account is still being
+// set up), and UnauthenticatedException (the user has not been
+// authenticated).
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateDomainEntry
+func (c *Lightsail) CreateDomainEntry(input *CreateDomainEntryInput) (*CreateDomainEntryOutput, error) {
+	r, out := c.CreateDomainEntryRequest(input)
+	err := r.Send()
+	return out, err
+}
+
+// CreateDomainEntryWithContext is the same as CreateDomainEntry with the
+// addition of the ability to pass a context and additional request options.
+//
+// See CreateDomainEntry for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) CreateDomainEntryWithContext(ctx aws.Context, input *CreateDomainEntryInput, opts ...request.Option) (*CreateDomainEntryOutput, error) {
+	r, out := c.CreateDomainEntryRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	err := r.Send()
+	return out, err
+}
+
+const opCreateInstanceSnapshot = "CreateInstanceSnapshot"
+
+// CreateInstanceSnapshotRequest generates a "aws/request.Request"
+// representing the client's request for the CreateInstanceSnapshot operation
+// without sending it. The "output" return value is populated with the
+// response only after the request's Send method returns without error.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+// Otherwise prefer CreateInstanceSnapshot or CreateInstanceSnapshotWithContext.
+//
+//
+//    // Example sending a request using the CreateInstanceSnapshotRequest method.
+//    req, resp := client.CreateInstanceSnapshotRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateInstanceSnapshot
+func (c *Lightsail) CreateInstanceSnapshotRequest(input *CreateInstanceSnapshotInput) (req *request.Request, output *CreateInstanceSnapshotOutput) {
+	// A nil input is replaced with an empty request payload.
+	if input == nil {
+		input = &CreateInstanceSnapshotInput{}
+	}
+
+	output = &CreateInstanceSnapshotOutput{}
+	req = c.newRequest(&request.Operation{
+		Name:       opCreateInstanceSnapshot,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}, input, output)
+	return req, output
+}
+
+// CreateInstanceSnapshot API operation for Amazon Lightsail.
+//
+// Creates a snapshot of a specific virtual private server, or instance. You
+// can use a snapshot to create a new instance that is based on that snapshot.
+//
+// The create instance snapshot operation supports tag-based access control
+// via request tags. For more information, see the Amazon Lightsail Developer
+// Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type
+// assertions with awserr.Error's Code and Message methods to get detailed
+// information about the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation CreateInstanceSnapshot for usage and error information.
+//
+// Returned Error Types: ServiceException (a general service exception),
+// InvalidInputException (input does not conform to validation rules; note
+// that domain and distribution APIs are only available in the N. Virginia,
+// us-east-1, AWS Region), NotFoundException (resource not found),
+// OperationFailureException (the operation failed to execute),
+// AccessDeniedException (the user cannot be authenticated or uses invalid
+// credentials), AccountSetupInProgressException (the account is still being
+// set up), and UnauthenticatedException (the user has not been
+// authenticated).
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateInstanceSnapshot
+func (c *Lightsail) CreateInstanceSnapshot(input *CreateInstanceSnapshotInput) (*CreateInstanceSnapshotOutput, error) {
+	r, out := c.CreateInstanceSnapshotRequest(input)
+	err := r.Send()
+	return out, err
+}
+
+// CreateInstanceSnapshotWithContext is the same as CreateInstanceSnapshot
+// with the addition of the ability to pass a context and additional request
+// options.
+//
+// See CreateInstanceSnapshot for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) CreateInstanceSnapshotWithContext(ctx aws.Context, input *CreateInstanceSnapshotInput, opts ...request.Option) (*CreateInstanceSnapshotOutput, error) {
+	r, out := c.CreateInstanceSnapshotRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	err := r.Send()
+	return out, err
+}
+
+const opCreateInstances = "CreateInstances"
+
+// CreateInstancesRequest generates a "aws/request.Request" representing the
+// client's request for the CreateInstances operation without sending it. The
+// "output" return value is populated with the response only after the
+// request's Send method returns without error.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+// Otherwise prefer CreateInstances or CreateInstancesWithContext.
+//
+//
+//    // Example sending a request using the CreateInstancesRequest method.
+//    req, resp := client.CreateInstancesRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateInstances
+func (c *Lightsail) CreateInstancesRequest(input *CreateInstancesInput) (req *request.Request, output *CreateInstancesOutput) {
+	// A nil input is replaced with an empty request payload.
+	if input == nil {
+		input = &CreateInstancesInput{}
+	}
+
+	output = &CreateInstancesOutput{}
+	req = c.newRequest(&request.Operation{
+		Name:       opCreateInstances,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}, input, output)
+	return req, output
+}
+
+// CreateInstances API operation for Amazon Lightsail.
+//
+// Creates one or more Amazon Lightsail instances.
+//
+// The create instances operation supports tag-based access control via
+// request tags. For more information, see the Lightsail Developer Guide
+// (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type
+// assertions with awserr.Error's Code and Message methods to get detailed
+// information about the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation CreateInstances for usage and error information.
+//
+// Returned Error Types: ServiceException (a general service exception),
+// InvalidInputException (input does not conform to validation rules; note
+// that domain and distribution APIs are only available in the N. Virginia,
+// us-east-1, AWS Region), NotFoundException (resource not found),
+// OperationFailureException (the operation failed to execute),
+// AccessDeniedException (the user cannot be authenticated or uses invalid
+// credentials), AccountSetupInProgressException (the account is still being
+// set up), and UnauthenticatedException (the user has not been
+// authenticated).
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateInstances
+func (c *Lightsail) CreateInstances(input *CreateInstancesInput) (*CreateInstancesOutput, error) {
+	r, out := c.CreateInstancesRequest(input)
+	err := r.Send()
+	return out, err
+}
+
+// CreateInstancesWithContext is the same as CreateInstances with the
+// addition of the ability to pass a context and additional request options.
+//
+// See CreateInstances for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) CreateInstancesWithContext(ctx aws.Context, input *CreateInstancesInput, opts ...request.Option) (*CreateInstancesOutput, error) {
+	r, out := c.CreateInstancesRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	err := r.Send()
+	return out, err
+}
+
+const opCreateInstancesFromSnapshot = "CreateInstancesFromSnapshot"
+
+// CreateInstancesFromSnapshotRequest generates a "aws/request.Request"
+// representing the client's request for the CreateInstancesFromSnapshot
+// operation without sending it. The "output" return value is populated with
+// the response only after the request's Send method returns without error.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+// Otherwise prefer CreateInstancesFromSnapshot or
+// CreateInstancesFromSnapshotWithContext.
+//
+//
+//    // Example sending a request using the CreateInstancesFromSnapshotRequest method.
+//    req, resp := client.CreateInstancesFromSnapshotRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateInstancesFromSnapshot
+func (c *Lightsail) CreateInstancesFromSnapshotRequest(input *CreateInstancesFromSnapshotInput) (req *request.Request, output *CreateInstancesFromSnapshotOutput) {
+	// A nil input is replaced with an empty request payload.
+	if input == nil {
+		input = &CreateInstancesFromSnapshotInput{}
+	}
+
+	output = &CreateInstancesFromSnapshotOutput{}
+	req = c.newRequest(&request.Operation{
+		Name:       opCreateInstancesFromSnapshot,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}, input, output)
+	return req, output
+}
+
+// CreateInstancesFromSnapshot API operation for Amazon Lightsail.
+//
+// Creates one or more new instances from a manual or automatic snapshot of
+// an instance.
+//
+// The create instances from snapshot operation supports tag-based access
+// control via request tags and resource tags applied to the resource
+// identified by instance snapshot name. For more information, see the Amazon
+// Lightsail Developer Guide
+// (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type
+// assertions with awserr.Error's Code and Message methods to get detailed
+// information about the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation CreateInstancesFromSnapshot for usage and error information.
+//
+// Returned Error Types: ServiceException (a general service exception),
+// InvalidInputException (input does not conform to validation rules; note
+// that domain and distribution APIs are only available in the N. Virginia,
+// us-east-1, AWS Region), NotFoundException (resource not found),
+// OperationFailureException (the operation failed to execute),
+// AccessDeniedException (the user cannot be authenticated or uses invalid
+// credentials), AccountSetupInProgressException (the account is still being
+// set up), and UnauthenticatedException (the user has not been
+// authenticated).
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateInstancesFromSnapshot
+func (c *Lightsail) CreateInstancesFromSnapshot(input *CreateInstancesFromSnapshotInput) (*CreateInstancesFromSnapshotOutput, error) {
+	r, out := c.CreateInstancesFromSnapshotRequest(input)
+	err := r.Send()
+	return out, err
+}
+
+// CreateInstancesFromSnapshotWithContext is the same as CreateInstancesFromSnapshot with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateInstancesFromSnapshot for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) CreateInstancesFromSnapshotWithContext(ctx aws.Context, input *CreateInstancesFromSnapshotInput, opts ...request.Option) (*CreateInstancesFromSnapshotOutput, error) {
+ req, out := c.CreateInstancesFromSnapshotRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opCreateKeyPair = "CreateKeyPair"
+
+// CreateKeyPairRequest generates a "aws/request.Request" representing the
+// client's request for the CreateKeyPair operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateKeyPair for more information on using the CreateKeyPair
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the CreateKeyPairRequest method.
+// req, resp := client.CreateKeyPairRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateKeyPair
+func (c *Lightsail) CreateKeyPairRequest(input *CreateKeyPairInput) (req *request.Request, output *CreateKeyPairOutput) {
+ op := &request.Operation{
+ Name: opCreateKeyPair,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateKeyPairInput{}
+ }
+
+ output = &CreateKeyPairOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateKeyPair API operation for Amazon Lightsail.
+//
+// Creates an SSH key pair.
+//
+// The create key pair operation supports tag-based access control via request
+// tags. For more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation CreateKeyPair for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateKeyPair
+func (c *Lightsail) CreateKeyPair(input *CreateKeyPairInput) (*CreateKeyPairOutput, error) {
+ req, out := c.CreateKeyPairRequest(input)
+ return out, req.Send()
+}
+
+// CreateKeyPairWithContext is the same as CreateKeyPair with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateKeyPair for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) CreateKeyPairWithContext(ctx aws.Context, input *CreateKeyPairInput, opts ...request.Option) (*CreateKeyPairOutput, error) {
+ req, out := c.CreateKeyPairRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opCreateLoadBalancer = "CreateLoadBalancer"
+
+// CreateLoadBalancerRequest generates a "aws/request.Request" representing the
+// client's request for the CreateLoadBalancer operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateLoadBalancer for more information on using the CreateLoadBalancer
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the CreateLoadBalancerRequest method.
+// req, resp := client.CreateLoadBalancerRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateLoadBalancer
+func (c *Lightsail) CreateLoadBalancerRequest(input *CreateLoadBalancerInput) (req *request.Request, output *CreateLoadBalancerOutput) {
+ op := &request.Operation{
+ Name: opCreateLoadBalancer,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateLoadBalancerInput{}
+ }
+
+ output = &CreateLoadBalancerOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateLoadBalancer API operation for Amazon Lightsail.
+//
+// Creates a Lightsail load balancer. To learn more about deciding whether to
+// load balance your application, see Configure your Lightsail instances for
+// load balancing (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/configure-lightsail-instances-for-load-balancing).
+// You can create up to 5 load balancers per AWS Region in your account.
+//
+// When you create a load balancer, you can specify a unique name and port settings.
+// To change additional load balancer settings, use the UpdateLoadBalancerAttribute
+// operation.
+//
+// The create load balancer operation supports tag-based access control via
+// request tags. For more information, see the Amazon Lightsail Developer Guide
+// (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation CreateLoadBalancer for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateLoadBalancer
+func (c *Lightsail) CreateLoadBalancer(input *CreateLoadBalancerInput) (*CreateLoadBalancerOutput, error) {
+ req, out := c.CreateLoadBalancerRequest(input)
+ return out, req.Send()
+}
+
+// CreateLoadBalancerWithContext is the same as CreateLoadBalancer with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateLoadBalancer for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) CreateLoadBalancerWithContext(ctx aws.Context, input *CreateLoadBalancerInput, opts ...request.Option) (*CreateLoadBalancerOutput, error) {
+ req, out := c.CreateLoadBalancerRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opCreateLoadBalancerTlsCertificate = "CreateLoadBalancerTlsCertificate"
+
+// CreateLoadBalancerTlsCertificateRequest generates a "aws/request.Request" representing the
+// client's request for the CreateLoadBalancerTlsCertificate operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateLoadBalancerTlsCertificate for more information on using the CreateLoadBalancerTlsCertificate
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the CreateLoadBalancerTlsCertificateRequest method.
+// req, resp := client.CreateLoadBalancerTlsCertificateRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateLoadBalancerTlsCertificate
+func (c *Lightsail) CreateLoadBalancerTlsCertificateRequest(input *CreateLoadBalancerTlsCertificateInput) (req *request.Request, output *CreateLoadBalancerTlsCertificateOutput) {
+ op := &request.Operation{
+ Name: opCreateLoadBalancerTlsCertificate,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateLoadBalancerTlsCertificateInput{}
+ }
+
+ output = &CreateLoadBalancerTlsCertificateOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateLoadBalancerTlsCertificate API operation for Amazon Lightsail.
+//
+// Creates an SSL/TLS certificate for an Amazon Lightsail load balancer.
+//
+// TLS is just an updated, more secure version of Secure Socket Layer (SSL).
+//
+// The CreateLoadBalancerTlsCertificate operation supports tag-based access
+// control via resource tags applied to the resource identified by load balancer
+// name. For more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation CreateLoadBalancerTlsCertificate for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateLoadBalancerTlsCertificate
+func (c *Lightsail) CreateLoadBalancerTlsCertificate(input *CreateLoadBalancerTlsCertificateInput) (*CreateLoadBalancerTlsCertificateOutput, error) {
+ req, out := c.CreateLoadBalancerTlsCertificateRequest(input)
+ return out, req.Send()
+}
+
+// CreateLoadBalancerTlsCertificateWithContext is the same as CreateLoadBalancerTlsCertificate with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateLoadBalancerTlsCertificate for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) CreateLoadBalancerTlsCertificateWithContext(ctx aws.Context, input *CreateLoadBalancerTlsCertificateInput, opts ...request.Option) (*CreateLoadBalancerTlsCertificateOutput, error) {
+ req, out := c.CreateLoadBalancerTlsCertificateRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opCreateRelationalDatabase = "CreateRelationalDatabase"
+
+// CreateRelationalDatabaseRequest generates a "aws/request.Request" representing the
+// client's request for the CreateRelationalDatabase operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateRelationalDatabase for more information on using the CreateRelationalDatabase
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the CreateRelationalDatabaseRequest method.
+// req, resp := client.CreateRelationalDatabaseRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateRelationalDatabase
+func (c *Lightsail) CreateRelationalDatabaseRequest(input *CreateRelationalDatabaseInput) (req *request.Request, output *CreateRelationalDatabaseOutput) {
+ op := &request.Operation{
+ Name: opCreateRelationalDatabase,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateRelationalDatabaseInput{}
+ }
+
+ output = &CreateRelationalDatabaseOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateRelationalDatabase API operation for Amazon Lightsail.
+//
+// Creates a new database in Amazon Lightsail.
+//
+// The create relational database operation supports tag-based access control
+// via request tags. For more information, see the Amazon Lightsail Developer
+// Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation CreateRelationalDatabase for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateRelationalDatabase
+func (c *Lightsail) CreateRelationalDatabase(input *CreateRelationalDatabaseInput) (*CreateRelationalDatabaseOutput, error) {
+ req, out := c.CreateRelationalDatabaseRequest(input)
+ return out, req.Send()
+}
+
+// CreateRelationalDatabaseWithContext is the same as CreateRelationalDatabase with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateRelationalDatabase for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) CreateRelationalDatabaseWithContext(ctx aws.Context, input *CreateRelationalDatabaseInput, opts ...request.Option) (*CreateRelationalDatabaseOutput, error) {
+ req, out := c.CreateRelationalDatabaseRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opCreateRelationalDatabaseFromSnapshot = "CreateRelationalDatabaseFromSnapshot"
+
+// CreateRelationalDatabaseFromSnapshotRequest generates a "aws/request.Request" representing the
+// client's request for the CreateRelationalDatabaseFromSnapshot operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateRelationalDatabaseFromSnapshot for more information on using the CreateRelationalDatabaseFromSnapshot
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the CreateRelationalDatabaseFromSnapshotRequest method.
+// req, resp := client.CreateRelationalDatabaseFromSnapshotRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateRelationalDatabaseFromSnapshot
+func (c *Lightsail) CreateRelationalDatabaseFromSnapshotRequest(input *CreateRelationalDatabaseFromSnapshotInput) (req *request.Request, output *CreateRelationalDatabaseFromSnapshotOutput) {
+ op := &request.Operation{
+ Name: opCreateRelationalDatabaseFromSnapshot,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateRelationalDatabaseFromSnapshotInput{}
+ }
+
+ output = &CreateRelationalDatabaseFromSnapshotOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateRelationalDatabaseFromSnapshot API operation for Amazon Lightsail.
+//
+// Creates a new database from an existing database snapshot in Amazon Lightsail.
+//
+// You can create a new database from a snapshot in if something goes wrong
+// with your original database, or to change it to a different plan, such as
+// a high availability or standard plan.
+//
+// The create relational database from snapshot operation supports tag-based
+// access control via request tags and resource tags applied to the resource
+// identified by relationalDatabaseSnapshotName. For more information, see the
+// Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation CreateRelationalDatabaseFromSnapshot for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateRelationalDatabaseFromSnapshot
+func (c *Lightsail) CreateRelationalDatabaseFromSnapshot(input *CreateRelationalDatabaseFromSnapshotInput) (*CreateRelationalDatabaseFromSnapshotOutput, error) {
+ req, out := c.CreateRelationalDatabaseFromSnapshotRequest(input)
+ return out, req.Send()
+}
+
+// CreateRelationalDatabaseFromSnapshotWithContext is the same as CreateRelationalDatabaseFromSnapshot with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateRelationalDatabaseFromSnapshot for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) CreateRelationalDatabaseFromSnapshotWithContext(ctx aws.Context, input *CreateRelationalDatabaseFromSnapshotInput, opts ...request.Option) (*CreateRelationalDatabaseFromSnapshotOutput, error) {
+ req, out := c.CreateRelationalDatabaseFromSnapshotRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opCreateRelationalDatabaseSnapshot = "CreateRelationalDatabaseSnapshot"
+
+// CreateRelationalDatabaseSnapshotRequest generates a "aws/request.Request" representing the
+// client's request for the CreateRelationalDatabaseSnapshot operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateRelationalDatabaseSnapshot for more information on using the CreateRelationalDatabaseSnapshot
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the CreateRelationalDatabaseSnapshotRequest method.
+// req, resp := client.CreateRelationalDatabaseSnapshotRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateRelationalDatabaseSnapshot
+func (c *Lightsail) CreateRelationalDatabaseSnapshotRequest(input *CreateRelationalDatabaseSnapshotInput) (req *request.Request, output *CreateRelationalDatabaseSnapshotOutput) {
+ op := &request.Operation{
+ Name: opCreateRelationalDatabaseSnapshot,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateRelationalDatabaseSnapshotInput{}
+ }
+
+ output = &CreateRelationalDatabaseSnapshotOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateRelationalDatabaseSnapshot API operation for Amazon Lightsail.
+//
+// Creates a snapshot of your database in Amazon Lightsail. You can use snapshots
+// for backups, to make copies of a database, and to save data before deleting
+// a database.
+//
+// The create relational database snapshot operation supports tag-based access
+// control via request tags. For more information, see the Amazon Lightsail
+// Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation CreateRelationalDatabaseSnapshot for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateRelationalDatabaseSnapshot
+func (c *Lightsail) CreateRelationalDatabaseSnapshot(input *CreateRelationalDatabaseSnapshotInput) (*CreateRelationalDatabaseSnapshotOutput, error) {
+ req, out := c.CreateRelationalDatabaseSnapshotRequest(input)
+ return out, req.Send()
+}
+
+// CreateRelationalDatabaseSnapshotWithContext is the same as CreateRelationalDatabaseSnapshot with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateRelationalDatabaseSnapshot for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) CreateRelationalDatabaseSnapshotWithContext(ctx aws.Context, input *CreateRelationalDatabaseSnapshotInput, opts ...request.Option) (*CreateRelationalDatabaseSnapshotOutput, error) {
+ req, out := c.CreateRelationalDatabaseSnapshotRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteAlarm = "DeleteAlarm"
+
+// DeleteAlarmRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteAlarm operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteAlarm for more information on using the DeleteAlarm
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteAlarmRequest method.
+// req, resp := client.DeleteAlarmRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteAlarm
+func (c *Lightsail) DeleteAlarmRequest(input *DeleteAlarmInput) (req *request.Request, output *DeleteAlarmOutput) {
+ op := &request.Operation{
+ Name: opDeleteAlarm,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteAlarmInput{}
+ }
+
+ output = &DeleteAlarmOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DeleteAlarm API operation for Amazon Lightsail.
+//
+// Deletes an alarm.
+//
+// An alarm is used to monitor a single metric for one of your resources. When
+// a metric condition is met, the alarm can notify you by email, SMS text message,
+// and a banner displayed on the Amazon Lightsail console. For more information,
+// see Alarms in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-alarms).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation DeleteAlarm for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteAlarm
+func (c *Lightsail) DeleteAlarm(input *DeleteAlarmInput) (*DeleteAlarmOutput, error) {
+ req, out := c.DeleteAlarmRequest(input)
+ return out, req.Send()
+}
+
+// DeleteAlarmWithContext is the same as DeleteAlarm with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteAlarm for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) DeleteAlarmWithContext(ctx aws.Context, input *DeleteAlarmInput, opts ...request.Option) (*DeleteAlarmOutput, error) {
+ req, out := c.DeleteAlarmRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteAutoSnapshot = "DeleteAutoSnapshot"
+
+// DeleteAutoSnapshotRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteAutoSnapshot operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteAutoSnapshot for more information on using the DeleteAutoSnapshot
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteAutoSnapshotRequest method.
+// req, resp := client.DeleteAutoSnapshotRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteAutoSnapshot
+func (c *Lightsail) DeleteAutoSnapshotRequest(input *DeleteAutoSnapshotInput) (req *request.Request, output *DeleteAutoSnapshotOutput) {
+ op := &request.Operation{
+ Name: opDeleteAutoSnapshot,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteAutoSnapshotInput{}
+ }
+
+ output = &DeleteAutoSnapshotOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DeleteAutoSnapshot API operation for Amazon Lightsail.
+//
+// Deletes an automatic snapshot of an instance or disk. For more information,
+// see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation DeleteAutoSnapshot for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteAutoSnapshot
+func (c *Lightsail) DeleteAutoSnapshot(input *DeleteAutoSnapshotInput) (*DeleteAutoSnapshotOutput, error) {
+ req, out := c.DeleteAutoSnapshotRequest(input)
+ return out, req.Send()
+}
+
+// DeleteAutoSnapshotWithContext is the same as DeleteAutoSnapshot with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteAutoSnapshot for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) DeleteAutoSnapshotWithContext(ctx aws.Context, input *DeleteAutoSnapshotInput, opts ...request.Option) (*DeleteAutoSnapshotOutput, error) {
+ req, out := c.DeleteAutoSnapshotRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteBucket = "DeleteBucket"
+
+// DeleteBucketRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBucket operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteBucket for more information on using the DeleteBucket
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteBucketRequest method.
+// req, resp := client.DeleteBucketRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteBucket
+func (c *Lightsail) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucket,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteBucketInput{}
+ }
+
+ output = &DeleteBucketOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DeleteBucket API operation for Amazon Lightsail.
+//
+// Deletes a Amazon Lightsail bucket.
+//
+// When you delete your bucket, the bucket name is released and can be reused
+// for a new bucket in your account or another AWS account.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation DeleteBucket for usage and error information.
+//
+// Returned Error Types:
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * ServiceException
+// A general service exception.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteBucket
+func (c *Lightsail) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) {
+ req, out := c.DeleteBucketRequest(input)
+ return out, req.Send()
+}
+
+// DeleteBucketWithContext is the same as DeleteBucket with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucket for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) DeleteBucketWithContext(ctx aws.Context, input *DeleteBucketInput, opts ...request.Option) (*DeleteBucketOutput, error) {
+ req, out := c.DeleteBucketRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteBucketAccessKey = "DeleteBucketAccessKey"
+
+// DeleteBucketAccessKeyRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBucketAccessKey operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteBucketAccessKey for more information on using the DeleteBucketAccessKey
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteBucketAccessKeyRequest method.
+// req, resp := client.DeleteBucketAccessKeyRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteBucketAccessKey
+func (c *Lightsail) DeleteBucketAccessKeyRequest(input *DeleteBucketAccessKeyInput) (req *request.Request, output *DeleteBucketAccessKeyOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucketAccessKey,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteBucketAccessKeyInput{}
+ }
+
+ output = &DeleteBucketAccessKeyOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DeleteBucketAccessKey API operation for Amazon Lightsail.
+//
+// Deletes an access key for the specified Amazon Lightsail bucket.
+//
+// We recommend that you delete an access key if the secret access key is compromised.
+//
+// For more information about access keys, see Creating access keys for a bucket
+// in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-creating-bucket-access-keys)
+// in the Amazon Lightsail Developer Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation DeleteBucketAccessKey for usage and error information.
+//
+// Returned Error Types:
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * ServiceException
+// A general service exception.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteBucketAccessKey
+func (c *Lightsail) DeleteBucketAccessKey(input *DeleteBucketAccessKeyInput) (*DeleteBucketAccessKeyOutput, error) {
+ req, out := c.DeleteBucketAccessKeyRequest(input)
+ return out, req.Send()
+}
+
+// DeleteBucketAccessKeyWithContext is the same as DeleteBucketAccessKey with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketAccessKey for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) DeleteBucketAccessKeyWithContext(ctx aws.Context, input *DeleteBucketAccessKeyInput, opts ...request.Option) (*DeleteBucketAccessKeyOutput, error) {
+ req, out := c.DeleteBucketAccessKeyRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteCertificate = "DeleteCertificate"
+
+// DeleteCertificateRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteCertificate operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteCertificate for more information on using the DeleteCertificate
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteCertificateRequest method.
+// req, resp := client.DeleteCertificateRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteCertificate
+func (c *Lightsail) DeleteCertificateRequest(input *DeleteCertificateInput) (req *request.Request, output *DeleteCertificateOutput) {
+ op := &request.Operation{
+ Name: opDeleteCertificate,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteCertificateInput{}
+ }
+
+ output = &DeleteCertificateOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DeleteCertificate API operation for Amazon Lightsail.
+//
+// Deletes an SSL/TLS certificate for your Amazon Lightsail content delivery
+// network (CDN) distribution.
+//
+// Certificates that are currently attached to a distribution cannot be deleted.
+// Use the DetachCertificateFromDistribution action to detach a certificate
+// from a distribution.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation DeleteCertificate for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteCertificate
+func (c *Lightsail) DeleteCertificate(input *DeleteCertificateInput) (*DeleteCertificateOutput, error) {
+ req, out := c.DeleteCertificateRequest(input)
+ return out, req.Send()
+}
+
+// DeleteCertificateWithContext is the same as DeleteCertificate with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteCertificate for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) DeleteCertificateWithContext(ctx aws.Context, input *DeleteCertificateInput, opts ...request.Option) (*DeleteCertificateOutput, error) {
+ req, out := c.DeleteCertificateRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteContactMethod = "DeleteContactMethod"
+
+// DeleteContactMethodRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteContactMethod operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteContactMethod for more information on using the DeleteContactMethod
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteContactMethodRequest method.
+// req, resp := client.DeleteContactMethodRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteContactMethod
+func (c *Lightsail) DeleteContactMethodRequest(input *DeleteContactMethodInput) (req *request.Request, output *DeleteContactMethodOutput) {
+ op := &request.Operation{
+ Name: opDeleteContactMethod,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteContactMethodInput{}
+ }
+
+ output = &DeleteContactMethodOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DeleteContactMethod API operation for Amazon Lightsail.
+//
+// Deletes a contact method.
+//
+// A contact method is used to send you notifications about your Amazon Lightsail
+// resources. You can add one email address and one mobile phone number contact
+// method in each AWS Region. However, SMS text messaging is not supported in
+// some AWS Regions, and SMS text messages cannot be sent to some countries/regions.
+// For more information, see Notifications in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-notifications).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation DeleteContactMethod for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteContactMethod
+func (c *Lightsail) DeleteContactMethod(input *DeleteContactMethodInput) (*DeleteContactMethodOutput, error) {
+ req, out := c.DeleteContactMethodRequest(input)
+ return out, req.Send()
+}
+
+// DeleteContactMethodWithContext is the same as DeleteContactMethod with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteContactMethod for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) DeleteContactMethodWithContext(ctx aws.Context, input *DeleteContactMethodInput, opts ...request.Option) (*DeleteContactMethodOutput, error) {
+ req, out := c.DeleteContactMethodRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteContainerImage = "DeleteContainerImage"
+
+// DeleteContainerImageRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteContainerImage operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteContainerImage for more information on using the DeleteContainerImage
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteContainerImageRequest method.
+// req, resp := client.DeleteContainerImageRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteContainerImage
+func (c *Lightsail) DeleteContainerImageRequest(input *DeleteContainerImageInput) (req *request.Request, output *DeleteContainerImageOutput) {
+ op := &request.Operation{
+ Name: opDeleteContainerImage,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteContainerImageInput{}
+ }
+
+ output = &DeleteContainerImageOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeleteContainerImage API operation for Amazon Lightsail.
+//
+// Deletes a container image that is registered to your Amazon Lightsail container
+// service.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation DeleteContainerImage for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteContainerImage
+func (c *Lightsail) DeleteContainerImage(input *DeleteContainerImageInput) (*DeleteContainerImageOutput, error) {
+ req, out := c.DeleteContainerImageRequest(input)
+ return out, req.Send()
+}
+
+// DeleteContainerImageWithContext is the same as DeleteContainerImage with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteContainerImage for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) DeleteContainerImageWithContext(ctx aws.Context, input *DeleteContainerImageInput, opts ...request.Option) (*DeleteContainerImageOutput, error) {
+ req, out := c.DeleteContainerImageRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteContainerService = "DeleteContainerService"
+
+// DeleteContainerServiceRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteContainerService operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteContainerService for more information on using the DeleteContainerService
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteContainerServiceRequest method.
+// req, resp := client.DeleteContainerServiceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteContainerService
+func (c *Lightsail) DeleteContainerServiceRequest(input *DeleteContainerServiceInput) (req *request.Request, output *DeleteContainerServiceOutput) {
+ op := &request.Operation{
+ Name: opDeleteContainerService,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteContainerServiceInput{}
+ }
+
+ output = &DeleteContainerServiceOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeleteContainerService API operation for Amazon Lightsail.
+//
+// Deletes your Amazon Lightsail container service.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation DeleteContainerService for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteContainerService
+func (c *Lightsail) DeleteContainerService(input *DeleteContainerServiceInput) (*DeleteContainerServiceOutput, error) {
+ req, out := c.DeleteContainerServiceRequest(input)
+ return out, req.Send()
+}
+
+// DeleteContainerServiceWithContext is the same as DeleteContainerService with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteContainerService for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) DeleteContainerServiceWithContext(ctx aws.Context, input *DeleteContainerServiceInput, opts ...request.Option) (*DeleteContainerServiceOutput, error) {
+ req, out := c.DeleteContainerServiceRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteDisk = "DeleteDisk"
+
+// DeleteDiskRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteDisk operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteDisk for more information on using the DeleteDisk
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteDiskRequest method.
+// req, resp := client.DeleteDiskRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteDisk
+func (c *Lightsail) DeleteDiskRequest(input *DeleteDiskInput) (req *request.Request, output *DeleteDiskOutput) {
+	// Substitute an empty input struct so request marshaling never sees nil.
+	if input == nil {
+		input = &DeleteDiskInput{}
+	}
+
+	output = &DeleteDiskOutput{}
+	req = c.newRequest(&request.Operation{
+		Name:       opDeleteDisk,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}, input, output)
+
+	return req, output
+}
+
+// DeleteDisk API operation for Amazon Lightsail.
+//
+// Deletes the specified block storage disk. The disk must be in the available
+// state (not attached to a Lightsail instance).
+//
+// The disk may remain in the deleting state for several minutes.
+//
+// The delete disk operation supports tag-based access control via resource
+// tags applied to the resource identified by disk name. For more information,
+// see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation DeleteDisk for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteDisk
+func (c *Lightsail) DeleteDisk(input *DeleteDiskInput) (*DeleteDiskOutput, error) {
+	r, out := c.DeleteDiskRequest(input)
+	return out, r.Send()
+}
+
+// DeleteDiskWithContext is the same as DeleteDisk with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteDisk for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) DeleteDiskWithContext(ctx aws.Context, input *DeleteDiskInput, opts ...request.Option) (*DeleteDiskOutput, error) {
+	r, out := c.DeleteDiskRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	return out, r.Send()
+}
+
+const opDeleteDiskSnapshot = "DeleteDiskSnapshot"
+
+// DeleteDiskSnapshotRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteDiskSnapshot operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteDiskSnapshot for more information on using the DeleteDiskSnapshot
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteDiskSnapshotRequest method.
+// req, resp := client.DeleteDiskSnapshotRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteDiskSnapshot
+func (c *Lightsail) DeleteDiskSnapshotRequest(input *DeleteDiskSnapshotInput) (req *request.Request, output *DeleteDiskSnapshotOutput) {
+	// Substitute an empty input struct so request marshaling never sees nil.
+	if input == nil {
+		input = &DeleteDiskSnapshotInput{}
+	}
+
+	output = &DeleteDiskSnapshotOutput{}
+	req = c.newRequest(&request.Operation{
+		Name:       opDeleteDiskSnapshot,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}, input, output)
+
+	return req, output
+}
+
+// DeleteDiskSnapshot API operation for Amazon Lightsail.
+//
+// Deletes the specified disk snapshot.
+//
+// When you make periodic snapshots of a disk, the snapshots are incremental,
+// and only the blocks on the device that have changed since your last snapshot
+// are saved in the new snapshot. When you delete a snapshot, only the data
+// not needed for any other snapshot is removed. So regardless of which prior
+// snapshots have been deleted, all active snapshots will have access to all
+// the information needed to restore the disk.
+//
+// The delete disk snapshot operation supports tag-based access control via
+// resource tags applied to the resource identified by disk snapshot name. For
+// more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation DeleteDiskSnapshot for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteDiskSnapshot
+func (c *Lightsail) DeleteDiskSnapshot(input *DeleteDiskSnapshotInput) (*DeleteDiskSnapshotOutput, error) {
+	r, out := c.DeleteDiskSnapshotRequest(input)
+	return out, r.Send()
+}
+
+// DeleteDiskSnapshotWithContext is the same as DeleteDiskSnapshot with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteDiskSnapshot for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) DeleteDiskSnapshotWithContext(ctx aws.Context, input *DeleteDiskSnapshotInput, opts ...request.Option) (*DeleteDiskSnapshotOutput, error) {
+	r, out := c.DeleteDiskSnapshotRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	return out, r.Send()
+}
+
+const opDeleteDistribution = "DeleteDistribution"
+
+// DeleteDistributionRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteDistribution operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteDistribution for more information on using the DeleteDistribution
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteDistributionRequest method.
+// req, resp := client.DeleteDistributionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteDistribution
+func (c *Lightsail) DeleteDistributionRequest(input *DeleteDistributionInput) (req *request.Request, output *DeleteDistributionOutput) {
+	// Substitute an empty input struct so request marshaling never sees nil.
+	if input == nil {
+		input = &DeleteDistributionInput{}
+	}
+
+	output = &DeleteDistributionOutput{}
+	req = c.newRequest(&request.Operation{
+		Name:       opDeleteDistribution,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}, input, output)
+
+	return req, output
+}
+
+// DeleteDistribution API operation for Amazon Lightsail.
+//
+// Deletes your Amazon Lightsail content delivery network (CDN) distribution.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation DeleteDistribution for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteDistribution
+func (c *Lightsail) DeleteDistribution(input *DeleteDistributionInput) (*DeleteDistributionOutput, error) {
+	r, out := c.DeleteDistributionRequest(input)
+	return out, r.Send()
+}
+
+// DeleteDistributionWithContext is the same as DeleteDistribution with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteDistribution for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) DeleteDistributionWithContext(ctx aws.Context, input *DeleteDistributionInput, opts ...request.Option) (*DeleteDistributionOutput, error) {
+	r, out := c.DeleteDistributionRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	return out, r.Send()
+}
+
+const opDeleteDomain = "DeleteDomain"
+
+// DeleteDomainRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteDomain operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteDomain for more information on using the DeleteDomain
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteDomainRequest method.
+// req, resp := client.DeleteDomainRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteDomain
+func (c *Lightsail) DeleteDomainRequest(input *DeleteDomainInput) (req *request.Request, output *DeleteDomainOutput) {
+	// Substitute an empty input struct so request marshaling never sees nil.
+	if input == nil {
+		input = &DeleteDomainInput{}
+	}
+
+	output = &DeleteDomainOutput{}
+	req = c.newRequest(&request.Operation{
+		Name:       opDeleteDomain,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}, input, output)
+
+	return req, output
+}
+
+// DeleteDomain API operation for Amazon Lightsail.
+//
+// Deletes the specified domain recordset and all of its domain records.
+//
+// The delete domain operation supports tag-based access control via resource
+// tags applied to the resource identified by domain name. For more information,
+// see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation DeleteDomain for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteDomain
+func (c *Lightsail) DeleteDomain(input *DeleteDomainInput) (*DeleteDomainOutput, error) {
+	r, out := c.DeleteDomainRequest(input)
+	return out, r.Send()
+}
+
+// DeleteDomainWithContext is the same as DeleteDomain with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteDomain for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) DeleteDomainWithContext(ctx aws.Context, input *DeleteDomainInput, opts ...request.Option) (*DeleteDomainOutput, error) {
+	r, out := c.DeleteDomainRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	return out, r.Send()
+}
+
+const opDeleteDomainEntry = "DeleteDomainEntry"
+
+// DeleteDomainEntryRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteDomainEntry operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteDomainEntry for more information on using the DeleteDomainEntry
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteDomainEntryRequest method.
+// req, resp := client.DeleteDomainEntryRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteDomainEntry
+func (c *Lightsail) DeleteDomainEntryRequest(input *DeleteDomainEntryInput) (req *request.Request, output *DeleteDomainEntryOutput) {
+	// Substitute an empty input struct so request marshaling never sees nil.
+	if input == nil {
+		input = &DeleteDomainEntryInput{}
+	}
+
+	output = &DeleteDomainEntryOutput{}
+	req = c.newRequest(&request.Operation{
+		Name:       opDeleteDomainEntry,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}, input, output)
+
+	return req, output
+}
+
+// DeleteDomainEntry API operation for Amazon Lightsail.
+//
+// Deletes a specific domain entry.
+//
+// The delete domain entry operation supports tag-based access control via resource
+// tags applied to the resource identified by domain name. For more information,
+// see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation DeleteDomainEntry for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteDomainEntry
+func (c *Lightsail) DeleteDomainEntry(input *DeleteDomainEntryInput) (*DeleteDomainEntryOutput, error) {
+	r, out := c.DeleteDomainEntryRequest(input)
+	return out, r.Send()
+}
+
+// DeleteDomainEntryWithContext is the same as DeleteDomainEntry with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteDomainEntry for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) DeleteDomainEntryWithContext(ctx aws.Context, input *DeleteDomainEntryInput, opts ...request.Option) (*DeleteDomainEntryOutput, error) {
+	r, out := c.DeleteDomainEntryRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	return out, r.Send()
+}
+
+const opDeleteInstance = "DeleteInstance"
+
+// DeleteInstanceRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteInstance operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteInstance for more information on using the DeleteInstance
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteInstanceRequest method.
+// req, resp := client.DeleteInstanceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteInstance
+func (c *Lightsail) DeleteInstanceRequest(input *DeleteInstanceInput) (req *request.Request, output *DeleteInstanceOutput) {
+	// Substitute an empty input struct so request marshaling never sees nil.
+	if input == nil {
+		input = &DeleteInstanceInput{}
+	}
+
+	output = &DeleteInstanceOutput{}
+	req = c.newRequest(&request.Operation{
+		Name:       opDeleteInstance,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}, input, output)
+
+	return req, output
+}
+
+// DeleteInstance API operation for Amazon Lightsail.
+//
+// Deletes an Amazon Lightsail instance.
+//
+// The delete instance operation supports tag-based access control via resource
+// tags applied to the resource identified by instance name. For more information,
+// see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation DeleteInstance for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteInstance
+func (c *Lightsail) DeleteInstance(input *DeleteInstanceInput) (*DeleteInstanceOutput, error) {
+	r, out := c.DeleteInstanceRequest(input)
+	return out, r.Send()
+}
+
+// DeleteInstanceWithContext is the same as DeleteInstance with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteInstance for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) DeleteInstanceWithContext(ctx aws.Context, input *DeleteInstanceInput, opts ...request.Option) (*DeleteInstanceOutput, error) {
+	r, out := c.DeleteInstanceRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	return out, r.Send()
+}
+
+const opDeleteInstanceSnapshot = "DeleteInstanceSnapshot"
+
+// DeleteInstanceSnapshotRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteInstanceSnapshot operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteInstanceSnapshot for more information on using the DeleteInstanceSnapshot
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteInstanceSnapshotRequest method.
+// req, resp := client.DeleteInstanceSnapshotRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteInstanceSnapshot
+func (c *Lightsail) DeleteInstanceSnapshotRequest(input *DeleteInstanceSnapshotInput) (req *request.Request, output *DeleteInstanceSnapshotOutput) {
+	// Substitute an empty input struct so request marshaling never sees nil.
+	if input == nil {
+		input = &DeleteInstanceSnapshotInput{}
+	}
+
+	output = &DeleteInstanceSnapshotOutput{}
+	req = c.newRequest(&request.Operation{
+		Name:       opDeleteInstanceSnapshot,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}, input, output)
+
+	return req, output
+}
+
+// DeleteInstanceSnapshot API operation for Amazon Lightsail.
+//
+// Deletes a specific snapshot of a virtual private server (or instance).
+//
+// The delete instance snapshot operation supports tag-based access control
+// via resource tags applied to the resource identified by instance snapshot
+// name. For more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation DeleteInstanceSnapshot for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteInstanceSnapshot
+func (c *Lightsail) DeleteInstanceSnapshot(input *DeleteInstanceSnapshotInput) (*DeleteInstanceSnapshotOutput, error) {
+	r, out := c.DeleteInstanceSnapshotRequest(input)
+	return out, r.Send()
+}
+
+// DeleteInstanceSnapshotWithContext is the same as DeleteInstanceSnapshot with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteInstanceSnapshot for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) DeleteInstanceSnapshotWithContext(ctx aws.Context, input *DeleteInstanceSnapshotInput, opts ...request.Option) (*DeleteInstanceSnapshotOutput, error) {
+	r, out := c.DeleteInstanceSnapshotRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	return out, r.Send()
+}
+
+const opDeleteKeyPair = "DeleteKeyPair"
+
+// DeleteKeyPairRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteKeyPair operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteKeyPair for more information on using the DeleteKeyPair
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteKeyPairRequest method.
+// req, resp := client.DeleteKeyPairRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteKeyPair
+func (c *Lightsail) DeleteKeyPairRequest(input *DeleteKeyPairInput) (req *request.Request, output *DeleteKeyPairOutput) {
+	// Substitute an empty input struct so request marshaling never sees nil.
+	if input == nil {
+		input = &DeleteKeyPairInput{}
+	}
+
+	output = &DeleteKeyPairOutput{}
+	req = c.newRequest(&request.Operation{
+		Name:       opDeleteKeyPair,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}, input, output)
+
+	return req, output
+}
+
+// DeleteKeyPair API operation for Amazon Lightsail.
+//
+// Deletes a specific SSH key pair.
+//
+// The delete key pair operation supports tag-based access control via resource
+// tags applied to the resource identified by key pair name. For more information,
+// see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation DeleteKeyPair for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteKeyPair
+func (c *Lightsail) DeleteKeyPair(input *DeleteKeyPairInput) (*DeleteKeyPairOutput, error) {
+ req, out := c.DeleteKeyPairRequest(input)
+ return out, req.Send()
+}
+
+// DeleteKeyPairWithContext is the same as DeleteKeyPair with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteKeyPair for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) DeleteKeyPairWithContext(ctx aws.Context, input *DeleteKeyPairInput, opts ...request.Option) (*DeleteKeyPairOutput, error) {
+ req, out := c.DeleteKeyPairRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteKnownHostKeys = "DeleteKnownHostKeys"
+
+// DeleteKnownHostKeysRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteKnownHostKeys operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteKnownHostKeys for more information on using the DeleteKnownHostKeys
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteKnownHostKeysRequest method.
+// req, resp := client.DeleteKnownHostKeysRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteKnownHostKeys
+func (c *Lightsail) DeleteKnownHostKeysRequest(input *DeleteKnownHostKeysInput) (req *request.Request, output *DeleteKnownHostKeysOutput) {
+ op := &request.Operation{
+ Name: opDeleteKnownHostKeys,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteKnownHostKeysInput{}
+ }
+
+ output = &DeleteKnownHostKeysOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DeleteKnownHostKeys API operation for Amazon Lightsail.
+//
+// Deletes the known host key or certificate used by the Amazon Lightsail browser-based
+// SSH or RDP clients to authenticate an instance. This operation enables the
+// Lightsail browser-based SSH or RDP clients to connect to the instance after
+// a host key mismatch.
+//
+// Perform this operation only if you were expecting the host key or certificate
+// mismatch or if you are familiar with the new host key or certificate on the
+// instance. For more information, see Troubleshooting connection issues when
+// using the Amazon Lightsail browser-based SSH or RDP client (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-troubleshooting-browser-based-ssh-rdp-client-connection).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation DeleteKnownHostKeys for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteKnownHostKeys
+func (c *Lightsail) DeleteKnownHostKeys(input *DeleteKnownHostKeysInput) (*DeleteKnownHostKeysOutput, error) {
+ req, out := c.DeleteKnownHostKeysRequest(input)
+ return out, req.Send()
+}
+
+// DeleteKnownHostKeysWithContext is the same as DeleteKnownHostKeys with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteKnownHostKeys for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) DeleteKnownHostKeysWithContext(ctx aws.Context, input *DeleteKnownHostKeysInput, opts ...request.Option) (*DeleteKnownHostKeysOutput, error) {
+ req, out := c.DeleteKnownHostKeysRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteLoadBalancer = "DeleteLoadBalancer"
+
+// DeleteLoadBalancerRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteLoadBalancer operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteLoadBalancer for more information on using the DeleteLoadBalancer
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteLoadBalancerRequest method.
+// req, resp := client.DeleteLoadBalancerRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteLoadBalancer
+func (c *Lightsail) DeleteLoadBalancerRequest(input *DeleteLoadBalancerInput) (req *request.Request, output *DeleteLoadBalancerOutput) {
+ op := &request.Operation{
+ Name: opDeleteLoadBalancer,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteLoadBalancerInput{}
+ }
+
+ output = &DeleteLoadBalancerOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DeleteLoadBalancer API operation for Amazon Lightsail.
+//
+// Deletes a Lightsail load balancer and all its associated SSL/TLS certificates.
+// Once the load balancer is deleted, you will need to create a new load balancer,
+// create a new certificate, and verify domain ownership again.
+//
+// The delete load balancer operation supports tag-based access control via
+// resource tags applied to the resource identified by load balancer name. For
+// more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation DeleteLoadBalancer for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteLoadBalancer
+func (c *Lightsail) DeleteLoadBalancer(input *DeleteLoadBalancerInput) (*DeleteLoadBalancerOutput, error) {
+ req, out := c.DeleteLoadBalancerRequest(input)
+ return out, req.Send()
+}
+
+// DeleteLoadBalancerWithContext is the same as DeleteLoadBalancer with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteLoadBalancer for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) DeleteLoadBalancerWithContext(ctx aws.Context, input *DeleteLoadBalancerInput, opts ...request.Option) (*DeleteLoadBalancerOutput, error) {
+ req, out := c.DeleteLoadBalancerRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteLoadBalancerTlsCertificate = "DeleteLoadBalancerTlsCertificate"
+
+// DeleteLoadBalancerTlsCertificateRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteLoadBalancerTlsCertificate operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteLoadBalancerTlsCertificate for more information on using the DeleteLoadBalancerTlsCertificate
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteLoadBalancerTlsCertificateRequest method.
+// req, resp := client.DeleteLoadBalancerTlsCertificateRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteLoadBalancerTlsCertificate
+func (c *Lightsail) DeleteLoadBalancerTlsCertificateRequest(input *DeleteLoadBalancerTlsCertificateInput) (req *request.Request, output *DeleteLoadBalancerTlsCertificateOutput) {
+ op := &request.Operation{
+ Name: opDeleteLoadBalancerTlsCertificate,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteLoadBalancerTlsCertificateInput{}
+ }
+
+ output = &DeleteLoadBalancerTlsCertificateOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DeleteLoadBalancerTlsCertificate API operation for Amazon Lightsail.
+//
+// Deletes an SSL/TLS certificate associated with a Lightsail load balancer.
+//
+// The DeleteLoadBalancerTlsCertificate operation supports tag-based access
+// control via resource tags applied to the resource identified by load balancer
+// name. For more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation DeleteLoadBalancerTlsCertificate for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteLoadBalancerTlsCertificate
+func (c *Lightsail) DeleteLoadBalancerTlsCertificate(input *DeleteLoadBalancerTlsCertificateInput) (*DeleteLoadBalancerTlsCertificateOutput, error) {
+ req, out := c.DeleteLoadBalancerTlsCertificateRequest(input)
+ return out, req.Send()
+}
+
+// DeleteLoadBalancerTlsCertificateWithContext is the same as DeleteLoadBalancerTlsCertificate with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteLoadBalancerTlsCertificate for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) DeleteLoadBalancerTlsCertificateWithContext(ctx aws.Context, input *DeleteLoadBalancerTlsCertificateInput, opts ...request.Option) (*DeleteLoadBalancerTlsCertificateOutput, error) {
+ req, out := c.DeleteLoadBalancerTlsCertificateRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteRelationalDatabase = "DeleteRelationalDatabase"
+
+// DeleteRelationalDatabaseRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteRelationalDatabase operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteRelationalDatabase for more information on using the DeleteRelationalDatabase
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteRelationalDatabaseRequest method.
+// req, resp := client.DeleteRelationalDatabaseRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteRelationalDatabase
+func (c *Lightsail) DeleteRelationalDatabaseRequest(input *DeleteRelationalDatabaseInput) (req *request.Request, output *DeleteRelationalDatabaseOutput) {
+ op := &request.Operation{
+ Name: opDeleteRelationalDatabase,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteRelationalDatabaseInput{}
+ }
+
+ output = &DeleteRelationalDatabaseOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DeleteRelationalDatabase API operation for Amazon Lightsail.
+//
+// Deletes a database in Amazon Lightsail.
+//
+// The delete relational database operation supports tag-based access control
+// via resource tags applied to the resource identified by relationalDatabaseName.
+// For more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation DeleteRelationalDatabase for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteRelationalDatabase
+func (c *Lightsail) DeleteRelationalDatabase(input *DeleteRelationalDatabaseInput) (*DeleteRelationalDatabaseOutput, error) {
+ req, out := c.DeleteRelationalDatabaseRequest(input)
+ return out, req.Send()
+}
+
+// DeleteRelationalDatabaseWithContext is the same as DeleteRelationalDatabase with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteRelationalDatabase for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) DeleteRelationalDatabaseWithContext(ctx aws.Context, input *DeleteRelationalDatabaseInput, opts ...request.Option) (*DeleteRelationalDatabaseOutput, error) {
+ req, out := c.DeleteRelationalDatabaseRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteRelationalDatabaseSnapshot = "DeleteRelationalDatabaseSnapshot"
+
+// DeleteRelationalDatabaseSnapshotRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteRelationalDatabaseSnapshot operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteRelationalDatabaseSnapshot for more information on using the DeleteRelationalDatabaseSnapshot
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteRelationalDatabaseSnapshotRequest method.
+// req, resp := client.DeleteRelationalDatabaseSnapshotRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteRelationalDatabaseSnapshot
+func (c *Lightsail) DeleteRelationalDatabaseSnapshotRequest(input *DeleteRelationalDatabaseSnapshotInput) (req *request.Request, output *DeleteRelationalDatabaseSnapshotOutput) {
+ op := &request.Operation{
+ Name: opDeleteRelationalDatabaseSnapshot,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteRelationalDatabaseSnapshotInput{}
+ }
+
+ output = &DeleteRelationalDatabaseSnapshotOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DeleteRelationalDatabaseSnapshot API operation for Amazon Lightsail.
+//
+// Deletes a database snapshot in Amazon Lightsail.
+//
+// The delete relational database snapshot operation supports tag-based access
+// control via resource tags applied to the resource identified by relationalDatabaseName.
+// For more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation DeleteRelationalDatabaseSnapshot for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteRelationalDatabaseSnapshot
+func (c *Lightsail) DeleteRelationalDatabaseSnapshot(input *DeleteRelationalDatabaseSnapshotInput) (*DeleteRelationalDatabaseSnapshotOutput, error) {
+ req, out := c.DeleteRelationalDatabaseSnapshotRequest(input)
+ return out, req.Send()
+}
+
+// DeleteRelationalDatabaseSnapshotWithContext is the same as DeleteRelationalDatabaseSnapshot with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteRelationalDatabaseSnapshot for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) DeleteRelationalDatabaseSnapshotWithContext(ctx aws.Context, input *DeleteRelationalDatabaseSnapshotInput, opts ...request.Option) (*DeleteRelationalDatabaseSnapshotOutput, error) {
+ req, out := c.DeleteRelationalDatabaseSnapshotRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDetachCertificateFromDistribution = "DetachCertificateFromDistribution"
+
+// DetachCertificateFromDistributionRequest generates a "aws/request.Request" representing the
+// client's request for the DetachCertificateFromDistribution operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DetachCertificateFromDistribution for more information on using the DetachCertificateFromDistribution
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DetachCertificateFromDistributionRequest method.
+// req, resp := client.DetachCertificateFromDistributionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DetachCertificateFromDistribution
+func (c *Lightsail) DetachCertificateFromDistributionRequest(input *DetachCertificateFromDistributionInput) (req *request.Request, output *DetachCertificateFromDistributionOutput) {
+ op := &request.Operation{
+ Name: opDetachCertificateFromDistribution,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DetachCertificateFromDistributionInput{}
+ }
+
+ output = &DetachCertificateFromDistributionOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DetachCertificateFromDistribution API operation for Amazon Lightsail.
+//
+// Detaches an SSL/TLS certificate from your Amazon Lightsail content delivery
+// network (CDN) distribution.
+//
+// After the certificate is detached, your distribution stops accepting traffic
+// for all of the domains that are associated with the certificate.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation DetachCertificateFromDistribution for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DetachCertificateFromDistribution
+func (c *Lightsail) DetachCertificateFromDistribution(input *DetachCertificateFromDistributionInput) (*DetachCertificateFromDistributionOutput, error) {
+ req, out := c.DetachCertificateFromDistributionRequest(input)
+ return out, req.Send()
+}
+
+// DetachCertificateFromDistributionWithContext is the same as DetachCertificateFromDistribution with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DetachCertificateFromDistribution for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) DetachCertificateFromDistributionWithContext(ctx aws.Context, input *DetachCertificateFromDistributionInput, opts ...request.Option) (*DetachCertificateFromDistributionOutput, error) {
+ req, out := c.DetachCertificateFromDistributionRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDetachDisk = "DetachDisk"
+
+// DetachDiskRequest generates a "aws/request.Request" representing the
+// client's request for the DetachDisk operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DetachDisk for more information on using the DetachDisk
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DetachDiskRequest method.
+// req, resp := client.DetachDiskRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DetachDisk
+func (c *Lightsail) DetachDiskRequest(input *DetachDiskInput) (req *request.Request, output *DetachDiskOutput) {
+ op := &request.Operation{
+ Name: opDetachDisk,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DetachDiskInput{}
+ }
+
+ output = &DetachDiskOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DetachDisk API operation for Amazon Lightsail.
+//
+// Detaches a stopped block storage disk from a Lightsail instance. Make sure
+// to unmount any file systems on the device within your operating system before
+// stopping the instance and detaching the disk.
+//
+// The detach disk operation supports tag-based access control via resource
+// tags applied to the resource identified by disk name. For more information,
+// see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation DetachDisk for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DetachDisk
+func (c *Lightsail) DetachDisk(input *DetachDiskInput) (*DetachDiskOutput, error) {
+ req, out := c.DetachDiskRequest(input)
+ return out, req.Send()
+}
+
+// DetachDiskWithContext is the same as DetachDisk with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DetachDisk for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) DetachDiskWithContext(ctx aws.Context, input *DetachDiskInput, opts ...request.Option) (*DetachDiskOutput, error) {
+ req, out := c.DetachDiskRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDetachInstancesFromLoadBalancer = "DetachInstancesFromLoadBalancer"
+
+// DetachInstancesFromLoadBalancerRequest generates a "aws/request.Request" representing the
+// client's request for the DetachInstancesFromLoadBalancer operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DetachInstancesFromLoadBalancer for more information on using the DetachInstancesFromLoadBalancer
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DetachInstancesFromLoadBalancerRequest method.
+// req, resp := client.DetachInstancesFromLoadBalancerRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DetachInstancesFromLoadBalancer
+func (c *Lightsail) DetachInstancesFromLoadBalancerRequest(input *DetachInstancesFromLoadBalancerInput) (req *request.Request, output *DetachInstancesFromLoadBalancerOutput) {
+ op := &request.Operation{
+ Name: opDetachInstancesFromLoadBalancer,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DetachInstancesFromLoadBalancerInput{}
+ }
+
+ output = &DetachInstancesFromLoadBalancerOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DetachInstancesFromLoadBalancer API operation for Amazon Lightsail.
+//
+// Detaches the specified instances from a Lightsail load balancer.
+//
+// This operation waits until the instances are no longer needed before they
+// are detached from the load balancer.
+//
+// The detach instances from load balancer operation supports tag-based access
+// control via resource tags applied to the resource identified by load balancer
+// name. For more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation DetachInstancesFromLoadBalancer for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DetachInstancesFromLoadBalancer
+func (c *Lightsail) DetachInstancesFromLoadBalancer(input *DetachInstancesFromLoadBalancerInput) (*DetachInstancesFromLoadBalancerOutput, error) {
+ req, out := c.DetachInstancesFromLoadBalancerRequest(input)
+ return out, req.Send()
+}
+
+// DetachInstancesFromLoadBalancerWithContext is the same as DetachInstancesFromLoadBalancer with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DetachInstancesFromLoadBalancer for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) DetachInstancesFromLoadBalancerWithContext(ctx aws.Context, input *DetachInstancesFromLoadBalancerInput, opts ...request.Option) (*DetachInstancesFromLoadBalancerOutput, error) {
+ req, out := c.DetachInstancesFromLoadBalancerRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDetachStaticIp = "DetachStaticIp"
+
+// DetachStaticIpRequest generates a "aws/request.Request" representing the
+// client's request for the DetachStaticIp operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DetachStaticIp for more information on using the DetachStaticIp
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DetachStaticIpRequest method.
+// req, resp := client.DetachStaticIpRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DetachStaticIp
+func (c *Lightsail) DetachStaticIpRequest(input *DetachStaticIpInput) (req *request.Request, output *DetachStaticIpOutput) {
+ op := &request.Operation{
+ Name: opDetachStaticIp,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DetachStaticIpInput{}
+ }
+
+ output = &DetachStaticIpOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DetachStaticIp API operation for Amazon Lightsail.
+//
+// Detaches a static IP from the Amazon Lightsail instance to which it is attached.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation DetachStaticIp for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DetachStaticIp
+func (c *Lightsail) DetachStaticIp(input *DetachStaticIpInput) (*DetachStaticIpOutput, error) {
+ req, out := c.DetachStaticIpRequest(input)
+ return out, req.Send()
+}
+
+// DetachStaticIpWithContext is the same as DetachStaticIp with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DetachStaticIp for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) DetachStaticIpWithContext(ctx aws.Context, input *DetachStaticIpInput, opts ...request.Option) (*DetachStaticIpOutput, error) {
+ req, out := c.DetachStaticIpRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDisableAddOn = "DisableAddOn"
+
+// DisableAddOnRequest generates a "aws/request.Request" representing the
+// client's request for the DisableAddOn operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DisableAddOn for more information on using the DisableAddOn
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DisableAddOnRequest method.
+// req, resp := client.DisableAddOnRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DisableAddOn
+func (c *Lightsail) DisableAddOnRequest(input *DisableAddOnInput) (req *request.Request, output *DisableAddOnOutput) {
+ op := &request.Operation{
+ Name: opDisableAddOn,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DisableAddOnInput{}
+ }
+
+ output = &DisableAddOnOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DisableAddOn API operation for Amazon Lightsail.
+//
+// Disables an add-on for an Amazon Lightsail resource. For more information,
+// see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation DisableAddOn for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DisableAddOn
+func (c *Lightsail) DisableAddOn(input *DisableAddOnInput) (*DisableAddOnOutput, error) {
+ req, out := c.DisableAddOnRequest(input)
+ return out, req.Send()
+}
+
+// DisableAddOnWithContext is the same as DisableAddOn with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DisableAddOn for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) DisableAddOnWithContext(ctx aws.Context, input *DisableAddOnInput, opts ...request.Option) (*DisableAddOnOutput, error) {
+ req, out := c.DisableAddOnRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDownloadDefaultKeyPair = "DownloadDefaultKeyPair"
+
+// DownloadDefaultKeyPairRequest generates a "aws/request.Request" representing the
+// client's request for the DownloadDefaultKeyPair operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DownloadDefaultKeyPair for more information on using the DownloadDefaultKeyPair
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DownloadDefaultKeyPairRequest method.
+// req, resp := client.DownloadDefaultKeyPairRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DownloadDefaultKeyPair
+func (c *Lightsail) DownloadDefaultKeyPairRequest(input *DownloadDefaultKeyPairInput) (req *request.Request, output *DownloadDefaultKeyPairOutput) {
+ op := &request.Operation{
+ Name: opDownloadDefaultKeyPair,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DownloadDefaultKeyPairInput{}
+ }
+
+ output = &DownloadDefaultKeyPairOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DownloadDefaultKeyPair API operation for Amazon Lightsail.
+//
+// Downloads the default SSH key pair from the user's account.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation DownloadDefaultKeyPair for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DownloadDefaultKeyPair
+func (c *Lightsail) DownloadDefaultKeyPair(input *DownloadDefaultKeyPairInput) (*DownloadDefaultKeyPairOutput, error) {
+ req, out := c.DownloadDefaultKeyPairRequest(input)
+ return out, req.Send()
+}
+
+// DownloadDefaultKeyPairWithContext is the same as DownloadDefaultKeyPair with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DownloadDefaultKeyPair for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) DownloadDefaultKeyPairWithContext(ctx aws.Context, input *DownloadDefaultKeyPairInput, opts ...request.Option) (*DownloadDefaultKeyPairOutput, error) {
+ req, out := c.DownloadDefaultKeyPairRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opEnableAddOn = "EnableAddOn"
+
+// EnableAddOnRequest generates a "aws/request.Request" representing the
+// client's request for the EnableAddOn operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See EnableAddOn for more information on using the EnableAddOn
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the EnableAddOnRequest method.
+// req, resp := client.EnableAddOnRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/EnableAddOn
+func (c *Lightsail) EnableAddOnRequest(input *EnableAddOnInput) (req *request.Request, output *EnableAddOnOutput) {
+ op := &request.Operation{
+ Name: opEnableAddOn,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &EnableAddOnInput{}
+ }
+
+ output = &EnableAddOnOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// EnableAddOn API operation for Amazon Lightsail.
+//
+// Enables or modifies an add-on for an Amazon Lightsail resource. For more
+// information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation EnableAddOn for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/EnableAddOn
+func (c *Lightsail) EnableAddOn(input *EnableAddOnInput) (*EnableAddOnOutput, error) {
+ req, out := c.EnableAddOnRequest(input)
+ return out, req.Send()
+}
+
+// EnableAddOnWithContext is the same as EnableAddOn with the addition of
+// the ability to pass a context and additional request options.
+//
+// See EnableAddOn for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) EnableAddOnWithContext(ctx aws.Context, input *EnableAddOnInput, opts ...request.Option) (*EnableAddOnOutput, error) {
+ req, out := c.EnableAddOnRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opExportSnapshot = "ExportSnapshot"
+
+// ExportSnapshotRequest generates a "aws/request.Request" representing the
+// client's request for the ExportSnapshot operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ExportSnapshot for more information on using the ExportSnapshot
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the ExportSnapshotRequest method.
+// req, resp := client.ExportSnapshotRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/ExportSnapshot
+func (c *Lightsail) ExportSnapshotRequest(input *ExportSnapshotInput) (req *request.Request, output *ExportSnapshotOutput) {
+ op := &request.Operation{
+ Name: opExportSnapshot,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ExportSnapshotInput{}
+ }
+
+ output = &ExportSnapshotOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ExportSnapshot API operation for Amazon Lightsail.
+//
+// Exports an Amazon Lightsail instance or block storage disk snapshot to Amazon
+// Elastic Compute Cloud (Amazon EC2). This operation results in an export snapshot
+// record that can be used with the create cloud formation stack operation to
+// create new Amazon EC2 instances.
+//
+// Exported instance snapshots appear in Amazon EC2 as Amazon Machine Images
+// (AMIs), and the instance system disk appears as an Amazon Elastic Block Store
+// (Amazon EBS) volume. Exported disk snapshots appear in Amazon EC2 as Amazon
+// EBS volumes. Snapshots are exported to the same Amazon Web Services Region
+// in Amazon EC2 as the source Lightsail snapshot.
+//
+// The export snapshot operation supports tag-based access control via resource
+// tags applied to the resource identified by source snapshot name. For more
+// information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Use the get instance snapshots or get disk snapshots operations to get a
+// list of snapshots that you can export to Amazon EC2.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation ExportSnapshot for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/ExportSnapshot
+func (c *Lightsail) ExportSnapshot(input *ExportSnapshotInput) (*ExportSnapshotOutput, error) {
+ req, out := c.ExportSnapshotRequest(input)
+ return out, req.Send()
+}
+
+// ExportSnapshotWithContext is the same as ExportSnapshot with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ExportSnapshot for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) ExportSnapshotWithContext(ctx aws.Context, input *ExportSnapshotInput, opts ...request.Option) (*ExportSnapshotOutput, error) {
+ req, out := c.ExportSnapshotRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetActiveNames = "GetActiveNames"
+
+// GetActiveNamesRequest generates a "aws/request.Request" representing the
+// client's request for the GetActiveNames operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetActiveNames for more information on using the GetActiveNames
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetActiveNamesRequest method.
+// req, resp := client.GetActiveNamesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetActiveNames
+func (c *Lightsail) GetActiveNamesRequest(input *GetActiveNamesInput) (req *request.Request, output *GetActiveNamesOutput) {
+ op := &request.Operation{
+ Name: opGetActiveNames,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetActiveNamesInput{}
+ }
+
+ output = &GetActiveNamesOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetActiveNames API operation for Amazon Lightsail.
+//
+// Returns the names of all active (not deleted) resources.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetActiveNames for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetActiveNames
+func (c *Lightsail) GetActiveNames(input *GetActiveNamesInput) (*GetActiveNamesOutput, error) {
+ req, out := c.GetActiveNamesRequest(input)
+ return out, req.Send()
+}
+
+// GetActiveNamesWithContext is the same as GetActiveNames with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetActiveNames for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetActiveNamesWithContext(ctx aws.Context, input *GetActiveNamesInput, opts ...request.Option) (*GetActiveNamesOutput, error) {
+ req, out := c.GetActiveNamesRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetAlarms = "GetAlarms"
+
+// GetAlarmsRequest generates a "aws/request.Request" representing the
+// client's request for the GetAlarms operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetAlarms for more information on using the GetAlarms
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetAlarmsRequest method.
+// req, resp := client.GetAlarmsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetAlarms
+func (c *Lightsail) GetAlarmsRequest(input *GetAlarmsInput) (req *request.Request, output *GetAlarmsOutput) {
+ op := &request.Operation{
+ Name: opGetAlarms,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetAlarmsInput{}
+ }
+
+ output = &GetAlarmsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetAlarms API operation for Amazon Lightsail.
+//
+// Returns information about the configured alarms. Specify an alarm name in
+// your request to return information about a specific alarm, or specify a monitored
+// resource name to return information about all alarms for a specific resource.
+//
+// An alarm is used to monitor a single metric for one of your resources. When
+// a metric condition is met, the alarm can notify you by email, SMS text message,
+// and a banner displayed on the Amazon Lightsail console. For more information,
+// see Alarms in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-alarms).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetAlarms for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetAlarms
+func (c *Lightsail) GetAlarms(input *GetAlarmsInput) (*GetAlarmsOutput, error) {
+ req, out := c.GetAlarmsRequest(input)
+ return out, req.Send()
+}
+
+// GetAlarmsWithContext is the same as GetAlarms with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetAlarms for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetAlarmsWithContext(ctx aws.Context, input *GetAlarmsInput, opts ...request.Option) (*GetAlarmsOutput, error) {
+ req, out := c.GetAlarmsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetAutoSnapshots = "GetAutoSnapshots"
+
+// GetAutoSnapshotsRequest generates a "aws/request.Request" representing the
+// client's request for the GetAutoSnapshots operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetAutoSnapshots for more information on using the GetAutoSnapshots
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetAutoSnapshotsRequest method.
+// req, resp := client.GetAutoSnapshotsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetAutoSnapshots
+func (c *Lightsail) GetAutoSnapshotsRequest(input *GetAutoSnapshotsInput) (req *request.Request, output *GetAutoSnapshotsOutput) {
+ op := &request.Operation{
+ Name: opGetAutoSnapshots,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetAutoSnapshotsInput{}
+ }
+
+ output = &GetAutoSnapshotsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetAutoSnapshots API operation for Amazon Lightsail.
+//
+// Returns the available automatic snapshots for an instance or disk. For more
+// information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetAutoSnapshots for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetAutoSnapshots
+func (c *Lightsail) GetAutoSnapshots(input *GetAutoSnapshotsInput) (*GetAutoSnapshotsOutput, error) {
+ req, out := c.GetAutoSnapshotsRequest(input)
+ return out, req.Send()
+}
+
+// GetAutoSnapshotsWithContext is the same as GetAutoSnapshots with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetAutoSnapshots for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetAutoSnapshotsWithContext(ctx aws.Context, input *GetAutoSnapshotsInput, opts ...request.Option) (*GetAutoSnapshotsOutput, error) {
+ req, out := c.GetAutoSnapshotsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetBlueprints = "GetBlueprints"
+
+// GetBlueprintsRequest generates a "aws/request.Request" representing the
+// client's request for the GetBlueprints operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetBlueprints for more information on using the GetBlueprints
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetBlueprintsRequest method.
+// req, resp := client.GetBlueprintsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetBlueprints
+func (c *Lightsail) GetBlueprintsRequest(input *GetBlueprintsInput) (req *request.Request, output *GetBlueprintsOutput) {
+ op := &request.Operation{
+ Name: opGetBlueprints,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetBlueprintsInput{}
+ }
+
+ output = &GetBlueprintsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBlueprints API operation for Amazon Lightsail.
+//
+// Returns the list of available instance images, or blueprints. You can use
+// a blueprint to create a new instance already running a specific operating
+// system, as well as a preinstalled app or development stack. The software
+// each instance is running depends on the blueprint image you choose.
+//
+// Use active blueprints when creating new instances. Inactive blueprints are
+// listed to support customers with existing instances and are not necessarily
+// available to create new instances. Blueprints are marked inactive when they
+// become outdated due to operating system updates or new application releases.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetBlueprints for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetBlueprints
+func (c *Lightsail) GetBlueprints(input *GetBlueprintsInput) (*GetBlueprintsOutput, error) {
+ req, out := c.GetBlueprintsRequest(input)
+ return out, req.Send()
+}
+
+// GetBlueprintsWithContext is the same as GetBlueprints with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBlueprints for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetBlueprintsWithContext(ctx aws.Context, input *GetBlueprintsInput, opts ...request.Option) (*GetBlueprintsOutput, error) {
+ req, out := c.GetBlueprintsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetBucketAccessKeys = "GetBucketAccessKeys"
+
+// GetBucketAccessKeysRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketAccessKeys operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetBucketAccessKeys for more information on using the GetBucketAccessKeys
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetBucketAccessKeysRequest method.
+// req, resp := client.GetBucketAccessKeysRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetBucketAccessKeys
+func (c *Lightsail) GetBucketAccessKeysRequest(input *GetBucketAccessKeysInput) (req *request.Request, output *GetBucketAccessKeysOutput) {
+ op := &request.Operation{
+ Name: opGetBucketAccessKeys,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetBucketAccessKeysInput{}
+ }
+
+ output = &GetBucketAccessKeysOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketAccessKeys API operation for Amazon Lightsail.
+//
+// Returns the existing access key IDs for the specified Amazon Lightsail bucket.
+//
+// This action does not return the secret access key value of an access key.
+// You can get a secret access key only when you create it from the response
+// of the CreateBucketAccessKey action. If you lose the secret access key, you
+// must create a new access key.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetBucketAccessKeys for usage and error information.
+//
+// Returned Error Types:
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * ServiceException
+// A general service exception.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetBucketAccessKeys
+func (c *Lightsail) GetBucketAccessKeys(input *GetBucketAccessKeysInput) (*GetBucketAccessKeysOutput, error) {
+ req, out := c.GetBucketAccessKeysRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketAccessKeysWithContext is the same as GetBucketAccessKeys with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketAccessKeys for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetBucketAccessKeysWithContext(ctx aws.Context, input *GetBucketAccessKeysInput, opts ...request.Option) (*GetBucketAccessKeysOutput, error) {
+ req, out := c.GetBucketAccessKeysRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetBucketBundles = "GetBucketBundles"
+
+// GetBucketBundlesRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketBundles operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetBucketBundles for more information on using the GetBucketBundles
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetBucketBundlesRequest method.
+// req, resp := client.GetBucketBundlesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetBucketBundles
+func (c *Lightsail) GetBucketBundlesRequest(input *GetBucketBundlesInput) (req *request.Request, output *GetBucketBundlesOutput) {
+ op := &request.Operation{
+ Name: opGetBucketBundles,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetBucketBundlesInput{}
+ }
+
+ output = &GetBucketBundlesOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketBundles API operation for Amazon Lightsail.
+//
+// Returns the bundles that you can apply to an Amazon Lightsail bucket.
+//
+// The bucket bundle specifies the monthly cost, storage quota, and data transfer
+// quota for a bucket.
+//
+// Use the UpdateBucketBundle action to update the bundle for a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetBucketBundles for usage and error information.
+//
+// Returned Error Types:
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * ServiceException
+// A general service exception.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetBucketBundles
+func (c *Lightsail) GetBucketBundles(input *GetBucketBundlesInput) (*GetBucketBundlesOutput, error) {
+ req, out := c.GetBucketBundlesRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketBundlesWithContext is the same as GetBucketBundles with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketBundles for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetBucketBundlesWithContext(ctx aws.Context, input *GetBucketBundlesInput, opts ...request.Option) (*GetBucketBundlesOutput, error) {
+ req, out := c.GetBucketBundlesRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetBucketMetricData = "GetBucketMetricData"
+
+// GetBucketMetricDataRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketMetricData operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetBucketMetricData for more information on using the GetBucketMetricData
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetBucketMetricDataRequest method.
+// req, resp := client.GetBucketMetricDataRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetBucketMetricData
+func (c *Lightsail) GetBucketMetricDataRequest(input *GetBucketMetricDataInput) (req *request.Request, output *GetBucketMetricDataOutput) {
+ op := &request.Operation{
+ Name: opGetBucketMetricData,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetBucketMetricDataInput{}
+ }
+
+ output = &GetBucketMetricDataOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketMetricData API operation for Amazon Lightsail.
+//
+// Returns the data points of a specific metric for an Amazon Lightsail bucket.
+//
+// Metrics report the utilization of a bucket. View and collect metric data
+// regularly to monitor the number of objects stored in a bucket (including
+// object versions) and the storage space used by those objects.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetBucketMetricData for usage and error information.
+//
+// Returned Error Types:
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * ServiceException
+// A general service exception.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetBucketMetricData
+func (c *Lightsail) GetBucketMetricData(input *GetBucketMetricDataInput) (*GetBucketMetricDataOutput, error) {
+ req, out := c.GetBucketMetricDataRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketMetricDataWithContext is the same as GetBucketMetricData with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketMetricData for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetBucketMetricDataWithContext(ctx aws.Context, input *GetBucketMetricDataInput, opts ...request.Option) (*GetBucketMetricDataOutput, error) {
+ req, out := c.GetBucketMetricDataRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetBuckets = "GetBuckets"
+
+// GetBucketsRequest generates a "aws/request.Request" representing the
+// client's request for the GetBuckets operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetBuckets for more information on using the GetBuckets
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetBucketsRequest method.
+// req, resp := client.GetBucketsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetBuckets
+func (c *Lightsail) GetBucketsRequest(input *GetBucketsInput) (req *request.Request, output *GetBucketsOutput) {
+ op := &request.Operation{
+ Name: opGetBuckets,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetBucketsInput{}
+ }
+
+ output = &GetBucketsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBuckets API operation for Amazon Lightsail.
+//
+// Returns information about one or more Amazon Lightsail buckets.
+//
+// For more information about buckets, see Buckets in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/buckets-in-amazon-lightsail)
+// in the Amazon Lightsail Developer Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetBuckets for usage and error information.
+//
+// Returned Error Types:
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * ServiceException
+// A general service exception.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetBuckets
+func (c *Lightsail) GetBuckets(input *GetBucketsInput) (*GetBucketsOutput, error) {
+ req, out := c.GetBucketsRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketsWithContext is the same as GetBuckets with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBuckets for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetBucketsWithContext(ctx aws.Context, input *GetBucketsInput, opts ...request.Option) (*GetBucketsOutput, error) {
+ req, out := c.GetBucketsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetBundles = "GetBundles"
+
+// GetBundlesRequest generates a "aws/request.Request" representing the
+// client's request for the GetBundles operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetBundles for more information on using the GetBundles
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetBundlesRequest method.
+// req, resp := client.GetBundlesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetBundles
+func (c *Lightsail) GetBundlesRequest(input *GetBundlesInput) (req *request.Request, output *GetBundlesOutput) {
+ op := &request.Operation{
+ Name: opGetBundles,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetBundlesInput{}
+ }
+
+ output = &GetBundlesOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBundles API operation for Amazon Lightsail.
+//
+// Returns the list of bundles that are available for purchase. A bundle describes
+// the specs for your virtual private server (or instance).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetBundles for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetBundles
+func (c *Lightsail) GetBundles(input *GetBundlesInput) (*GetBundlesOutput, error) {
+	// Synchronous convenience wrapper: out is only valid when Send returns nil.
+	req, out := c.GetBundlesRequest(input)
+	return out, req.Send()
+}
+
+// GetBundlesWithContext is the same as GetBundles with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBundles for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetBundlesWithContext(ctx aws.Context, input *GetBundlesInput, opts ...request.Option) (*GetBundlesOutput, error) {
+	req, out := c.GetBundlesRequest(input)
+	// ctx must be non-nil (a nil context panics); opts are applied to the
+	// request before it is sent.
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetCertificates = "GetCertificates"
+
+// GetCertificatesRequest generates a "aws/request.Request" representing the
+// client's request for the GetCertificates operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetCertificates for more information on using the GetCertificates
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetCertificatesRequest method.
+// req, resp := client.GetCertificatesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetCertificates
+func (c *Lightsail) GetCertificatesRequest(input *GetCertificatesInput) (req *request.Request, output *GetCertificatesOutput) {
+	// Every Lightsail operation in this file is a POST to "/"; the operation
+	// name alone selects the action.
+	op := &request.Operation{
+		Name:       opGetCertificates,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	// Tolerate a nil input by substituting an empty request structure.
+	if input == nil {
+		input = &GetCertificatesInput{}
+	}
+
+	// output is populated only after the returned request is sent successfully.
+	output = &GetCertificatesOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetCertificates API operation for Amazon Lightsail.
+//
+// Returns information about one or more Amazon Lightsail SSL/TLS certificates.
+//
+// To get a summary of a certificate, omit includeCertificateDetails from your
+// request. The response will include only the certificate Amazon Resource Name
+// (ARN), certificate name, domain name, and tags.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetCertificates for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetCertificates
+func (c *Lightsail) GetCertificates(input *GetCertificatesInput) (*GetCertificatesOutput, error) {
+	// Synchronous convenience wrapper: out is only valid when Send returns nil.
+	req, out := c.GetCertificatesRequest(input)
+	return out, req.Send()
+}
+
+// GetCertificatesWithContext is the same as GetCertificates with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetCertificates for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetCertificatesWithContext(ctx aws.Context, input *GetCertificatesInput, opts ...request.Option) (*GetCertificatesOutput, error) {
+	req, out := c.GetCertificatesRequest(input)
+	// ctx must be non-nil (a nil context panics); opts are applied to the
+	// request before it is sent.
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetCloudFormationStackRecords = "GetCloudFormationStackRecords"
+
+// GetCloudFormationStackRecordsRequest generates a "aws/request.Request" representing the
+// client's request for the GetCloudFormationStackRecords operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetCloudFormationStackRecords for more information on using the GetCloudFormationStackRecords
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetCloudFormationStackRecordsRequest method.
+// req, resp := client.GetCloudFormationStackRecordsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetCloudFormationStackRecords
+func (c *Lightsail) GetCloudFormationStackRecordsRequest(input *GetCloudFormationStackRecordsInput) (req *request.Request, output *GetCloudFormationStackRecordsOutput) {
+	// Every Lightsail operation in this file is a POST to "/"; the operation
+	// name alone selects the action.
+	op := &request.Operation{
+		Name:       opGetCloudFormationStackRecords,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	// Tolerate a nil input by substituting an empty request structure.
+	if input == nil {
+		input = &GetCloudFormationStackRecordsInput{}
+	}
+
+	// output is populated only after the returned request is sent successfully.
+	output = &GetCloudFormationStackRecordsOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetCloudFormationStackRecords API operation for Amazon Lightsail.
+//
+// Returns the CloudFormation stack record created as a result of the create
+// cloud formation stack operation.
+//
+// An AWS CloudFormation stack is used to create a new Amazon EC2 instance from
+// an exported Lightsail snapshot.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetCloudFormationStackRecords for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetCloudFormationStackRecords
+func (c *Lightsail) GetCloudFormationStackRecords(input *GetCloudFormationStackRecordsInput) (*GetCloudFormationStackRecordsOutput, error) {
+	// Synchronous convenience wrapper: out is only valid when Send returns nil.
+	req, out := c.GetCloudFormationStackRecordsRequest(input)
+	return out, req.Send()
+}
+
+// GetCloudFormationStackRecordsWithContext is the same as GetCloudFormationStackRecords with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetCloudFormationStackRecords for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetCloudFormationStackRecordsWithContext(ctx aws.Context, input *GetCloudFormationStackRecordsInput, opts ...request.Option) (*GetCloudFormationStackRecordsOutput, error) {
+	req, out := c.GetCloudFormationStackRecordsRequest(input)
+	// ctx must be non-nil (a nil context panics); opts are applied to the
+	// request before it is sent.
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetContactMethods = "GetContactMethods"
+
+// GetContactMethodsRequest generates a "aws/request.Request" representing the
+// client's request for the GetContactMethods operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetContactMethods for more information on using the GetContactMethods
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetContactMethodsRequest method.
+// req, resp := client.GetContactMethodsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetContactMethods
+func (c *Lightsail) GetContactMethodsRequest(input *GetContactMethodsInput) (req *request.Request, output *GetContactMethodsOutput) {
+	// Every Lightsail operation in this file is a POST to "/"; the operation
+	// name alone selects the action.
+	op := &request.Operation{
+		Name:       opGetContactMethods,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	// Tolerate a nil input by substituting an empty request structure.
+	if input == nil {
+		input = &GetContactMethodsInput{}
+	}
+
+	// output is populated only after the returned request is sent successfully.
+	output = &GetContactMethodsOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetContactMethods API operation for Amazon Lightsail.
+//
+// Returns information about the configured contact methods. Specify a protocol
+// in your request to return information about a specific contact method.
+//
+// A contact method is used to send you notifications about your Amazon Lightsail
+// resources. You can add one email address and one mobile phone number contact
+// method in each AWS Region. However, SMS text messaging is not supported in
+// some AWS Regions, and SMS text messages cannot be sent to some countries/regions.
+// For more information, see Notifications in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-notifications).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetContactMethods for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetContactMethods
+func (c *Lightsail) GetContactMethods(input *GetContactMethodsInput) (*GetContactMethodsOutput, error) {
+	// Synchronous convenience wrapper: out is only valid when Send returns nil.
+	req, out := c.GetContactMethodsRequest(input)
+	return out, req.Send()
+}
+
+// GetContactMethodsWithContext is the same as GetContactMethods with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetContactMethods for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetContactMethodsWithContext(ctx aws.Context, input *GetContactMethodsInput, opts ...request.Option) (*GetContactMethodsOutput, error) {
+	req, out := c.GetContactMethodsRequest(input)
+	// ctx must be non-nil (a nil context panics); opts are applied to the
+	// request before it is sent.
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetContainerAPIMetadata = "GetContainerAPIMetadata"
+
+// GetContainerAPIMetadataRequest generates a "aws/request.Request" representing the
+// client's request for the GetContainerAPIMetadata operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetContainerAPIMetadata for more information on using the GetContainerAPIMetadata
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetContainerAPIMetadataRequest method.
+// req, resp := client.GetContainerAPIMetadataRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetContainerAPIMetadata
+func (c *Lightsail) GetContainerAPIMetadataRequest(input *GetContainerAPIMetadataInput) (req *request.Request, output *GetContainerAPIMetadataOutput) {
+	// Every Lightsail operation in this file is a POST to "/"; the operation
+	// name alone selects the action.
+	op := &request.Operation{
+		Name:       opGetContainerAPIMetadata,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	// Tolerate a nil input by substituting an empty request structure.
+	if input == nil {
+		input = &GetContainerAPIMetadataInput{}
+	}
+
+	// output is populated only after the returned request is sent successfully.
+	output = &GetContainerAPIMetadataOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetContainerAPIMetadata API operation for Amazon Lightsail.
+//
+// Returns information about Amazon Lightsail containers, such as the current
+// version of the Lightsail Control (lightsailctl) plugin.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetContainerAPIMetadata for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetContainerAPIMetadata
+func (c *Lightsail) GetContainerAPIMetadata(input *GetContainerAPIMetadataInput) (*GetContainerAPIMetadataOutput, error) {
+	// Synchronous convenience wrapper: out is only valid when Send returns nil.
+	req, out := c.GetContainerAPIMetadataRequest(input)
+	return out, req.Send()
+}
+
+// GetContainerAPIMetadataWithContext is the same as GetContainerAPIMetadata with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetContainerAPIMetadata for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetContainerAPIMetadataWithContext(ctx aws.Context, input *GetContainerAPIMetadataInput, opts ...request.Option) (*GetContainerAPIMetadataOutput, error) {
+	req, out := c.GetContainerAPIMetadataRequest(input)
+	// ctx must be non-nil (a nil context panics); opts are applied to the
+	// request before it is sent.
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetContainerImages = "GetContainerImages"
+
+// GetContainerImagesRequest generates a "aws/request.Request" representing the
+// client's request for the GetContainerImages operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetContainerImages for more information on using the GetContainerImages
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetContainerImagesRequest method.
+// req, resp := client.GetContainerImagesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetContainerImages
+func (c *Lightsail) GetContainerImagesRequest(input *GetContainerImagesInput) (req *request.Request, output *GetContainerImagesOutput) {
+	// Every Lightsail operation in this file is a POST to "/"; the operation
+	// name alone selects the action.
+	op := &request.Operation{
+		Name:       opGetContainerImages,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	// Tolerate a nil input by substituting an empty request structure.
+	if input == nil {
+		input = &GetContainerImagesInput{}
+	}
+
+	// output is populated only after the returned request is sent successfully.
+	output = &GetContainerImagesOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetContainerImages API operation for Amazon Lightsail.
+//
+// Returns the container images that are registered to your Amazon Lightsail
+// container service.
+//
+// If you created a deployment on your Lightsail container service that uses
+// container images from a public registry like Docker Hub, those images are
+// not returned as part of this action. Those images are not registered to your
+// Lightsail container service.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetContainerImages for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetContainerImages
+func (c *Lightsail) GetContainerImages(input *GetContainerImagesInput) (*GetContainerImagesOutput, error) {
+	// Synchronous convenience wrapper: out is only valid when Send returns nil.
+	req, out := c.GetContainerImagesRequest(input)
+	return out, req.Send()
+}
+
+// GetContainerImagesWithContext is the same as GetContainerImages with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetContainerImages for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetContainerImagesWithContext(ctx aws.Context, input *GetContainerImagesInput, opts ...request.Option) (*GetContainerImagesOutput, error) {
+	req, out := c.GetContainerImagesRequest(input)
+	// ctx must be non-nil (a nil context panics); opts are applied to the
+	// request before it is sent.
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetContainerLog = "GetContainerLog"
+
+// GetContainerLogRequest generates a "aws/request.Request" representing the
+// client's request for the GetContainerLog operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetContainerLog for more information on using the GetContainerLog
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetContainerLogRequest method.
+// req, resp := client.GetContainerLogRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetContainerLog
+func (c *Lightsail) GetContainerLogRequest(input *GetContainerLogInput) (req *request.Request, output *GetContainerLogOutput) {
+	// Every Lightsail operation in this file is a POST to "/"; the operation
+	// name alone selects the action.
+	op := &request.Operation{
+		Name:       opGetContainerLog,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	// Tolerate a nil input by substituting an empty request structure.
+	if input == nil {
+		input = &GetContainerLogInput{}
+	}
+
+	// output is populated only after the returned request is sent successfully.
+	output = &GetContainerLogOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetContainerLog API operation for Amazon Lightsail.
+//
+// Returns the log events of a container of your Amazon Lightsail container
+// service.
+//
+// If your container service has more than one node (i.e., a scale greater than
+// 1), then the log events that are returned for the specified container are
+// merged from all nodes on your container service.
+//
+// Container logs are retained for a certain amount of time. For more information,
+// see Amazon Lightsail endpoints and quotas (https://docs.aws.amazon.com/general/latest/gr/lightsail.html)
+// in the AWS General Reference.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetContainerLog for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetContainerLog
+func (c *Lightsail) GetContainerLog(input *GetContainerLogInput) (*GetContainerLogOutput, error) {
+	// Synchronous convenience wrapper: out is only valid when Send returns nil.
+	req, out := c.GetContainerLogRequest(input)
+	return out, req.Send()
+}
+
+// GetContainerLogWithContext is the same as GetContainerLog with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetContainerLog for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetContainerLogWithContext(ctx aws.Context, input *GetContainerLogInput, opts ...request.Option) (*GetContainerLogOutput, error) {
+	req, out := c.GetContainerLogRequest(input)
+	// ctx must be non-nil (a nil context panics); opts are applied to the
+	// request before it is sent.
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetContainerServiceDeployments = "GetContainerServiceDeployments"
+
+// GetContainerServiceDeploymentsRequest generates a "aws/request.Request" representing the
+// client's request for the GetContainerServiceDeployments operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetContainerServiceDeployments for more information on using the GetContainerServiceDeployments
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetContainerServiceDeploymentsRequest method.
+// req, resp := client.GetContainerServiceDeploymentsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetContainerServiceDeployments
+func (c *Lightsail) GetContainerServiceDeploymentsRequest(input *GetContainerServiceDeploymentsInput) (req *request.Request, output *GetContainerServiceDeploymentsOutput) {
+	// Every Lightsail operation in this file is a POST to "/"; the operation
+	// name alone selects the action.
+	op := &request.Operation{
+		Name:       opGetContainerServiceDeployments,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	// Tolerate a nil input by substituting an empty request structure.
+	if input == nil {
+		input = &GetContainerServiceDeploymentsInput{}
+	}
+
+	// output is populated only after the returned request is sent successfully.
+	output = &GetContainerServiceDeploymentsOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetContainerServiceDeployments API operation for Amazon Lightsail.
+//
+// Returns the deployments for your Amazon Lightsail container service.
+//
+// A deployment specifies the settings, such as the ports and launch command,
+// of containers that are deployed to your container service.
+//
+// The deployments are ordered by version in ascending order. The newest version
+// is listed at the top of the response.
+//
+// A set number of deployments are kept before the oldest one is replaced with
+// the newest one. For more information, see Amazon Lightsail endpoints and
+// quotas (https://docs.aws.amazon.com/general/latest/gr/lightsail.html) in
+// the AWS General Reference.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetContainerServiceDeployments for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetContainerServiceDeployments
+func (c *Lightsail) GetContainerServiceDeployments(input *GetContainerServiceDeploymentsInput) (*GetContainerServiceDeploymentsOutput, error) {
+	// Synchronous convenience wrapper: out is only valid when Send returns nil.
+	req, out := c.GetContainerServiceDeploymentsRequest(input)
+	return out, req.Send()
+}
+
+// GetContainerServiceDeploymentsWithContext is the same as GetContainerServiceDeployments with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetContainerServiceDeployments for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetContainerServiceDeploymentsWithContext(ctx aws.Context, input *GetContainerServiceDeploymentsInput, opts ...request.Option) (*GetContainerServiceDeploymentsOutput, error) {
+	req, out := c.GetContainerServiceDeploymentsRequest(input)
+	// ctx must be non-nil (a nil context panics); opts are applied to the
+	// request before it is sent.
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetContainerServiceMetricData = "GetContainerServiceMetricData"
+
+// GetContainerServiceMetricDataRequest generates a "aws/request.Request" representing the
+// client's request for the GetContainerServiceMetricData operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetContainerServiceMetricData for more information on using the GetContainerServiceMetricData
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetContainerServiceMetricDataRequest method.
+// req, resp := client.GetContainerServiceMetricDataRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetContainerServiceMetricData
+func (c *Lightsail) GetContainerServiceMetricDataRequest(input *GetContainerServiceMetricDataInput) (req *request.Request, output *GetContainerServiceMetricDataOutput) {
+ op := &request.Operation{
+ Name: opGetContainerServiceMetricData,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetContainerServiceMetricDataInput{}
+ }
+
+ output = &GetContainerServiceMetricDataOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetContainerServiceMetricData API operation for Amazon Lightsail.
+//
+// Returns the data points of a specific metric of your Amazon Lightsail container
+// service.
+//
+// Metrics report the utilization of your resources. Monitor and collect metric
+// data regularly to maintain the reliability, availability, and performance
+// of your resources.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetContainerServiceMetricData for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetContainerServiceMetricData
+func (c *Lightsail) GetContainerServiceMetricData(input *GetContainerServiceMetricDataInput) (*GetContainerServiceMetricDataOutput, error) {
+ req, out := c.GetContainerServiceMetricDataRequest(input)
+ return out, req.Send()
+}
+
+// GetContainerServiceMetricDataWithContext is the same as GetContainerServiceMetricData with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetContainerServiceMetricData for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetContainerServiceMetricDataWithContext(ctx aws.Context, input *GetContainerServiceMetricDataInput, opts ...request.Option) (*GetContainerServiceMetricDataOutput, error) {
+ req, out := c.GetContainerServiceMetricDataRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetContainerServicePowers = "GetContainerServicePowers"
+
+// GetContainerServicePowersRequest generates a "aws/request.Request" representing the
+// client's request for the GetContainerServicePowers operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetContainerServicePowers for more information on using the GetContainerServicePowers
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetContainerServicePowersRequest method.
+// req, resp := client.GetContainerServicePowersRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetContainerServicePowers
+func (c *Lightsail) GetContainerServicePowersRequest(input *GetContainerServicePowersInput) (req *request.Request, output *GetContainerServicePowersOutput) {
+ op := &request.Operation{
+ Name: opGetContainerServicePowers,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetContainerServicePowersInput{}
+ }
+
+ output = &GetContainerServicePowersOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetContainerServicePowers API operation for Amazon Lightsail.
+//
+// Returns the list of powers that can be specified for your Amazon Lightsail
+// container services.
+//
+// The power specifies the amount of memory, the number of vCPUs, and the base
+// price of the container service.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetContainerServicePowers for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetContainerServicePowers
+func (c *Lightsail) GetContainerServicePowers(input *GetContainerServicePowersInput) (*GetContainerServicePowersOutput, error) {
+ req, out := c.GetContainerServicePowersRequest(input)
+ return out, req.Send()
+}
+
+// GetContainerServicePowersWithContext is the same as GetContainerServicePowers with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetContainerServicePowers for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetContainerServicePowersWithContext(ctx aws.Context, input *GetContainerServicePowersInput, opts ...request.Option) (*GetContainerServicePowersOutput, error) {
+ req, out := c.GetContainerServicePowersRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetContainerServices = "GetContainerServices"
+
+// GetContainerServicesRequest generates a "aws/request.Request" representing the
+// client's request for the GetContainerServices operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetContainerServices for more information on using the GetContainerServices
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetContainerServicesRequest method.
+// req, resp := client.GetContainerServicesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetContainerServices
+func (c *Lightsail) GetContainerServicesRequest(input *GetContainerServicesInput) (req *request.Request, output *GetContainerServicesOutput) {
+ op := &request.Operation{
+ Name: opGetContainerServices,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetContainerServicesInput{}
+ }
+
+ output = &GetContainerServicesOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetContainerServices API operation for Amazon Lightsail.
+//
+// Returns information about one or more of your Amazon Lightsail container
+// services.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetContainerServices for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetContainerServices
+func (c *Lightsail) GetContainerServices(input *GetContainerServicesInput) (*GetContainerServicesOutput, error) {
+ req, out := c.GetContainerServicesRequest(input)
+ return out, req.Send()
+}
+
+// GetContainerServicesWithContext is the same as GetContainerServices with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetContainerServices for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetContainerServicesWithContext(ctx aws.Context, input *GetContainerServicesInput, opts ...request.Option) (*GetContainerServicesOutput, error) {
+ req, out := c.GetContainerServicesRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetDisk = "GetDisk"
+
+// GetDiskRequest generates a "aws/request.Request" representing the
+// client's request for the GetDisk operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetDisk for more information on using the GetDisk
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetDiskRequest method.
+// req, resp := client.GetDiskRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDisk
+func (c *Lightsail) GetDiskRequest(input *GetDiskInput) (req *request.Request, output *GetDiskOutput) {
+ op := &request.Operation{
+ Name: opGetDisk,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetDiskInput{}
+ }
+
+ output = &GetDiskOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetDisk API operation for Amazon Lightsail.
+//
+// Returns information about a specific block storage disk.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetDisk for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDisk
+func (c *Lightsail) GetDisk(input *GetDiskInput) (*GetDiskOutput, error) {
+ req, out := c.GetDiskRequest(input)
+ return out, req.Send()
+}
+
+// GetDiskWithContext is the same as GetDisk with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetDisk for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetDiskWithContext(ctx aws.Context, input *GetDiskInput, opts ...request.Option) (*GetDiskOutput, error) {
+ req, out := c.GetDiskRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetDiskSnapshot = "GetDiskSnapshot"
+
+// GetDiskSnapshotRequest generates a "aws/request.Request" representing the
+// client's request for the GetDiskSnapshot operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetDiskSnapshot for more information on using the GetDiskSnapshot
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetDiskSnapshotRequest method.
+// req, resp := client.GetDiskSnapshotRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDiskSnapshot
+func (c *Lightsail) GetDiskSnapshotRequest(input *GetDiskSnapshotInput) (req *request.Request, output *GetDiskSnapshotOutput) {
+ op := &request.Operation{
+ Name: opGetDiskSnapshot,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetDiskSnapshotInput{}
+ }
+
+ output = &GetDiskSnapshotOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetDiskSnapshot API operation for Amazon Lightsail.
+//
+// Returns information about a specific block storage disk snapshot.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetDiskSnapshot for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDiskSnapshot
+func (c *Lightsail) GetDiskSnapshot(input *GetDiskSnapshotInput) (*GetDiskSnapshotOutput, error) {
+ req, out := c.GetDiskSnapshotRequest(input)
+ return out, req.Send()
+}
+
+// GetDiskSnapshotWithContext is the same as GetDiskSnapshot with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetDiskSnapshot for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetDiskSnapshotWithContext(ctx aws.Context, input *GetDiskSnapshotInput, opts ...request.Option) (*GetDiskSnapshotOutput, error) {
+ req, out := c.GetDiskSnapshotRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetDiskSnapshots = "GetDiskSnapshots"
+
+// GetDiskSnapshotsRequest generates a "aws/request.Request" representing the
+// client's request for the GetDiskSnapshots operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetDiskSnapshots for more information on using the GetDiskSnapshots
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetDiskSnapshotsRequest method.
+// req, resp := client.GetDiskSnapshotsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDiskSnapshots
+func (c *Lightsail) GetDiskSnapshotsRequest(input *GetDiskSnapshotsInput) (req *request.Request, output *GetDiskSnapshotsOutput) {
+ op := &request.Operation{
+ Name: opGetDiskSnapshots,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetDiskSnapshotsInput{}
+ }
+
+ output = &GetDiskSnapshotsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetDiskSnapshots API operation for Amazon Lightsail.
+//
+// Returns information about all block storage disk snapshots in your AWS account
+// and region.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetDiskSnapshots for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDiskSnapshots
+func (c *Lightsail) GetDiskSnapshots(input *GetDiskSnapshotsInput) (*GetDiskSnapshotsOutput, error) {
+ req, out := c.GetDiskSnapshotsRequest(input)
+ return out, req.Send()
+}
+
+// GetDiskSnapshotsWithContext is the same as GetDiskSnapshots with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetDiskSnapshots for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetDiskSnapshotsWithContext(ctx aws.Context, input *GetDiskSnapshotsInput, opts ...request.Option) (*GetDiskSnapshotsOutput, error) {
+ req, out := c.GetDiskSnapshotsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetDisks = "GetDisks"
+
+// GetDisksRequest generates a "aws/request.Request" representing the
+// client's request for the GetDisks operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetDisks for more information on using the GetDisks
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetDisksRequest method.
+// req, resp := client.GetDisksRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDisks
+func (c *Lightsail) GetDisksRequest(input *GetDisksInput) (req *request.Request, output *GetDisksOutput) {
+ op := &request.Operation{
+ Name: opGetDisks,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetDisksInput{}
+ }
+
+ output = &GetDisksOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetDisks API operation for Amazon Lightsail.
+//
+// Returns information about all block storage disks in your AWS account and
+// region.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetDisks for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDisks
+func (c *Lightsail) GetDisks(input *GetDisksInput) (*GetDisksOutput, error) {
+ req, out := c.GetDisksRequest(input)
+ return out, req.Send()
+}
+
+// GetDisksWithContext is the same as GetDisks with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetDisks for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetDisksWithContext(ctx aws.Context, input *GetDisksInput, opts ...request.Option) (*GetDisksOutput, error) {
+ req, out := c.GetDisksRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetDistributionBundles = "GetDistributionBundles"
+
+// GetDistributionBundlesRequest generates a "aws/request.Request" representing the
+// client's request for the GetDistributionBundles operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetDistributionBundles for more information on using the GetDistributionBundles
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetDistributionBundlesRequest method.
+// req, resp := client.GetDistributionBundlesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDistributionBundles
+func (c *Lightsail) GetDistributionBundlesRequest(input *GetDistributionBundlesInput) (req *request.Request, output *GetDistributionBundlesOutput) {
+ op := &request.Operation{
+ Name: opGetDistributionBundles,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetDistributionBundlesInput{}
+ }
+
+ output = &GetDistributionBundlesOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetDistributionBundles API operation for Amazon Lightsail.
+//
+// Returns the bundles that can be applied to your Amazon Lightsail content
+// delivery network (CDN) distributions.
+//
+// A distribution bundle specifies the monthly network transfer quota and monthly
+// cost of your distribution.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetDistributionBundles for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDistributionBundles
+func (c *Lightsail) GetDistributionBundles(input *GetDistributionBundlesInput) (*GetDistributionBundlesOutput, error) {
+ req, out := c.GetDistributionBundlesRequest(input)
+ return out, req.Send()
+}
+
+// GetDistributionBundlesWithContext is the same as GetDistributionBundles with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetDistributionBundles for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetDistributionBundlesWithContext(ctx aws.Context, input *GetDistributionBundlesInput, opts ...request.Option) (*GetDistributionBundlesOutput, error) {
+ req, out := c.GetDistributionBundlesRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetDistributionLatestCacheReset = "GetDistributionLatestCacheReset"
+
+// GetDistributionLatestCacheResetRequest generates a "aws/request.Request" representing the
+// client's request for the GetDistributionLatestCacheReset operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetDistributionLatestCacheReset for more information on using the GetDistributionLatestCacheReset
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetDistributionLatestCacheResetRequest method.
+// req, resp := client.GetDistributionLatestCacheResetRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDistributionLatestCacheReset
+func (c *Lightsail) GetDistributionLatestCacheResetRequest(input *GetDistributionLatestCacheResetInput) (req *request.Request, output *GetDistributionLatestCacheResetOutput) {
+ op := &request.Operation{
+ Name: opGetDistributionLatestCacheReset,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetDistributionLatestCacheResetInput{}
+ }
+
+ output = &GetDistributionLatestCacheResetOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetDistributionLatestCacheReset API operation for Amazon Lightsail.
+//
+// Returns the timestamp and status of the last cache reset of a specific Amazon
+// Lightsail content delivery network (CDN) distribution.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetDistributionLatestCacheReset for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDistributionLatestCacheReset
+func (c *Lightsail) GetDistributionLatestCacheReset(input *GetDistributionLatestCacheResetInput) (*GetDistributionLatestCacheResetOutput, error) {
+ req, out := c.GetDistributionLatestCacheResetRequest(input)
+ return out, req.Send()
+}
+
+// GetDistributionLatestCacheResetWithContext is the same as GetDistributionLatestCacheReset with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetDistributionLatestCacheReset for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetDistributionLatestCacheResetWithContext(ctx aws.Context, input *GetDistributionLatestCacheResetInput, opts ...request.Option) (*GetDistributionLatestCacheResetOutput, error) {
+ req, out := c.GetDistributionLatestCacheResetRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetDistributionMetricData = "GetDistributionMetricData"
+
+// GetDistributionMetricDataRequest generates a "aws/request.Request" representing the
+// client's request for the GetDistributionMetricData operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetDistributionMetricData for more information on using the GetDistributionMetricData
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetDistributionMetricDataRequest method.
+// req, resp := client.GetDistributionMetricDataRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDistributionMetricData
+func (c *Lightsail) GetDistributionMetricDataRequest(input *GetDistributionMetricDataInput) (req *request.Request, output *GetDistributionMetricDataOutput) {
+ op := &request.Operation{
+ Name: opGetDistributionMetricData,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetDistributionMetricDataInput{}
+ }
+
+ output = &GetDistributionMetricDataOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetDistributionMetricData API operation for Amazon Lightsail.
+//
+// Returns the data points of a specific metric for an Amazon Lightsail content
+// delivery network (CDN) distribution.
+//
+// Metrics report the utilization of your resources, and the error counts generated
+// by them. Monitor and collect metric data regularly to maintain the reliability,
+// availability, and performance of your resources.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetDistributionMetricData for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDistributionMetricData
+func (c *Lightsail) GetDistributionMetricData(input *GetDistributionMetricDataInput) (*GetDistributionMetricDataOutput, error) {
+ req, out := c.GetDistributionMetricDataRequest(input)
+ return out, req.Send()
+}
+
+// GetDistributionMetricDataWithContext is the same as GetDistributionMetricData with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetDistributionMetricData for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetDistributionMetricDataWithContext(ctx aws.Context, input *GetDistributionMetricDataInput, opts ...request.Option) (*GetDistributionMetricDataOutput, error) {
+ req, out := c.GetDistributionMetricDataRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetDistributions = "GetDistributions"
+
+// GetDistributionsRequest generates a "aws/request.Request" representing the
+// client's request for the GetDistributions operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetDistributions for more information on using the GetDistributions
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetDistributionsRequest method.
+// req, resp := client.GetDistributionsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDistributions
+func (c *Lightsail) GetDistributionsRequest(input *GetDistributionsInput) (req *request.Request, output *GetDistributionsOutput) {
+ op := &request.Operation{
+ Name: opGetDistributions,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetDistributionsInput{}
+ }
+
+ output = &GetDistributionsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetDistributions API operation for Amazon Lightsail.
+//
+// Returns information about one or more of your Amazon Lightsail content delivery
+// network (CDN) distributions.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetDistributions for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDistributions
+func (c *Lightsail) GetDistributions(input *GetDistributionsInput) (*GetDistributionsOutput, error) {
+ req, out := c.GetDistributionsRequest(input)
+ return out, req.Send()
+}
+
+// GetDistributionsWithContext is the same as GetDistributions with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetDistributions for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetDistributionsWithContext(ctx aws.Context, input *GetDistributionsInput, opts ...request.Option) (*GetDistributionsOutput, error) {
+ req, out := c.GetDistributionsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetDomain = "GetDomain"
+
+// GetDomainRequest generates a "aws/request.Request" representing the
+// client's request for the GetDomain operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetDomain for more information on using the GetDomain
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetDomainRequest method.
+// req, resp := client.GetDomainRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDomain
+func (c *Lightsail) GetDomainRequest(input *GetDomainInput) (req *request.Request, output *GetDomainOutput) {
+ op := &request.Operation{
+ Name: opGetDomain,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetDomainInput{}
+ }
+
+ output = &GetDomainOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetDomain API operation for Amazon Lightsail.
+//
+// Returns information about a specific domain recordset.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetDomain for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDomain
+func (c *Lightsail) GetDomain(input *GetDomainInput) (*GetDomainOutput, error) {
+ req, out := c.GetDomainRequest(input)
+ return out, req.Send()
+}
+
+// GetDomainWithContext is the same as GetDomain with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetDomain for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetDomainWithContext(ctx aws.Context, input *GetDomainInput, opts ...request.Option) (*GetDomainOutput, error) {
+ req, out := c.GetDomainRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetDomains = "GetDomains"
+
+// GetDomainsRequest generates a "aws/request.Request" representing the
+// client's request for the GetDomains operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetDomains for more information on using the GetDomains
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetDomainsRequest method.
+// req, resp := client.GetDomainsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDomains
+func (c *Lightsail) GetDomainsRequest(input *GetDomainsInput) (req *request.Request, output *GetDomainsOutput) {
+ op := &request.Operation{
+ Name: opGetDomains,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetDomainsInput{}
+ }
+
+ output = &GetDomainsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetDomains API operation for Amazon Lightsail.
+//
+// Returns a list of all domains in the user's account.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetDomains for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDomains
+func (c *Lightsail) GetDomains(input *GetDomainsInput) (*GetDomainsOutput, error) {
+ req, out := c.GetDomainsRequest(input)
+ return out, req.Send()
+}
+
+// GetDomainsWithContext is the same as GetDomains with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetDomains for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetDomainsWithContext(ctx aws.Context, input *GetDomainsInput, opts ...request.Option) (*GetDomainsOutput, error) {
+ req, out := c.GetDomainsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetExportSnapshotRecords = "GetExportSnapshotRecords"
+
+// GetExportSnapshotRecordsRequest generates a "aws/request.Request" representing the
+// client's request for the GetExportSnapshotRecords operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetExportSnapshotRecords for more information on using the GetExportSnapshotRecords
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetExportSnapshotRecordsRequest method.
+// req, resp := client.GetExportSnapshotRecordsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetExportSnapshotRecords
+func (c *Lightsail) GetExportSnapshotRecordsRequest(input *GetExportSnapshotRecordsInput) (req *request.Request, output *GetExportSnapshotRecordsOutput) {
+ op := &request.Operation{
+ Name: opGetExportSnapshotRecords,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetExportSnapshotRecordsInput{}
+ }
+
+ output = &GetExportSnapshotRecordsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetExportSnapshotRecords API operation for Amazon Lightsail.
+//
+// Returns all export snapshot records created as a result of the export snapshot
+// operation.
+//
+// An export snapshot record can be used to create a new Amazon EC2 instance
+// and its related resources with the CreateCloudFormationStack action.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetExportSnapshotRecords for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetExportSnapshotRecords
+func (c *Lightsail) GetExportSnapshotRecords(input *GetExportSnapshotRecordsInput) (*GetExportSnapshotRecordsOutput, error) {
+ req, out := c.GetExportSnapshotRecordsRequest(input)
+ return out, req.Send()
+}
+
+// GetExportSnapshotRecordsWithContext is the same as GetExportSnapshotRecords with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetExportSnapshotRecords for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetExportSnapshotRecordsWithContext(ctx aws.Context, input *GetExportSnapshotRecordsInput, opts ...request.Option) (*GetExportSnapshotRecordsOutput, error) {
+ req, out := c.GetExportSnapshotRecordsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetInstance = "GetInstance"
+
+// GetInstanceRequest generates a "aws/request.Request" representing the
+// client's request for the GetInstance operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetInstance for more information on using the GetInstance
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetInstanceRequest method.
+// req, resp := client.GetInstanceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstance
+func (c *Lightsail) GetInstanceRequest(input *GetInstanceInput) (req *request.Request, output *GetInstanceOutput) {
+ op := &request.Operation{
+ Name: opGetInstance,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetInstanceInput{}
+ }
+
+ output = &GetInstanceOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetInstance API operation for Amazon Lightsail.
+//
+// Returns information about a specific Amazon Lightsail instance, which is
+// a virtual private server.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetInstance for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstance
+func (c *Lightsail) GetInstance(input *GetInstanceInput) (*GetInstanceOutput, error) {
+ req, out := c.GetInstanceRequest(input)
+ return out, req.Send()
+}
+
+// GetInstanceWithContext is the same as GetInstance with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetInstance for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetInstanceWithContext(ctx aws.Context, input *GetInstanceInput, opts ...request.Option) (*GetInstanceOutput, error) {
+ req, out := c.GetInstanceRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetInstanceAccessDetails = "GetInstanceAccessDetails"
+
+// GetInstanceAccessDetailsRequest generates a "aws/request.Request" representing the
+// client's request for the GetInstanceAccessDetails operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetInstanceAccessDetails for more information on using the GetInstanceAccessDetails
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetInstanceAccessDetailsRequest method.
+// req, resp := client.GetInstanceAccessDetailsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstanceAccessDetails
+func (c *Lightsail) GetInstanceAccessDetailsRequest(input *GetInstanceAccessDetailsInput) (req *request.Request, output *GetInstanceAccessDetailsOutput) {
+ op := &request.Operation{
+ Name: opGetInstanceAccessDetails,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetInstanceAccessDetailsInput{}
+ }
+
+ output = &GetInstanceAccessDetailsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetInstanceAccessDetails API operation for Amazon Lightsail.
+//
+// Returns temporary SSH keys you can use to connect to a specific virtual private
+// server, or instance.
+//
+// The get instance access details operation supports tag-based access control
+// via resource tags applied to the resource identified by instance name. For
+// more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetInstanceAccessDetails for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstanceAccessDetails
+func (c *Lightsail) GetInstanceAccessDetails(input *GetInstanceAccessDetailsInput) (*GetInstanceAccessDetailsOutput, error) {
+ req, out := c.GetInstanceAccessDetailsRequest(input)
+ return out, req.Send()
+}
+
+// GetInstanceAccessDetailsWithContext is the same as GetInstanceAccessDetails with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetInstanceAccessDetails for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetInstanceAccessDetailsWithContext(ctx aws.Context, input *GetInstanceAccessDetailsInput, opts ...request.Option) (*GetInstanceAccessDetailsOutput, error) {
+ req, out := c.GetInstanceAccessDetailsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetInstanceMetricData = "GetInstanceMetricData"
+
+// GetInstanceMetricDataRequest generates a "aws/request.Request" representing the
+// client's request for the GetInstanceMetricData operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetInstanceMetricData for more information on using the GetInstanceMetricData
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetInstanceMetricDataRequest method.
+// req, resp := client.GetInstanceMetricDataRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstanceMetricData
+func (c *Lightsail) GetInstanceMetricDataRequest(input *GetInstanceMetricDataInput) (req *request.Request, output *GetInstanceMetricDataOutput) {
+ op := &request.Operation{
+ Name: opGetInstanceMetricData,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetInstanceMetricDataInput{}
+ }
+
+ output = &GetInstanceMetricDataOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetInstanceMetricData API operation for Amazon Lightsail.
+//
+// Returns the data points for the specified Amazon Lightsail instance metric,
+// given an instance name.
+//
+// Metrics report the utilization of your resources, and the error counts generated
+// by them. Monitor and collect metric data regularly to maintain the reliability,
+// availability, and performance of your resources.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetInstanceMetricData for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstanceMetricData
+func (c *Lightsail) GetInstanceMetricData(input *GetInstanceMetricDataInput) (*GetInstanceMetricDataOutput, error) {
+ req, out := c.GetInstanceMetricDataRequest(input)
+ return out, req.Send()
+}
+
+// GetInstanceMetricDataWithContext is the same as GetInstanceMetricData with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetInstanceMetricData for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetInstanceMetricDataWithContext(ctx aws.Context, input *GetInstanceMetricDataInput, opts ...request.Option) (*GetInstanceMetricDataOutput, error) {
+ req, out := c.GetInstanceMetricDataRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetInstancePortStates = "GetInstancePortStates"
+
+// GetInstancePortStatesRequest generates a "aws/request.Request" representing the
+// client's request for the GetInstancePortStates operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetInstancePortStates for more information on using the GetInstancePortStates
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetInstancePortStatesRequest method.
+// req, resp := client.GetInstancePortStatesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstancePortStates
+func (c *Lightsail) GetInstancePortStatesRequest(input *GetInstancePortStatesInput) (req *request.Request, output *GetInstancePortStatesOutput) {
+ op := &request.Operation{
+ Name: opGetInstancePortStates,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetInstancePortStatesInput{}
+ }
+
+ output = &GetInstancePortStatesOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetInstancePortStates API operation for Amazon Lightsail.
+//
+// Returns the firewall port states for a specific Amazon Lightsail instance,
+// the IP addresses allowed to connect to the instance through the ports, and
+// the protocol.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetInstancePortStates for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstancePortStates
+func (c *Lightsail) GetInstancePortStates(input *GetInstancePortStatesInput) (*GetInstancePortStatesOutput, error) {
+ req, out := c.GetInstancePortStatesRequest(input)
+ return out, req.Send()
+}
+
+// GetInstancePortStatesWithContext is the same as GetInstancePortStates with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetInstancePortStates for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetInstancePortStatesWithContext(ctx aws.Context, input *GetInstancePortStatesInput, opts ...request.Option) (*GetInstancePortStatesOutput, error) {
+ req, out := c.GetInstancePortStatesRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetInstanceSnapshot = "GetInstanceSnapshot"
+
+// GetInstanceSnapshotRequest generates a "aws/request.Request" representing the
+// client's request for the GetInstanceSnapshot operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetInstanceSnapshot for more information on using the GetInstanceSnapshot
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetInstanceSnapshotRequest method.
+// req, resp := client.GetInstanceSnapshotRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstanceSnapshot
+func (c *Lightsail) GetInstanceSnapshotRequest(input *GetInstanceSnapshotInput) (req *request.Request, output *GetInstanceSnapshotOutput) {
+ op := &request.Operation{
+ Name: opGetInstanceSnapshot,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetInstanceSnapshotInput{}
+ }
+
+ output = &GetInstanceSnapshotOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetInstanceSnapshot API operation for Amazon Lightsail.
+//
+// Returns information about a specific instance snapshot.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetInstanceSnapshot for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstanceSnapshot
+func (c *Lightsail) GetInstanceSnapshot(input *GetInstanceSnapshotInput) (*GetInstanceSnapshotOutput, error) {
+ req, out := c.GetInstanceSnapshotRequest(input)
+ return out, req.Send()
+}
+
+// GetInstanceSnapshotWithContext is the same as GetInstanceSnapshot with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetInstanceSnapshot for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetInstanceSnapshotWithContext(ctx aws.Context, input *GetInstanceSnapshotInput, opts ...request.Option) (*GetInstanceSnapshotOutput, error) {
+ req, out := c.GetInstanceSnapshotRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetInstanceSnapshots = "GetInstanceSnapshots"
+
+// GetInstanceSnapshotsRequest generates a "aws/request.Request" representing the
+// client's request for the GetInstanceSnapshots operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetInstanceSnapshots for more information on using the GetInstanceSnapshots
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetInstanceSnapshotsRequest method.
+// req, resp := client.GetInstanceSnapshotsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstanceSnapshots
+func (c *Lightsail) GetInstanceSnapshotsRequest(input *GetInstanceSnapshotsInput) (req *request.Request, output *GetInstanceSnapshotsOutput) {
+ op := &request.Operation{
+ Name: opGetInstanceSnapshots,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetInstanceSnapshotsInput{}
+ }
+
+ output = &GetInstanceSnapshotsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetInstanceSnapshots API operation for Amazon Lightsail.
+//
+// Returns all instance snapshots for the user's account.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetInstanceSnapshots for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstanceSnapshots
+func (c *Lightsail) GetInstanceSnapshots(input *GetInstanceSnapshotsInput) (*GetInstanceSnapshotsOutput, error) {
+ req, out := c.GetInstanceSnapshotsRequest(input)
+ return out, req.Send()
+}
+
+// GetInstanceSnapshotsWithContext is the same as GetInstanceSnapshots with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetInstanceSnapshots for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetInstanceSnapshotsWithContext(ctx aws.Context, input *GetInstanceSnapshotsInput, opts ...request.Option) (*GetInstanceSnapshotsOutput, error) {
+ req, out := c.GetInstanceSnapshotsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetInstanceState = "GetInstanceState"
+
+// GetInstanceStateRequest generates a "aws/request.Request" representing the
+// client's request for the GetInstanceState operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetInstanceState for more information on using the GetInstanceState
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetInstanceStateRequest method.
+// req, resp := client.GetInstanceStateRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstanceState
+func (c *Lightsail) GetInstanceStateRequest(input *GetInstanceStateInput) (req *request.Request, output *GetInstanceStateOutput) {
+ op := &request.Operation{
+ Name: opGetInstanceState,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetInstanceStateInput{}
+ }
+
+ output = &GetInstanceStateOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetInstanceState API operation for Amazon Lightsail.
+//
+// Returns the state of a specific instance. Works on one instance at a time.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetInstanceState for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstanceState
+func (c *Lightsail) GetInstanceState(input *GetInstanceStateInput) (*GetInstanceStateOutput, error) {
+ req, out := c.GetInstanceStateRequest(input)
+ return out, req.Send()
+}
+
+// GetInstanceStateWithContext is the same as GetInstanceState with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetInstanceState for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetInstanceStateWithContext(ctx aws.Context, input *GetInstanceStateInput, opts ...request.Option) (*GetInstanceStateOutput, error) {
+ req, out := c.GetInstanceStateRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetInstances = "GetInstances"
+
+// GetInstancesRequest generates a "aws/request.Request" representing the
+// client's request for the GetInstances operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetInstances for more information on using the GetInstances
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetInstancesRequest method.
+// req, resp := client.GetInstancesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstances
+func (c *Lightsail) GetInstancesRequest(input *GetInstancesInput) (req *request.Request, output *GetInstancesOutput) {
+ op := &request.Operation{
+ Name: opGetInstances,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetInstancesInput{}
+ }
+
+ output = &GetInstancesOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetInstances API operation for Amazon Lightsail.
+//
+// Returns information about all Amazon Lightsail virtual private servers, or
+// instances.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetInstances for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstances
+func (c *Lightsail) GetInstances(input *GetInstancesInput) (*GetInstancesOutput, error) {
+ req, out := c.GetInstancesRequest(input)
+ return out, req.Send()
+}
+
+// GetInstancesWithContext is the same as GetInstances with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetInstances for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetInstancesWithContext(ctx aws.Context, input *GetInstancesInput, opts ...request.Option) (*GetInstancesOutput, error) {
+ req, out := c.GetInstancesRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetKeyPair = "GetKeyPair"
+
+// GetKeyPairRequest generates a "aws/request.Request" representing the
+// client's request for the GetKeyPair operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetKeyPair for more information on using the GetKeyPair
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetKeyPairRequest method.
+// req, resp := client.GetKeyPairRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetKeyPair
+func (c *Lightsail) GetKeyPairRequest(input *GetKeyPairInput) (req *request.Request, output *GetKeyPairOutput) {
+ op := &request.Operation{
+ Name: opGetKeyPair,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetKeyPairInput{}
+ }
+
+ output = &GetKeyPairOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetKeyPair API operation for Amazon Lightsail.
+//
+// Returns information about a specific key pair.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetKeyPair for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetKeyPair
+func (c *Lightsail) GetKeyPair(input *GetKeyPairInput) (*GetKeyPairOutput, error) {
+ req, out := c.GetKeyPairRequest(input)
+ return out, req.Send()
+}
+
+// GetKeyPairWithContext is the same as GetKeyPair with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetKeyPair for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetKeyPairWithContext(ctx aws.Context, input *GetKeyPairInput, opts ...request.Option) (*GetKeyPairOutput, error) {
+ req, out := c.GetKeyPairRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetKeyPairs = "GetKeyPairs"
+
+// GetKeyPairsRequest generates a "aws/request.Request" representing the
+// client's request for the GetKeyPairs operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetKeyPairs for more information on using the GetKeyPairs
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetKeyPairsRequest method.
+// req, resp := client.GetKeyPairsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetKeyPairs
+func (c *Lightsail) GetKeyPairsRequest(input *GetKeyPairsInput) (req *request.Request, output *GetKeyPairsOutput) {
+ op := &request.Operation{
+ Name: opGetKeyPairs,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetKeyPairsInput{}
+ }
+
+ output = &GetKeyPairsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetKeyPairs API operation for Amazon Lightsail.
+//
+// Returns information about all key pairs in the user's account.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetKeyPairs for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetKeyPairs
+func (c *Lightsail) GetKeyPairs(input *GetKeyPairsInput) (*GetKeyPairsOutput, error) {
+ req, out := c.GetKeyPairsRequest(input)
+ return out, req.Send()
+}
+
+// GetKeyPairsWithContext is the same as GetKeyPairs with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetKeyPairs for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetKeyPairsWithContext(ctx aws.Context, input *GetKeyPairsInput, opts ...request.Option) (*GetKeyPairsOutput, error) {
+ req, out := c.GetKeyPairsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetLoadBalancer = "GetLoadBalancer"
+
+// GetLoadBalancerRequest generates a "aws/request.Request" representing the
+// client's request for the GetLoadBalancer operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetLoadBalancer for more information on using the GetLoadBalancer
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetLoadBalancerRequest method.
+// req, resp := client.GetLoadBalancerRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetLoadBalancer
+func (c *Lightsail) GetLoadBalancerRequest(input *GetLoadBalancerInput) (req *request.Request, output *GetLoadBalancerOutput) {
+ op := &request.Operation{
+ Name: opGetLoadBalancer,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetLoadBalancerInput{}
+ }
+
+ output = &GetLoadBalancerOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetLoadBalancer API operation for Amazon Lightsail.
+//
+// Returns information about the specified Lightsail load balancer.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetLoadBalancer for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetLoadBalancer
+func (c *Lightsail) GetLoadBalancer(input *GetLoadBalancerInput) (*GetLoadBalancerOutput, error) {
+ req, out := c.GetLoadBalancerRequest(input)
+ return out, req.Send()
+}
+
+// GetLoadBalancerWithContext is the same as GetLoadBalancer with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetLoadBalancer for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetLoadBalancerWithContext(ctx aws.Context, input *GetLoadBalancerInput, opts ...request.Option) (*GetLoadBalancerOutput, error) {
+ req, out := c.GetLoadBalancerRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetLoadBalancerMetricData = "GetLoadBalancerMetricData"
+
+// GetLoadBalancerMetricDataRequest generates a "aws/request.Request" representing the
+// client's request for the GetLoadBalancerMetricData operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetLoadBalancerMetricData for more information on using the GetLoadBalancerMetricData
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetLoadBalancerMetricDataRequest method.
+// req, resp := client.GetLoadBalancerMetricDataRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetLoadBalancerMetricData
+func (c *Lightsail) GetLoadBalancerMetricDataRequest(input *GetLoadBalancerMetricDataInput) (req *request.Request, output *GetLoadBalancerMetricDataOutput) {
+ op := &request.Operation{
+ Name: opGetLoadBalancerMetricData,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetLoadBalancerMetricDataInput{}
+ }
+
+ output = &GetLoadBalancerMetricDataOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetLoadBalancerMetricData API operation for Amazon Lightsail.
+//
+// Returns information about health metrics for your Lightsail load balancer.
+//
+// Metrics report the utilization of your resources, and the error counts generated
+// by them. Monitor and collect metric data regularly to maintain the reliability,
+// availability, and performance of your resources.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetLoadBalancerMetricData for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetLoadBalancerMetricData
+func (c *Lightsail) GetLoadBalancerMetricData(input *GetLoadBalancerMetricDataInput) (*GetLoadBalancerMetricDataOutput, error) {
+ req, out := c.GetLoadBalancerMetricDataRequest(input)
+ return out, req.Send()
+}
+
+// GetLoadBalancerMetricDataWithContext is the same as GetLoadBalancerMetricData with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetLoadBalancerMetricData for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetLoadBalancerMetricDataWithContext(ctx aws.Context, input *GetLoadBalancerMetricDataInput, opts ...request.Option) (*GetLoadBalancerMetricDataOutput, error) {
+ req, out := c.GetLoadBalancerMetricDataRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetLoadBalancerTlsCertificates = "GetLoadBalancerTlsCertificates"
+
+// GetLoadBalancerTlsCertificatesRequest generates a "aws/request.Request" representing the
+// client's request for the GetLoadBalancerTlsCertificates operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetLoadBalancerTlsCertificates for more information on using the GetLoadBalancerTlsCertificates
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetLoadBalancerTlsCertificatesRequest method.
+// req, resp := client.GetLoadBalancerTlsCertificatesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetLoadBalancerTlsCertificates
+func (c *Lightsail) GetLoadBalancerTlsCertificatesRequest(input *GetLoadBalancerTlsCertificatesInput) (req *request.Request, output *GetLoadBalancerTlsCertificatesOutput) {
+ op := &request.Operation{
+ Name: opGetLoadBalancerTlsCertificates,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetLoadBalancerTlsCertificatesInput{}
+ }
+
+ output = &GetLoadBalancerTlsCertificatesOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetLoadBalancerTlsCertificates API operation for Amazon Lightsail.
+//
+// Returns information about the TLS certificates that are associated with the
+// specified Lightsail load balancer.
+//
+// TLS is just an updated, more secure version of Secure Socket Layer (SSL).
+//
+// You can have a maximum of 2 certificates associated with a Lightsail load
+// balancer. One is active and the other is inactive.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetLoadBalancerTlsCertificates for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetLoadBalancerTlsCertificates
+func (c *Lightsail) GetLoadBalancerTlsCertificates(input *GetLoadBalancerTlsCertificatesInput) (*GetLoadBalancerTlsCertificatesOutput, error) {
+ req, out := c.GetLoadBalancerTlsCertificatesRequest(input)
+ return out, req.Send()
+}
+
+// GetLoadBalancerTlsCertificatesWithContext is the same as GetLoadBalancerTlsCertificates with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetLoadBalancerTlsCertificates for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetLoadBalancerTlsCertificatesWithContext(ctx aws.Context, input *GetLoadBalancerTlsCertificatesInput, opts ...request.Option) (*GetLoadBalancerTlsCertificatesOutput, error) {
+ req, out := c.GetLoadBalancerTlsCertificatesRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetLoadBalancers = "GetLoadBalancers"
+
+// GetLoadBalancersRequest generates a "aws/request.Request" representing the
+// client's request for the GetLoadBalancers operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetLoadBalancers for more information on using the GetLoadBalancers
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetLoadBalancersRequest method.
+// req, resp := client.GetLoadBalancersRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetLoadBalancers
+func (c *Lightsail) GetLoadBalancersRequest(input *GetLoadBalancersInput) (req *request.Request, output *GetLoadBalancersOutput) {
+ op := &request.Operation{
+ Name: opGetLoadBalancers,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetLoadBalancersInput{}
+ }
+
+ output = &GetLoadBalancersOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetLoadBalancers API operation for Amazon Lightsail.
+//
+// Returns information about all load balancers in an account.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetLoadBalancers for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetLoadBalancers
+func (c *Lightsail) GetLoadBalancers(input *GetLoadBalancersInput) (*GetLoadBalancersOutput, error) {
+ req, out := c.GetLoadBalancersRequest(input)
+ return out, req.Send()
+}
+
+// GetLoadBalancersWithContext is the same as GetLoadBalancers with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetLoadBalancers for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetLoadBalancersWithContext(ctx aws.Context, input *GetLoadBalancersInput, opts ...request.Option) (*GetLoadBalancersOutput, error) {
+ req, out := c.GetLoadBalancersRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetOperation = "GetOperation"
+
+// GetOperationRequest generates a "aws/request.Request" representing the
+// client's request for the GetOperation operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetOperation for more information on using the GetOperation
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetOperationRequest method.
+// req, resp := client.GetOperationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetOperation
+func (c *Lightsail) GetOperationRequest(input *GetOperationInput) (req *request.Request, output *GetOperationOutput) {
+ op := &request.Operation{
+ Name: opGetOperation,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetOperationInput{}
+ }
+
+ output = &GetOperationOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetOperation API operation for Amazon Lightsail.
+//
+// Returns information about a specific operation. Operations include events
+// such as when you create an instance, allocate a static IP, attach a static
+// IP, and so on.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetOperation for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetOperation
+func (c *Lightsail) GetOperation(input *GetOperationInput) (*GetOperationOutput, error) {
+ req, out := c.GetOperationRequest(input)
+ return out, req.Send()
+}
+
+// GetOperationWithContext is the same as GetOperation with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetOperation for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetOperationWithContext(ctx aws.Context, input *GetOperationInput, opts ...request.Option) (*GetOperationOutput, error) {
+ req, out := c.GetOperationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetOperations = "GetOperations"
+
+// GetOperationsRequest generates a "aws/request.Request" representing the
+// client's request for the GetOperations operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetOperations for more information on using the GetOperations
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetOperationsRequest method.
+// req, resp := client.GetOperationsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetOperations
+func (c *Lightsail) GetOperationsRequest(input *GetOperationsInput) (req *request.Request, output *GetOperationsOutput) {
+ op := &request.Operation{
+ Name: opGetOperations,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetOperationsInput{}
+ }
+
+ output = &GetOperationsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetOperations API operation for Amazon Lightsail.
+//
+// Returns information about all operations.
+//
+// Results are returned from oldest to newest, up to a maximum of 200. Results
+// can be paged by making each subsequent call to GetOperations use the maximum
+// (last) statusChangedAt value from the previous request.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetOperations for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetOperations
+func (c *Lightsail) GetOperations(input *GetOperationsInput) (*GetOperationsOutput, error) {
+ req, out := c.GetOperationsRequest(input)
+ return out, req.Send()
+}
+
+// GetOperationsWithContext is the same as GetOperations with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetOperations for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetOperationsWithContext(ctx aws.Context, input *GetOperationsInput, opts ...request.Option) (*GetOperationsOutput, error) {
+ req, out := c.GetOperationsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetOperationsForResource = "GetOperationsForResource"
+
+// GetOperationsForResourceRequest generates a "aws/request.Request" representing the
+// client's request for the GetOperationsForResource operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetOperationsForResource for more information on using the GetOperationsForResource
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetOperationsForResourceRequest method.
+// req, resp := client.GetOperationsForResourceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetOperationsForResource
+func (c *Lightsail) GetOperationsForResourceRequest(input *GetOperationsForResourceInput) (req *request.Request, output *GetOperationsForResourceOutput) {
+ op := &request.Operation{
+ Name: opGetOperationsForResource,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetOperationsForResourceInput{}
+ }
+
+ output = &GetOperationsForResourceOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetOperationsForResource API operation for Amazon Lightsail.
+//
+// Gets operations for a specific resource (e.g., an instance or a static IP).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetOperationsForResource for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetOperationsForResource
+func (c *Lightsail) GetOperationsForResource(input *GetOperationsForResourceInput) (*GetOperationsForResourceOutput, error) {
+ req, out := c.GetOperationsForResourceRequest(input)
+ return out, req.Send()
+}
+
+// GetOperationsForResourceWithContext is the same as GetOperationsForResource with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetOperationsForResource for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetOperationsForResourceWithContext(ctx aws.Context, input *GetOperationsForResourceInput, opts ...request.Option) (*GetOperationsForResourceOutput, error) {
+ req, out := c.GetOperationsForResourceRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetRegions = "GetRegions"
+
+// GetRegionsRequest generates a "aws/request.Request" representing the
+// client's request for the GetRegions operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetRegions for more information on using the GetRegions
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetRegionsRequest method.
+// req, resp := client.GetRegionsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRegions
+func (c *Lightsail) GetRegionsRequest(input *GetRegionsInput) (req *request.Request, output *GetRegionsOutput) {
+ op := &request.Operation{
+ Name: opGetRegions,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetRegionsInput{}
+ }
+
+ output = &GetRegionsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetRegions API operation for Amazon Lightsail.
+//
+// Returns a list of all valid regions for Amazon Lightsail. Use the include
+// availability zones parameter to also return the Availability Zones in a region.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetRegions for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRegions
+func (c *Lightsail) GetRegions(input *GetRegionsInput) (*GetRegionsOutput, error) {
+ req, out := c.GetRegionsRequest(input)
+ return out, req.Send()
+}
+
+// GetRegionsWithContext is the same as GetRegions with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetRegions for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetRegionsWithContext(ctx aws.Context, input *GetRegionsInput, opts ...request.Option) (*GetRegionsOutput, error) {
+ req, out := c.GetRegionsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetRelationalDatabase = "GetRelationalDatabase"
+
+// GetRelationalDatabaseRequest generates a "aws/request.Request" representing the
+// client's request for the GetRelationalDatabase operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetRelationalDatabase for more information on using the GetRelationalDatabase
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetRelationalDatabaseRequest method.
+// req, resp := client.GetRelationalDatabaseRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabase
+func (c *Lightsail) GetRelationalDatabaseRequest(input *GetRelationalDatabaseInput) (req *request.Request, output *GetRelationalDatabaseOutput) {
+ op := &request.Operation{
+ Name: opGetRelationalDatabase,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetRelationalDatabaseInput{}
+ }
+
+ output = &GetRelationalDatabaseOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetRelationalDatabase API operation for Amazon Lightsail.
+//
+// Returns information about a specific database in Amazon Lightsail.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetRelationalDatabase for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabase
+func (c *Lightsail) GetRelationalDatabase(input *GetRelationalDatabaseInput) (*GetRelationalDatabaseOutput, error) {
+ req, out := c.GetRelationalDatabaseRequest(input)
+ return out, req.Send()
+}
+
+// GetRelationalDatabaseWithContext is the same as GetRelationalDatabase with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetRelationalDatabase for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetRelationalDatabaseWithContext(ctx aws.Context, input *GetRelationalDatabaseInput, opts ...request.Option) (*GetRelationalDatabaseOutput, error) {
+ req, out := c.GetRelationalDatabaseRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetRelationalDatabaseBlueprints = "GetRelationalDatabaseBlueprints"
+
+// GetRelationalDatabaseBlueprintsRequest generates a "aws/request.Request" representing the
+// client's request for the GetRelationalDatabaseBlueprints operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetRelationalDatabaseBlueprints for more information on using the GetRelationalDatabaseBlueprints
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetRelationalDatabaseBlueprintsRequest method.
+// req, resp := client.GetRelationalDatabaseBlueprintsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseBlueprints
+func (c *Lightsail) GetRelationalDatabaseBlueprintsRequest(input *GetRelationalDatabaseBlueprintsInput) (req *request.Request, output *GetRelationalDatabaseBlueprintsOutput) {
+ op := &request.Operation{
+ Name: opGetRelationalDatabaseBlueprints,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetRelationalDatabaseBlueprintsInput{}
+ }
+
+ output = &GetRelationalDatabaseBlueprintsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetRelationalDatabaseBlueprints API operation for Amazon Lightsail.
+//
+// Returns a list of available database blueprints in Amazon Lightsail. A blueprint
+// describes the major engine version of a database.
+//
+// You can use a blueprint ID to create a new database that runs a specific
+// database engine.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetRelationalDatabaseBlueprints for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseBlueprints
+func (c *Lightsail) GetRelationalDatabaseBlueprints(input *GetRelationalDatabaseBlueprintsInput) (*GetRelationalDatabaseBlueprintsOutput, error) {
+ req, out := c.GetRelationalDatabaseBlueprintsRequest(input)
+ return out, req.Send()
+}
+
+// GetRelationalDatabaseBlueprintsWithContext is the same as GetRelationalDatabaseBlueprints with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetRelationalDatabaseBlueprints for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetRelationalDatabaseBlueprintsWithContext(ctx aws.Context, input *GetRelationalDatabaseBlueprintsInput, opts ...request.Option) (*GetRelationalDatabaseBlueprintsOutput, error) {
+ req, out := c.GetRelationalDatabaseBlueprintsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetRelationalDatabaseBundles = "GetRelationalDatabaseBundles"
+
+// GetRelationalDatabaseBundlesRequest generates a "aws/request.Request" representing the
+// client's request for the GetRelationalDatabaseBundles operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetRelationalDatabaseBundles for more information on using the GetRelationalDatabaseBundles
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetRelationalDatabaseBundlesRequest method.
+// req, resp := client.GetRelationalDatabaseBundlesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseBundles
+func (c *Lightsail) GetRelationalDatabaseBundlesRequest(input *GetRelationalDatabaseBundlesInput) (req *request.Request, output *GetRelationalDatabaseBundlesOutput) {
+ op := &request.Operation{
+ Name: opGetRelationalDatabaseBundles,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetRelationalDatabaseBundlesInput{}
+ }
+
+ output = &GetRelationalDatabaseBundlesOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetRelationalDatabaseBundles API operation for Amazon Lightsail.
+//
+// Returns the list of bundles that are available in Amazon Lightsail. A bundle
+// describes the performance specifications for a database.
+//
+// You can use a bundle ID to create a new database with explicit performance
+// specifications.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetRelationalDatabaseBundles for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseBundles
+func (c *Lightsail) GetRelationalDatabaseBundles(input *GetRelationalDatabaseBundlesInput) (*GetRelationalDatabaseBundlesOutput, error) {
+ req, out := c.GetRelationalDatabaseBundlesRequest(input)
+ return out, req.Send()
+}
+
+// GetRelationalDatabaseBundlesWithContext is the same as GetRelationalDatabaseBundles with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetRelationalDatabaseBundles for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetRelationalDatabaseBundlesWithContext(ctx aws.Context, input *GetRelationalDatabaseBundlesInput, opts ...request.Option) (*GetRelationalDatabaseBundlesOutput, error) {
+ req, out := c.GetRelationalDatabaseBundlesRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetRelationalDatabaseEvents = "GetRelationalDatabaseEvents"
+
+// GetRelationalDatabaseEventsRequest generates a "aws/request.Request" representing the
+// client's request for the GetRelationalDatabaseEvents operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetRelationalDatabaseEvents for more information on using the GetRelationalDatabaseEvents
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetRelationalDatabaseEventsRequest method.
+// req, resp := client.GetRelationalDatabaseEventsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseEvents
+func (c *Lightsail) GetRelationalDatabaseEventsRequest(input *GetRelationalDatabaseEventsInput) (req *request.Request, output *GetRelationalDatabaseEventsOutput) {
+ op := &request.Operation{
+ Name: opGetRelationalDatabaseEvents,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetRelationalDatabaseEventsInput{}
+ }
+
+ output = &GetRelationalDatabaseEventsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetRelationalDatabaseEvents API operation for Amazon Lightsail.
+//
+// Returns a list of events for a specific database in Amazon Lightsail.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetRelationalDatabaseEvents for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseEvents
+func (c *Lightsail) GetRelationalDatabaseEvents(input *GetRelationalDatabaseEventsInput) (*GetRelationalDatabaseEventsOutput, error) {
+ req, out := c.GetRelationalDatabaseEventsRequest(input)
+ return out, req.Send()
+}
+
+// GetRelationalDatabaseEventsWithContext is the same as GetRelationalDatabaseEvents with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetRelationalDatabaseEvents for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetRelationalDatabaseEventsWithContext(ctx aws.Context, input *GetRelationalDatabaseEventsInput, opts ...request.Option) (*GetRelationalDatabaseEventsOutput, error) {
+ req, out := c.GetRelationalDatabaseEventsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetRelationalDatabaseLogEvents = "GetRelationalDatabaseLogEvents"
+
+// GetRelationalDatabaseLogEventsRequest generates a "aws/request.Request" representing the
+// client's request for the GetRelationalDatabaseLogEvents operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetRelationalDatabaseLogEvents for more information on using the GetRelationalDatabaseLogEvents
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetRelationalDatabaseLogEventsRequest method.
+// req, resp := client.GetRelationalDatabaseLogEventsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseLogEvents
+func (c *Lightsail) GetRelationalDatabaseLogEventsRequest(input *GetRelationalDatabaseLogEventsInput) (req *request.Request, output *GetRelationalDatabaseLogEventsOutput) {
+ op := &request.Operation{
+ Name: opGetRelationalDatabaseLogEvents,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetRelationalDatabaseLogEventsInput{}
+ }
+
+ output = &GetRelationalDatabaseLogEventsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetRelationalDatabaseLogEvents API operation for Amazon Lightsail.
+//
+// Returns a list of log events for a database in Amazon Lightsail.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetRelationalDatabaseLogEvents for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseLogEvents
+func (c *Lightsail) GetRelationalDatabaseLogEvents(input *GetRelationalDatabaseLogEventsInput) (*GetRelationalDatabaseLogEventsOutput, error) {
+ req, out := c.GetRelationalDatabaseLogEventsRequest(input)
+ return out, req.Send()
+}
+
+// GetRelationalDatabaseLogEventsWithContext is the same as GetRelationalDatabaseLogEvents with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetRelationalDatabaseLogEvents for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetRelationalDatabaseLogEventsWithContext(ctx aws.Context, input *GetRelationalDatabaseLogEventsInput, opts ...request.Option) (*GetRelationalDatabaseLogEventsOutput, error) {
+ req, out := c.GetRelationalDatabaseLogEventsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetRelationalDatabaseLogStreams = "GetRelationalDatabaseLogStreams"
+
+// GetRelationalDatabaseLogStreamsRequest generates a "aws/request.Request" representing the
+// client's request for the GetRelationalDatabaseLogStreams operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetRelationalDatabaseLogStreams for more information on using the GetRelationalDatabaseLogStreams
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetRelationalDatabaseLogStreamsRequest method.
+// req, resp := client.GetRelationalDatabaseLogStreamsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseLogStreams
+func (c *Lightsail) GetRelationalDatabaseLogStreamsRequest(input *GetRelationalDatabaseLogStreamsInput) (req *request.Request, output *GetRelationalDatabaseLogStreamsOutput) {
+ op := &request.Operation{
+ Name: opGetRelationalDatabaseLogStreams,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetRelationalDatabaseLogStreamsInput{}
+ }
+
+ output = &GetRelationalDatabaseLogStreamsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetRelationalDatabaseLogStreams API operation for Amazon Lightsail.
+//
+// Returns a list of available log streams for a specific database in Amazon
+// Lightsail.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetRelationalDatabaseLogStreams for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseLogStreams
+func (c *Lightsail) GetRelationalDatabaseLogStreams(input *GetRelationalDatabaseLogStreamsInput) (*GetRelationalDatabaseLogStreamsOutput, error) {
+ req, out := c.GetRelationalDatabaseLogStreamsRequest(input)
+ return out, req.Send()
+}
+
+// GetRelationalDatabaseLogStreamsWithContext is the same as GetRelationalDatabaseLogStreams with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetRelationalDatabaseLogStreams for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetRelationalDatabaseLogStreamsWithContext(ctx aws.Context, input *GetRelationalDatabaseLogStreamsInput, opts ...request.Option) (*GetRelationalDatabaseLogStreamsOutput, error) {
+ req, out := c.GetRelationalDatabaseLogStreamsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetRelationalDatabaseMasterUserPassword = "GetRelationalDatabaseMasterUserPassword"
+
+// GetRelationalDatabaseMasterUserPasswordRequest generates a "aws/request.Request" representing the
+// client's request for the GetRelationalDatabaseMasterUserPassword operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetRelationalDatabaseMasterUserPassword for more information on using the GetRelationalDatabaseMasterUserPassword
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetRelationalDatabaseMasterUserPasswordRequest method.
+// req, resp := client.GetRelationalDatabaseMasterUserPasswordRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseMasterUserPassword
+func (c *Lightsail) GetRelationalDatabaseMasterUserPasswordRequest(input *GetRelationalDatabaseMasterUserPasswordInput) (req *request.Request, output *GetRelationalDatabaseMasterUserPasswordOutput) {
+ op := &request.Operation{
+ Name: opGetRelationalDatabaseMasterUserPassword,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetRelationalDatabaseMasterUserPasswordInput{}
+ }
+
+ output = &GetRelationalDatabaseMasterUserPasswordOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetRelationalDatabaseMasterUserPassword API operation for Amazon Lightsail.
+//
+// Returns the current, previous, or pending versions of the master user password
+// for a Lightsail database.
+//
+// The GetRelationalDatabaseMasterUserPassword operation supports tag-based
+// access control via resource tags applied to the resource identified by relationalDatabaseName.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetRelationalDatabaseMasterUserPassword for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseMasterUserPassword
+func (c *Lightsail) GetRelationalDatabaseMasterUserPassword(input *GetRelationalDatabaseMasterUserPasswordInput) (*GetRelationalDatabaseMasterUserPasswordOutput, error) {
+ req, out := c.GetRelationalDatabaseMasterUserPasswordRequest(input)
+ return out, req.Send()
+}
+
+// GetRelationalDatabaseMasterUserPasswordWithContext is the same as GetRelationalDatabaseMasterUserPassword with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetRelationalDatabaseMasterUserPassword for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetRelationalDatabaseMasterUserPasswordWithContext(ctx aws.Context, input *GetRelationalDatabaseMasterUserPasswordInput, opts ...request.Option) (*GetRelationalDatabaseMasterUserPasswordOutput, error) {
+ req, out := c.GetRelationalDatabaseMasterUserPasswordRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetRelationalDatabaseMetricData = "GetRelationalDatabaseMetricData"
+
+// GetRelationalDatabaseMetricDataRequest generates a "aws/request.Request" representing the
+// client's request for the GetRelationalDatabaseMetricData operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetRelationalDatabaseMetricData for more information on using the GetRelationalDatabaseMetricData
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetRelationalDatabaseMetricDataRequest method.
+// req, resp := client.GetRelationalDatabaseMetricDataRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseMetricData
+func (c *Lightsail) GetRelationalDatabaseMetricDataRequest(input *GetRelationalDatabaseMetricDataInput) (req *request.Request, output *GetRelationalDatabaseMetricDataOutput) {
+ op := &request.Operation{
+ Name: opGetRelationalDatabaseMetricData,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetRelationalDatabaseMetricDataInput{}
+ }
+
+ output = &GetRelationalDatabaseMetricDataOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetRelationalDatabaseMetricData API operation for Amazon Lightsail.
+//
+// Returns the data points of the specified metric for a database in Amazon
+// Lightsail.
+//
+// Metrics report the utilization of your resources, and the error counts generated
+// by them. Monitor and collect metric data regularly to maintain the reliability,
+// availability, and performance of your resources.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetRelationalDatabaseMetricData for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseMetricData
+func (c *Lightsail) GetRelationalDatabaseMetricData(input *GetRelationalDatabaseMetricDataInput) (*GetRelationalDatabaseMetricDataOutput, error) {
+ req, out := c.GetRelationalDatabaseMetricDataRequest(input)
+ return out, req.Send()
+}
+
+// GetRelationalDatabaseMetricDataWithContext is the same as GetRelationalDatabaseMetricData with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetRelationalDatabaseMetricData for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetRelationalDatabaseMetricDataWithContext(ctx aws.Context, input *GetRelationalDatabaseMetricDataInput, opts ...request.Option) (*GetRelationalDatabaseMetricDataOutput, error) {
+ req, out := c.GetRelationalDatabaseMetricDataRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetRelationalDatabaseParameters = "GetRelationalDatabaseParameters"
+
+// GetRelationalDatabaseParametersRequest generates a "aws/request.Request" representing the
+// client's request for the GetRelationalDatabaseParameters operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetRelationalDatabaseParameters for more information on using the GetRelationalDatabaseParameters
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetRelationalDatabaseParametersRequest method.
+// req, resp := client.GetRelationalDatabaseParametersRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseParameters
+func (c *Lightsail) GetRelationalDatabaseParametersRequest(input *GetRelationalDatabaseParametersInput) (req *request.Request, output *GetRelationalDatabaseParametersOutput) {
+ op := &request.Operation{
+ Name: opGetRelationalDatabaseParameters,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetRelationalDatabaseParametersInput{}
+ }
+
+ output = &GetRelationalDatabaseParametersOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetRelationalDatabaseParameters API operation for Amazon Lightsail.
+//
+// Returns all of the runtime parameters offered by the underlying database
+// software, or engine, for a specific database in Amazon Lightsail.
+//
+// In addition to the parameter names and values, this operation returns other
+// information about each parameter. This information includes whether changes
+// require a reboot, whether the parameter is modifiable, the allowed values,
+// and the data types.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetRelationalDatabaseParameters for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseParameters
+func (c *Lightsail) GetRelationalDatabaseParameters(input *GetRelationalDatabaseParametersInput) (*GetRelationalDatabaseParametersOutput, error) {
+ req, out := c.GetRelationalDatabaseParametersRequest(input)
+ return out, req.Send()
+}
+
+// GetRelationalDatabaseParametersWithContext is the same as GetRelationalDatabaseParameters with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetRelationalDatabaseParameters for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetRelationalDatabaseParametersWithContext(ctx aws.Context, input *GetRelationalDatabaseParametersInput, opts ...request.Option) (*GetRelationalDatabaseParametersOutput, error) {
+ req, out := c.GetRelationalDatabaseParametersRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetRelationalDatabaseSnapshot = "GetRelationalDatabaseSnapshot"
+
+// GetRelationalDatabaseSnapshotRequest generates a "aws/request.Request" representing the
+// client's request for the GetRelationalDatabaseSnapshot operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetRelationalDatabaseSnapshot for more information on using the GetRelationalDatabaseSnapshot
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetRelationalDatabaseSnapshotRequest method.
+// req, resp := client.GetRelationalDatabaseSnapshotRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseSnapshot
+func (c *Lightsail) GetRelationalDatabaseSnapshotRequest(input *GetRelationalDatabaseSnapshotInput) (req *request.Request, output *GetRelationalDatabaseSnapshotOutput) {
+ op := &request.Operation{
+ Name: opGetRelationalDatabaseSnapshot,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetRelationalDatabaseSnapshotInput{}
+ }
+
+ output = &GetRelationalDatabaseSnapshotOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetRelationalDatabaseSnapshot API operation for Amazon Lightsail.
+//
+// Returns information about a specific database snapshot in Amazon Lightsail.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetRelationalDatabaseSnapshot for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseSnapshot
+func (c *Lightsail) GetRelationalDatabaseSnapshot(input *GetRelationalDatabaseSnapshotInput) (*GetRelationalDatabaseSnapshotOutput, error) {
+ req, out := c.GetRelationalDatabaseSnapshotRequest(input)
+ return out, req.Send()
+}
+
+// GetRelationalDatabaseSnapshotWithContext is the same as GetRelationalDatabaseSnapshot with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetRelationalDatabaseSnapshot for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetRelationalDatabaseSnapshotWithContext(ctx aws.Context, input *GetRelationalDatabaseSnapshotInput, opts ...request.Option) (*GetRelationalDatabaseSnapshotOutput, error) {
+ req, out := c.GetRelationalDatabaseSnapshotRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetRelationalDatabaseSnapshots = "GetRelationalDatabaseSnapshots"
+
+// GetRelationalDatabaseSnapshotsRequest generates a "aws/request.Request" representing the
+// client's request for the GetRelationalDatabaseSnapshots operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetRelationalDatabaseSnapshots for more information on using the GetRelationalDatabaseSnapshots
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetRelationalDatabaseSnapshotsRequest method.
+// req, resp := client.GetRelationalDatabaseSnapshotsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseSnapshots
+func (c *Lightsail) GetRelationalDatabaseSnapshotsRequest(input *GetRelationalDatabaseSnapshotsInput) (req *request.Request, output *GetRelationalDatabaseSnapshotsOutput) {
+ op := &request.Operation{
+ Name: opGetRelationalDatabaseSnapshots,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetRelationalDatabaseSnapshotsInput{}
+ }
+
+ output = &GetRelationalDatabaseSnapshotsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetRelationalDatabaseSnapshots API operation for Amazon Lightsail.
+//
+// Returns information about all of your database snapshots in Amazon Lightsail.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetRelationalDatabaseSnapshots for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseSnapshots
+func (c *Lightsail) GetRelationalDatabaseSnapshots(input *GetRelationalDatabaseSnapshotsInput) (*GetRelationalDatabaseSnapshotsOutput, error) {
+ req, out := c.GetRelationalDatabaseSnapshotsRequest(input)
+ return out, req.Send()
+}
+
+// GetRelationalDatabaseSnapshotsWithContext is the same as GetRelationalDatabaseSnapshots with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetRelationalDatabaseSnapshots for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetRelationalDatabaseSnapshotsWithContext(ctx aws.Context, input *GetRelationalDatabaseSnapshotsInput, opts ...request.Option) (*GetRelationalDatabaseSnapshotsOutput, error) {
+ req, out := c.GetRelationalDatabaseSnapshotsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetRelationalDatabases = "GetRelationalDatabases"
+
+// GetRelationalDatabasesRequest generates a "aws/request.Request" representing the
+// client's request for the GetRelationalDatabases operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetRelationalDatabases for more information on using the GetRelationalDatabases
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetRelationalDatabasesRequest method.
+// req, resp := client.GetRelationalDatabasesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabases
+func (c *Lightsail) GetRelationalDatabasesRequest(input *GetRelationalDatabasesInput) (req *request.Request, output *GetRelationalDatabasesOutput) {
+ op := &request.Operation{
+ Name: opGetRelationalDatabases,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetRelationalDatabasesInput{}
+ }
+
+ output = &GetRelationalDatabasesOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetRelationalDatabases API operation for Amazon Lightsail.
+//
+// Returns information about all of your databases in Amazon Lightsail.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetRelationalDatabases for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabases
+func (c *Lightsail) GetRelationalDatabases(input *GetRelationalDatabasesInput) (*GetRelationalDatabasesOutput, error) {
+ req, out := c.GetRelationalDatabasesRequest(input)
+ return out, req.Send()
+}
+
+// GetRelationalDatabasesWithContext is the same as GetRelationalDatabases with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetRelationalDatabases for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetRelationalDatabasesWithContext(ctx aws.Context, input *GetRelationalDatabasesInput, opts ...request.Option) (*GetRelationalDatabasesOutput, error) {
+ req, out := c.GetRelationalDatabasesRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetStaticIp = "GetStaticIp"
+
+// GetStaticIpRequest generates a "aws/request.Request" representing the
+// client's request for the GetStaticIp operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetStaticIp for more information on using the GetStaticIp
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetStaticIpRequest method.
+// req, resp := client.GetStaticIpRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetStaticIp
+func (c *Lightsail) GetStaticIpRequest(input *GetStaticIpInput) (req *request.Request, output *GetStaticIpOutput) {
+ op := &request.Operation{
+ Name: opGetStaticIp,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetStaticIpInput{}
+ }
+
+ output = &GetStaticIpOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetStaticIp API operation for Amazon Lightsail.
+//
+// Returns information about an Amazon Lightsail static IP.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetStaticIp for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetStaticIp
+func (c *Lightsail) GetStaticIp(input *GetStaticIpInput) (*GetStaticIpOutput, error) {
+ req, out := c.GetStaticIpRequest(input)
+ return out, req.Send()
+}
+
+// GetStaticIpWithContext is the same as GetStaticIp with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetStaticIp for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetStaticIpWithContext(ctx aws.Context, input *GetStaticIpInput, opts ...request.Option) (*GetStaticIpOutput, error) {
+ req, out := c.GetStaticIpRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetStaticIps = "GetStaticIps"
+
+// GetStaticIpsRequest generates a "aws/request.Request" representing the
+// client's request for the GetStaticIps operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetStaticIps for more information on using the GetStaticIps
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetStaticIpsRequest method.
+// req, resp := client.GetStaticIpsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetStaticIps
+func (c *Lightsail) GetStaticIpsRequest(input *GetStaticIpsInput) (req *request.Request, output *GetStaticIpsOutput) {
+ op := &request.Operation{
+ Name: opGetStaticIps,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetStaticIpsInput{}
+ }
+
+ output = &GetStaticIpsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetStaticIps API operation for Amazon Lightsail.
+//
+// Returns information about all static IPs in the user's account.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetStaticIps for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetStaticIps
+func (c *Lightsail) GetStaticIps(input *GetStaticIpsInput) (*GetStaticIpsOutput, error) {
+ req, out := c.GetStaticIpsRequest(input)
+ return out, req.Send()
+}
+
+// GetStaticIpsWithContext is the same as GetStaticIps with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetStaticIps for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetStaticIpsWithContext(ctx aws.Context, input *GetStaticIpsInput, opts ...request.Option) (*GetStaticIpsOutput, error) {
+ req, out := c.GetStaticIpsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opImportKeyPair = "ImportKeyPair"
+
+// ImportKeyPairRequest generates a "aws/request.Request" representing the
+// client's request for the ImportKeyPair operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ImportKeyPair for more information on using the ImportKeyPair
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the ImportKeyPairRequest method.
+// req, resp := client.ImportKeyPairRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/ImportKeyPair
+func (c *Lightsail) ImportKeyPairRequest(input *ImportKeyPairInput) (req *request.Request, output *ImportKeyPairOutput) {
+ op := &request.Operation{
+ Name: opImportKeyPair,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ImportKeyPairInput{}
+ }
+
+ output = &ImportKeyPairOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ImportKeyPair API operation for Amazon Lightsail.
+//
+// Imports a public SSH key from a specific key pair.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation ImportKeyPair for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/ImportKeyPair
+func (c *Lightsail) ImportKeyPair(input *ImportKeyPairInput) (*ImportKeyPairOutput, error) {
+ req, out := c.ImportKeyPairRequest(input)
+ return out, req.Send()
+}
+
+// ImportKeyPairWithContext is the same as ImportKeyPair with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ImportKeyPair for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) ImportKeyPairWithContext(ctx aws.Context, input *ImportKeyPairInput, opts ...request.Option) (*ImportKeyPairOutput, error) {
+ req, out := c.ImportKeyPairRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opIsVpcPeered = "IsVpcPeered"
+
+// IsVpcPeeredRequest generates a "aws/request.Request" representing the
+// client's request for the IsVpcPeered operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See IsVpcPeered for more information on using the IsVpcPeered
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the IsVpcPeeredRequest method.
+// req, resp := client.IsVpcPeeredRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/IsVpcPeered
+func (c *Lightsail) IsVpcPeeredRequest(input *IsVpcPeeredInput) (req *request.Request, output *IsVpcPeeredOutput) {
+ op := &request.Operation{
+ Name: opIsVpcPeered,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &IsVpcPeeredInput{}
+ }
+
+ output = &IsVpcPeeredOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// IsVpcPeered API operation for Amazon Lightsail.
+//
+// Returns a Boolean value indicating whether your Lightsail VPC is peered.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation IsVpcPeered for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/IsVpcPeered
+func (c *Lightsail) IsVpcPeered(input *IsVpcPeeredInput) (*IsVpcPeeredOutput, error) {
+ req, out := c.IsVpcPeeredRequest(input)
+ return out, req.Send()
+}
+
+// IsVpcPeeredWithContext is the same as IsVpcPeered with the addition of
+// the ability to pass a context and additional request options.
+//
+// See IsVpcPeered for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) IsVpcPeeredWithContext(ctx aws.Context, input *IsVpcPeeredInput, opts ...request.Option) (*IsVpcPeeredOutput, error) {
+ req, out := c.IsVpcPeeredRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opOpenInstancePublicPorts = "OpenInstancePublicPorts"
+
+// OpenInstancePublicPortsRequest generates a "aws/request.Request" representing the
+// client's request for the OpenInstancePublicPorts operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See OpenInstancePublicPorts for more information on using the OpenInstancePublicPorts
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the OpenInstancePublicPortsRequest method.
+// req, resp := client.OpenInstancePublicPortsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/OpenInstancePublicPorts
+func (c *Lightsail) OpenInstancePublicPortsRequest(input *OpenInstancePublicPortsInput) (req *request.Request, output *OpenInstancePublicPortsOutput) {
+ op := &request.Operation{
+ Name: opOpenInstancePublicPorts,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &OpenInstancePublicPortsInput{}
+ }
+
+ output = &OpenInstancePublicPortsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// OpenInstancePublicPorts API operation for Amazon Lightsail.
+//
+// Opens ports for a specific Amazon Lightsail instance, and specifies the IP
+// addresses allowed to connect to the instance through the ports, and the protocol.
+//
+// The OpenInstancePublicPorts action supports tag-based access control via
+// resource tags applied to the resource identified by instanceName. For more
+// information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation OpenInstancePublicPorts for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/OpenInstancePublicPorts
+func (c *Lightsail) OpenInstancePublicPorts(input *OpenInstancePublicPortsInput) (*OpenInstancePublicPortsOutput, error) {
+ req, out := c.OpenInstancePublicPortsRequest(input)
+ return out, req.Send()
+}
+
+// OpenInstancePublicPortsWithContext is the same as OpenInstancePublicPorts with the addition of
+// the ability to pass a context and additional request options.
+//
+// See OpenInstancePublicPorts for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) OpenInstancePublicPortsWithContext(ctx aws.Context, input *OpenInstancePublicPortsInput, opts ...request.Option) (*OpenInstancePublicPortsOutput, error) {
+ req, out := c.OpenInstancePublicPortsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opPeerVpc = "PeerVpc"
+
+// PeerVpcRequest generates a "aws/request.Request" representing the
+// client's request for the PeerVpc operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PeerVpc for more information on using the PeerVpc
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PeerVpcRequest method.
+// req, resp := client.PeerVpcRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/PeerVpc
+func (c *Lightsail) PeerVpcRequest(input *PeerVpcInput) (req *request.Request, output *PeerVpcOutput) {
+ op := &request.Operation{
+ Name: opPeerVpc,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &PeerVpcInput{}
+ }
+
+ output = &PeerVpcOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// PeerVpc API operation for Amazon Lightsail.
+//
+// Peers the Lightsail VPC with the user's default VPC.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation PeerVpc for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/PeerVpc
+func (c *Lightsail) PeerVpc(input *PeerVpcInput) (*PeerVpcOutput, error) {
+ req, out := c.PeerVpcRequest(input)
+ return out, req.Send()
+}
+
+// PeerVpcWithContext is the same as PeerVpc with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PeerVpc for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) PeerVpcWithContext(ctx aws.Context, input *PeerVpcInput, opts ...request.Option) (*PeerVpcOutput, error) {
+ req, out := c.PeerVpcRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opPutAlarm = "PutAlarm"
+
+// PutAlarmRequest generates a "aws/request.Request" representing the
+// client's request for the PutAlarm operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutAlarm for more information on using the PutAlarm
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutAlarmRequest method.
+// req, resp := client.PutAlarmRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/PutAlarm
+func (c *Lightsail) PutAlarmRequest(input *PutAlarmInput) (req *request.Request, output *PutAlarmOutput) {
+ op := &request.Operation{
+ Name: opPutAlarm,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &PutAlarmInput{}
+ }
+
+ output = &PutAlarmOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// PutAlarm API operation for Amazon Lightsail.
+//
+// Creates or updates an alarm, and associates it with the specified metric.
+//
+// An alarm is used to monitor a single metric for one of your resources. When
+// a metric condition is met, the alarm can notify you by email, SMS text message,
+// and a banner displayed on the Amazon Lightsail console. For more information,
+// see Alarms in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-alarms).
+//
+// When this action creates an alarm, the alarm state is immediately set to
+// INSUFFICIENT_DATA. The alarm is then evaluated and its state is set appropriately.
+// Any actions associated with the new state are then executed.
+//
+// When you update an existing alarm, its state is left unchanged, but the update
+// completely overwrites the previous configuration of the alarm. The alarm
+// is then evaluated with the updated configuration.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation PutAlarm for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/PutAlarm
+func (c *Lightsail) PutAlarm(input *PutAlarmInput) (*PutAlarmOutput, error) {
+ req, out := c.PutAlarmRequest(input)
+ return out, req.Send()
+}
+
+// PutAlarmWithContext is the same as PutAlarm with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutAlarm for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) PutAlarmWithContext(ctx aws.Context, input *PutAlarmInput, opts ...request.Option) (*PutAlarmOutput, error) {
+ req, out := c.PutAlarmRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opPutInstancePublicPorts = "PutInstancePublicPorts"
+
+// PutInstancePublicPortsRequest generates a "aws/request.Request" representing the
+// client's request for the PutInstancePublicPorts operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutInstancePublicPorts for more information on using the PutInstancePublicPorts
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutInstancePublicPortsRequest method.
+// req, resp := client.PutInstancePublicPortsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/PutInstancePublicPorts
+func (c *Lightsail) PutInstancePublicPortsRequest(input *PutInstancePublicPortsInput) (req *request.Request, output *PutInstancePublicPortsOutput) {
+ op := &request.Operation{
+ Name: opPutInstancePublicPorts,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &PutInstancePublicPortsInput{}
+ }
+
+ output = &PutInstancePublicPortsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// PutInstancePublicPorts API operation for Amazon Lightsail.
+//
+// Opens ports for a specific Amazon Lightsail instance, and specifies the IP
+// addresses allowed to connect to the instance through the ports, and the protocol.
+// This action also closes all currently open ports that are not included in
+// the request. Include all of the ports and the protocols you want to open
+// in your PutInstancePublicPortsrequest. Or use the OpenInstancePublicPorts
+// action to open ports without closing currently open ports.
+//
+// The PutInstancePublicPorts action supports tag-based access control via resource
+// tags applied to the resource identified by instanceName. For more information,
+// see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation PutInstancePublicPorts for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/PutInstancePublicPorts
+func (c *Lightsail) PutInstancePublicPorts(input *PutInstancePublicPortsInput) (*PutInstancePublicPortsOutput, error) {
+ req, out := c.PutInstancePublicPortsRequest(input)
+ return out, req.Send()
+}
+
+// PutInstancePublicPortsWithContext is the same as PutInstancePublicPorts with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutInstancePublicPorts for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) PutInstancePublicPortsWithContext(ctx aws.Context, input *PutInstancePublicPortsInput, opts ...request.Option) (*PutInstancePublicPortsOutput, error) {
+ req, out := c.PutInstancePublicPortsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opRebootInstance = "RebootInstance"
+
+// RebootInstanceRequest generates a "aws/request.Request" representing the
+// client's request for the RebootInstance operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See RebootInstance for more information on using the RebootInstance
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the RebootInstanceRequest method.
+// req, resp := client.RebootInstanceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/RebootInstance
+func (c *Lightsail) RebootInstanceRequest(input *RebootInstanceInput) (req *request.Request, output *RebootInstanceOutput) {
+ op := &request.Operation{
+ Name: opRebootInstance,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &RebootInstanceInput{}
+ }
+
+ output = &RebootInstanceOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// RebootInstance API operation for Amazon Lightsail.
+//
+// Restarts a specific instance.
+//
+// The reboot instance operation supports tag-based access control via resource
+// tags applied to the resource identified by instance name. For more information,
+// see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation RebootInstance for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/RebootInstance
+func (c *Lightsail) RebootInstance(input *RebootInstanceInput) (*RebootInstanceOutput, error) {
+ req, out := c.RebootInstanceRequest(input)
+ return out, req.Send()
+}
+
+// RebootInstanceWithContext is the same as RebootInstance with the addition of
+// the ability to pass a context and additional request options.
+//
+// See RebootInstance for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) RebootInstanceWithContext(ctx aws.Context, input *RebootInstanceInput, opts ...request.Option) (*RebootInstanceOutput, error) {
+ req, out := c.RebootInstanceRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opRebootRelationalDatabase = "RebootRelationalDatabase"
+
+// RebootRelationalDatabaseRequest generates a "aws/request.Request" representing the
+// client's request for the RebootRelationalDatabase operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See RebootRelationalDatabase for more information on using the RebootRelationalDatabase
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the RebootRelationalDatabaseRequest method.
+// req, resp := client.RebootRelationalDatabaseRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/RebootRelationalDatabase
+func (c *Lightsail) RebootRelationalDatabaseRequest(input *RebootRelationalDatabaseInput) (req *request.Request, output *RebootRelationalDatabaseOutput) {
+ op := &request.Operation{
+ Name: opRebootRelationalDatabase,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &RebootRelationalDatabaseInput{}
+ }
+
+ output = &RebootRelationalDatabaseOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// RebootRelationalDatabase API operation for Amazon Lightsail.
+//
+// Restarts a specific database in Amazon Lightsail.
+//
+// The reboot relational database operation supports tag-based access control
+// via resource tags applied to the resource identified by relationalDatabaseName.
+// For more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation RebootRelationalDatabase for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/RebootRelationalDatabase
+func (c *Lightsail) RebootRelationalDatabase(input *RebootRelationalDatabaseInput) (*RebootRelationalDatabaseOutput, error) {
+	// Build and immediately send the request; out is valid only when err is nil.
+	r, out := c.RebootRelationalDatabaseRequest(input)
+	err := r.Send()
+	return out, err
+}
+
+// RebootRelationalDatabaseWithContext is the same as RebootRelationalDatabase with the addition of
+// the ability to pass a context and additional request options.
+//
+// See RebootRelationalDatabase for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) RebootRelationalDatabaseWithContext(ctx aws.Context, input *RebootRelationalDatabaseInput, opts ...request.Option) (*RebootRelationalDatabaseOutput, error) {
+	// Build the request, attach the caller's context and options, then send.
+	r, out := c.RebootRelationalDatabaseRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	err := r.Send()
+	return out, err
+}
+
+// opRegisterContainerImage is the operation name used in the
+// request.Operation built by RegisterContainerImageRequest.
+const opRegisterContainerImage = "RegisterContainerImage"
+
+// RegisterContainerImageRequest generates a "aws/request.Request" representing the
+// client's request for the RegisterContainerImage operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See RegisterContainerImage for more information on using the RegisterContainerImage
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the RegisterContainerImageRequest method.
+// req, resp := client.RegisterContainerImageRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/RegisterContainerImage
+func (c *Lightsail) RegisterContainerImageRequest(input *RegisterContainerImageInput) (req *request.Request, output *RegisterContainerImageOutput) {
+	// Substitute an empty input so newRequest never receives nil.
+	if input == nil {
+		input = &RegisterContainerImageInput{}
+	}
+
+	output = &RegisterContainerImageOutput{}
+	req = c.newRequest(&request.Operation{
+		Name:       opRegisterContainerImage,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}, input, output)
+	return req, output
+}
+
+// RegisterContainerImage API operation for Amazon Lightsail.
+//
+// Registers a container image to your Amazon Lightsail container service.
+//
+// This action is not required if you install and use the Lightsail Control
+// (lightsailctl) plugin to push container images to your Lightsail container
+// service. For more information, see Pushing and managing container images
+// on your Amazon Lightsail container services (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-pushing-container-images)
+// in the Amazon Lightsail Developer Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation RegisterContainerImage for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/RegisterContainerImage
+func (c *Lightsail) RegisterContainerImage(input *RegisterContainerImageInput) (*RegisterContainerImageOutput, error) {
+	// Build and immediately send the request; out is valid only when err is nil.
+	r, out := c.RegisterContainerImageRequest(input)
+	err := r.Send()
+	return out, err
+}
+
+// RegisterContainerImageWithContext is the same as RegisterContainerImage with the addition of
+// the ability to pass a context and additional request options.
+//
+// See RegisterContainerImage for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) RegisterContainerImageWithContext(ctx aws.Context, input *RegisterContainerImageInput, opts ...request.Option) (*RegisterContainerImageOutput, error) {
+	// Build the request, attach the caller's context and options, then send.
+	r, out := c.RegisterContainerImageRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	err := r.Send()
+	return out, err
+}
+
+// opReleaseStaticIp is the operation name used in the
+// request.Operation built by ReleaseStaticIpRequest.
+const opReleaseStaticIp = "ReleaseStaticIp"
+
+// ReleaseStaticIpRequest generates a "aws/request.Request" representing the
+// client's request for the ReleaseStaticIp operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ReleaseStaticIp for more information on using the ReleaseStaticIp
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the ReleaseStaticIpRequest method.
+// req, resp := client.ReleaseStaticIpRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/ReleaseStaticIp
+func (c *Lightsail) ReleaseStaticIpRequest(input *ReleaseStaticIpInput) (req *request.Request, output *ReleaseStaticIpOutput) {
+	// Substitute an empty input so newRequest never receives nil.
+	if input == nil {
+		input = &ReleaseStaticIpInput{}
+	}
+
+	output = &ReleaseStaticIpOutput{}
+	req = c.newRequest(&request.Operation{
+		Name:       opReleaseStaticIp,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}, input, output)
+	return req, output
+}
+
+// ReleaseStaticIp API operation for Amazon Lightsail.
+//
+// Deletes a specific static IP from your account.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation ReleaseStaticIp for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/ReleaseStaticIp
+func (c *Lightsail) ReleaseStaticIp(input *ReleaseStaticIpInput) (*ReleaseStaticIpOutput, error) {
+	// Build and immediately send the request; out is valid only when err is nil.
+	r, out := c.ReleaseStaticIpRequest(input)
+	err := r.Send()
+	return out, err
+}
+
+// ReleaseStaticIpWithContext is the same as ReleaseStaticIp with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ReleaseStaticIp for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) ReleaseStaticIpWithContext(ctx aws.Context, input *ReleaseStaticIpInput, opts ...request.Option) (*ReleaseStaticIpOutput, error) {
+	// Build the request, attach the caller's context and options, then send.
+	r, out := c.ReleaseStaticIpRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	err := r.Send()
+	return out, err
+}
+
+// opResetDistributionCache is the operation name used in the
+// request.Operation built by ResetDistributionCacheRequest.
+const opResetDistributionCache = "ResetDistributionCache"
+
+// ResetDistributionCacheRequest generates a "aws/request.Request" representing the
+// client's request for the ResetDistributionCache operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ResetDistributionCache for more information on using the ResetDistributionCache
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the ResetDistributionCacheRequest method.
+// req, resp := client.ResetDistributionCacheRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/ResetDistributionCache
+func (c *Lightsail) ResetDistributionCacheRequest(input *ResetDistributionCacheInput) (req *request.Request, output *ResetDistributionCacheOutput) {
+	// Substitute an empty input so newRequest never receives nil.
+	if input == nil {
+		input = &ResetDistributionCacheInput{}
+	}
+
+	output = &ResetDistributionCacheOutput{}
+	req = c.newRequest(&request.Operation{
+		Name:       opResetDistributionCache,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}, input, output)
+	return req, output
+}
+
+// ResetDistributionCache API operation for Amazon Lightsail.
+//
+// Deletes currently cached content from your Amazon Lightsail content delivery
+// network (CDN) distribution.
+//
+// After resetting the cache, the next time a content request is made, your
+// distribution pulls, serves, and caches it from the origin.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation ResetDistributionCache for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/ResetDistributionCache
+func (c *Lightsail) ResetDistributionCache(input *ResetDistributionCacheInput) (*ResetDistributionCacheOutput, error) {
+	// Build and immediately send the request; out is valid only when err is nil.
+	r, out := c.ResetDistributionCacheRequest(input)
+	err := r.Send()
+	return out, err
+}
+
+// ResetDistributionCacheWithContext is the same as ResetDistributionCache with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ResetDistributionCache for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) ResetDistributionCacheWithContext(ctx aws.Context, input *ResetDistributionCacheInput, opts ...request.Option) (*ResetDistributionCacheOutput, error) {
+	// Build the request, attach the caller's context and options, then send.
+	r, out := c.ResetDistributionCacheRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	err := r.Send()
+	return out, err
+}
+
+// opSendContactMethodVerification is the operation name used in the
+// request.Operation built by SendContactMethodVerificationRequest.
+const opSendContactMethodVerification = "SendContactMethodVerification"
+
+// SendContactMethodVerificationRequest generates a "aws/request.Request" representing the
+// client's request for the SendContactMethodVerification operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See SendContactMethodVerification for more information on using the SendContactMethodVerification
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the SendContactMethodVerificationRequest method.
+// req, resp := client.SendContactMethodVerificationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/SendContactMethodVerification
+func (c *Lightsail) SendContactMethodVerificationRequest(input *SendContactMethodVerificationInput) (req *request.Request, output *SendContactMethodVerificationOutput) {
+	// Substitute an empty input so newRequest never receives nil.
+	if input == nil {
+		input = &SendContactMethodVerificationInput{}
+	}
+
+	output = &SendContactMethodVerificationOutput{}
+	req = c.newRequest(&request.Operation{
+		Name:       opSendContactMethodVerification,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}, input, output)
+	return req, output
+}
+
+// SendContactMethodVerification API operation for Amazon Lightsail.
+//
+// Sends a verification request to an email contact method to ensure it's owned
+// by the requester. SMS contact methods don't need to be verified.
+//
+// A contact method is used to send you notifications about your Amazon Lightsail
+// resources. You can add one email address and one mobile phone number contact
+// method in each AWS Region. However, SMS text messaging is not supported in
+// some AWS Regions, and SMS text messages cannot be sent to some countries/regions.
+// For more information, see Notifications in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-notifications).
+//
+// A verification request is sent to the contact method when you initially create
+// it. Use this action to send another verification request if a previous verification
+// request was deleted, or has expired.
+//
+// Notifications are not sent to an email contact method until after it is verified,
+// and confirmed as valid.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation SendContactMethodVerification for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/SendContactMethodVerification
+func (c *Lightsail) SendContactMethodVerification(input *SendContactMethodVerificationInput) (*SendContactMethodVerificationOutput, error) {
+	// Build and immediately send the request; out is valid only when err is nil.
+	r, out := c.SendContactMethodVerificationRequest(input)
+	err := r.Send()
+	return out, err
+}
+
+// SendContactMethodVerificationWithContext is the same as SendContactMethodVerification with the addition of
+// the ability to pass a context and additional request options.
+//
+// See SendContactMethodVerification for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) SendContactMethodVerificationWithContext(ctx aws.Context, input *SendContactMethodVerificationInput, opts ...request.Option) (*SendContactMethodVerificationOutput, error) {
+	// Build the request, attach the caller's context and options, then send.
+	r, out := c.SendContactMethodVerificationRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	err := r.Send()
+	return out, err
+}
+
+// opSetIpAddressType is the operation name used in the
+// request.Operation built by SetIpAddressTypeRequest.
+const opSetIpAddressType = "SetIpAddressType"
+
+// SetIpAddressTypeRequest generates a "aws/request.Request" representing the
+// client's request for the SetIpAddressType operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See SetIpAddressType for more information on using the SetIpAddressType
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the SetIpAddressTypeRequest method.
+// req, resp := client.SetIpAddressTypeRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/SetIpAddressType
+func (c *Lightsail) SetIpAddressTypeRequest(input *SetIpAddressTypeInput) (req *request.Request, output *SetIpAddressTypeOutput) {
+	// Substitute an empty input so newRequest never receives nil.
+	if input == nil {
+		input = &SetIpAddressTypeInput{}
+	}
+
+	output = &SetIpAddressTypeOutput{}
+	req = c.newRequest(&request.Operation{
+		Name:       opSetIpAddressType,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}, input, output)
+	return req, output
+}
+
+// SetIpAddressType API operation for Amazon Lightsail.
+//
+// Sets the IP address type for an Amazon Lightsail resource.
+//
+// Use this action to enable dual-stack for a resource, which enables IPv4 and
+// IPv6 for the specified resource. Alternately, you can use this action to
+// disable dual-stack, and enable IPv4 only.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation SetIpAddressType for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/SetIpAddressType
+func (c *Lightsail) SetIpAddressType(input *SetIpAddressTypeInput) (*SetIpAddressTypeOutput, error) {
+	// Build and immediately send the request; out is valid only when err is nil.
+	r, out := c.SetIpAddressTypeRequest(input)
+	err := r.Send()
+	return out, err
+}
+
+// SetIpAddressTypeWithContext is the same as SetIpAddressType with the addition of
+// the ability to pass a context and additional request options.
+//
+// See SetIpAddressType for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) SetIpAddressTypeWithContext(ctx aws.Context, input *SetIpAddressTypeInput, opts ...request.Option) (*SetIpAddressTypeOutput, error) {
+	// Build the request, attach the caller's context and options, then send.
+	r, out := c.SetIpAddressTypeRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	err := r.Send()
+	return out, err
+}
+
+// opSetResourceAccessForBucket is the operation name used in the
+// request.Operation built by SetResourceAccessForBucketRequest.
+const opSetResourceAccessForBucket = "SetResourceAccessForBucket"
+
+// SetResourceAccessForBucketRequest generates a "aws/request.Request" representing the
+// client's request for the SetResourceAccessForBucket operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See SetResourceAccessForBucket for more information on using the SetResourceAccessForBucket
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the SetResourceAccessForBucketRequest method.
+// req, resp := client.SetResourceAccessForBucketRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/SetResourceAccessForBucket
+func (c *Lightsail) SetResourceAccessForBucketRequest(input *SetResourceAccessForBucketInput) (req *request.Request, output *SetResourceAccessForBucketOutput) {
+	// Substitute an empty input so newRequest never receives nil.
+	if input == nil {
+		input = &SetResourceAccessForBucketInput{}
+	}
+
+	output = &SetResourceAccessForBucketOutput{}
+	req = c.newRequest(&request.Operation{
+		Name:       opSetResourceAccessForBucket,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}, input, output)
+	return req, output
+}
+
+// SetResourceAccessForBucket API operation for Amazon Lightsail.
+//
+// Sets the Amazon Lightsail resources that can access the specified Lightsail
+// bucket.
+//
+// Lightsail buckets currently support setting access for Lightsail instances
+// in the same AWS Region.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation SetResourceAccessForBucket for usage and error information.
+//
+// Returned Error Types:
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * ServiceException
+// A general service exception.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/SetResourceAccessForBucket
+func (c *Lightsail) SetResourceAccessForBucket(input *SetResourceAccessForBucketInput) (*SetResourceAccessForBucketOutput, error) {
+	// Build and immediately send the request; out is valid only when err is nil.
+	r, out := c.SetResourceAccessForBucketRequest(input)
+	err := r.Send()
+	return out, err
+}
+
+// SetResourceAccessForBucketWithContext is the same as SetResourceAccessForBucket with the addition of
+// the ability to pass a context and additional request options.
+//
+// See SetResourceAccessForBucket for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) SetResourceAccessForBucketWithContext(ctx aws.Context, input *SetResourceAccessForBucketInput, opts ...request.Option) (*SetResourceAccessForBucketOutput, error) {
+	// Build the request, attach the caller's context and options, then send.
+	r, out := c.SetResourceAccessForBucketRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	err := r.Send()
+	return out, err
+}
+
+// opStartInstance is the operation name used in the
+// request.Operation built by StartInstanceRequest.
+const opStartInstance = "StartInstance"
+
+// StartInstanceRequest generates a "aws/request.Request" representing the
+// client's request for the StartInstance operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See StartInstance for more information on using the StartInstance
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the StartInstanceRequest method.
+// req, resp := client.StartInstanceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/StartInstance
+func (c *Lightsail) StartInstanceRequest(input *StartInstanceInput) (req *request.Request, output *StartInstanceOutput) {
+	// Substitute an empty input so newRequest never receives nil.
+	if input == nil {
+		input = &StartInstanceInput{}
+	}
+
+	output = &StartInstanceOutput{}
+	req = c.newRequest(&request.Operation{
+		Name:       opStartInstance,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}, input, output)
+	return req, output
+}
+
+// StartInstance API operation for Amazon Lightsail.
+//
+// Starts a specific Amazon Lightsail instance from a stopped state. To restart
+// an instance, use the reboot instance operation.
+//
+// When you start a stopped instance, Lightsail assigns a new public IP address
+// to the instance. To use the same IP address after stopping and starting an
+// instance, create a static IP address and attach it to the instance. For more
+// information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/lightsail-create-static-ip).
+//
+// The start instance operation supports tag-based access control via resource
+// tags applied to the resource identified by instance name. For more information,
+// see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation StartInstance for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/StartInstance
+func (c *Lightsail) StartInstance(input *StartInstanceInput) (*StartInstanceOutput, error) {
+ req, out := c.StartInstanceRequest(input)
+ // out is only valid when the error returned by Send is nil.
+ return out, req.Send()
+}
+
+// StartInstanceWithContext is the same as StartInstance with the addition of
+// the ability to pass a context and additional request options.
+//
+// See StartInstance for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) StartInstanceWithContext(ctx aws.Context, input *StartInstanceInput, opts ...request.Option) (*StartInstanceOutput, error) {
+ req, out := c.StartInstanceRequest(input)
+ // NOTE: a nil ctx makes SetContext panic, per the SDK's documented contract.
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// opStartRelationalDatabase is the operation name placed in request.Operation.Name.
+const opStartRelationalDatabase = "StartRelationalDatabase"
+
+// StartRelationalDatabaseRequest generates a "aws/request.Request" representing the
+// client's request for the StartRelationalDatabase operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See StartRelationalDatabase for more information on using the StartRelationalDatabase
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the StartRelationalDatabaseRequest method.
+// req, resp := client.StartRelationalDatabaseRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/StartRelationalDatabase
+func (c *Lightsail) StartRelationalDatabaseRequest(input *StartRelationalDatabaseInput) (req *request.Request, output *StartRelationalDatabaseOutput) {
+ op := &request.Operation{
+ Name: opStartRelationalDatabase,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ // Tolerate a nil input: substitute an empty input struct so the request can still be built.
+ if input == nil {
+ input = &StartRelationalDatabaseInput{}
+ }
+
+ output = &StartRelationalDatabaseOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// StartRelationalDatabase API operation for Amazon Lightsail.
+//
+// Starts a specific database from a stopped state in Amazon Lightsail. To restart
+// a database, use the reboot relational database operation.
+//
+// The start relational database operation supports tag-based access control
+// via resource tags applied to the resource identified by relationalDatabaseName.
+// For more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation StartRelationalDatabase for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/StartRelationalDatabase
+func (c *Lightsail) StartRelationalDatabase(input *StartRelationalDatabaseInput) (*StartRelationalDatabaseOutput, error) {
+ req, out := c.StartRelationalDatabaseRequest(input)
+ // out is only valid when the error returned by Send is nil.
+ return out, req.Send()
+}
+
+// StartRelationalDatabaseWithContext is the same as StartRelationalDatabase with the addition of
+// the ability to pass a context and additional request options.
+//
+// See StartRelationalDatabase for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) StartRelationalDatabaseWithContext(ctx aws.Context, input *StartRelationalDatabaseInput, opts ...request.Option) (*StartRelationalDatabaseOutput, error) {
+ req, out := c.StartRelationalDatabaseRequest(input)
+ // NOTE: a nil ctx makes SetContext panic, per the SDK's documented contract.
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// opStopInstance is the operation name placed in request.Operation.Name.
+const opStopInstance = "StopInstance"
+
+// StopInstanceRequest generates a "aws/request.Request" representing the
+// client's request for the StopInstance operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See StopInstance for more information on using the StopInstance
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the StopInstanceRequest method.
+// req, resp := client.StopInstanceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/StopInstance
+func (c *Lightsail) StopInstanceRequest(input *StopInstanceInput) (req *request.Request, output *StopInstanceOutput) {
+ op := &request.Operation{
+ Name: opStopInstance,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ // Tolerate a nil input: substitute an empty input struct so the request can still be built.
+ if input == nil {
+ input = &StopInstanceInput{}
+ }
+
+ output = &StopInstanceOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// StopInstance API operation for Amazon Lightsail.
+//
+// Stops a specific Amazon Lightsail instance that is currently running.
+//
+// When you start a stopped instance, Lightsail assigns a new public IP address
+// to the instance. To use the same IP address after stopping and starting an
+// instance, create a static IP address and attach it to the instance. For more
+// information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/lightsail-create-static-ip).
+//
+// The stop instance operation supports tag-based access control via resource
+// tags applied to the resource identified by instance name. For more information,
+// see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation StopInstance for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/StopInstance
+func (c *Lightsail) StopInstance(input *StopInstanceInput) (*StopInstanceOutput, error) {
+ req, out := c.StopInstanceRequest(input)
+ // out is only valid when the error returned by Send is nil.
+ return out, req.Send()
+}
+
+// StopInstanceWithContext is the same as StopInstance with the addition of
+// the ability to pass a context and additional request options.
+//
+// See StopInstance for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) StopInstanceWithContext(ctx aws.Context, input *StopInstanceInput, opts ...request.Option) (*StopInstanceOutput, error) {
+ req, out := c.StopInstanceRequest(input)
+ // NOTE: a nil ctx makes SetContext panic, per the SDK's documented contract.
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// opStopRelationalDatabase is the operation name placed in request.Operation.Name.
+const opStopRelationalDatabase = "StopRelationalDatabase"
+
+// StopRelationalDatabaseRequest generates a "aws/request.Request" representing the
+// client's request for the StopRelationalDatabase operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See StopRelationalDatabase for more information on using the StopRelationalDatabase
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the StopRelationalDatabaseRequest method.
+// req, resp := client.StopRelationalDatabaseRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/StopRelationalDatabase
+func (c *Lightsail) StopRelationalDatabaseRequest(input *StopRelationalDatabaseInput) (req *request.Request, output *StopRelationalDatabaseOutput) {
+ op := &request.Operation{
+ Name: opStopRelationalDatabase,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ // Tolerate a nil input: substitute an empty input struct so the request can still be built.
+ if input == nil {
+ input = &StopRelationalDatabaseInput{}
+ }
+
+ output = &StopRelationalDatabaseOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// StopRelationalDatabase API operation for Amazon Lightsail.
+//
+// Stops a specific database that is currently running in Amazon Lightsail.
+//
+// The stop relational database operation supports tag-based access control
+// via resource tags applied to the resource identified by relationalDatabaseName.
+// For more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation StopRelationalDatabase for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/StopRelationalDatabase
+func (c *Lightsail) StopRelationalDatabase(input *StopRelationalDatabaseInput) (*StopRelationalDatabaseOutput, error) {
+ req, out := c.StopRelationalDatabaseRequest(input)
+ // out is only valid when the error returned by Send is nil.
+ return out, req.Send()
+}
+
+// StopRelationalDatabaseWithContext is the same as StopRelationalDatabase with the addition of
+// the ability to pass a context and additional request options.
+//
+// See StopRelationalDatabase for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) StopRelationalDatabaseWithContext(ctx aws.Context, input *StopRelationalDatabaseInput, opts ...request.Option) (*StopRelationalDatabaseOutput, error) {
+ req, out := c.StopRelationalDatabaseRequest(input)
+ // NOTE: a nil ctx makes SetContext panic, per the SDK's documented contract.
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// opTagResource is the operation name placed in request.Operation.Name.
+const opTagResource = "TagResource"
+
+// TagResourceRequest generates a "aws/request.Request" representing the
+// client's request for the TagResource operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See TagResource for more information on using the TagResource
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the TagResourceRequest method.
+// req, resp := client.TagResourceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/TagResource
+func (c *Lightsail) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) {
+ op := &request.Operation{
+ Name: opTagResource,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ // Tolerate a nil input: substitute an empty input struct so the request can still be built.
+ if input == nil {
+ input = &TagResourceInput{}
+ }
+
+ output = &TagResourceOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// TagResource API operation for Amazon Lightsail.
+//
+// Adds one or more tags to the specified Amazon Lightsail resource. Each resource
+// can have a maximum of 50 tags. Each tag consists of a key and an optional
+// value. Tag keys must be unique per resource. For more information about tags,
+// see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-tags).
+//
+// The tag resource operation supports tag-based access control via request
+// tags and resource tags applied to the resource identified by resource name.
+// For more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation TagResource for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/TagResource
+func (c *Lightsail) TagResource(input *TagResourceInput) (*TagResourceOutput, error) {
+ req, out := c.TagResourceRequest(input)
+ // out is only valid when the error returned by Send is nil.
+ return out, req.Send()
+}
+
+// TagResourceWithContext is the same as TagResource with the addition of
+// the ability to pass a context and additional request options.
+//
+// See TagResource for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) {
+ req, out := c.TagResourceRequest(input)
+ // NOTE: a nil ctx makes SetContext panic, per the SDK's documented contract.
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// opTestAlarm is the operation name placed in request.Operation.Name.
+const opTestAlarm = "TestAlarm"
+
+// TestAlarmRequest generates a "aws/request.Request" representing the
+// client's request for the TestAlarm operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See TestAlarm for more information on using the TestAlarm
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the TestAlarmRequest method.
+// req, resp := client.TestAlarmRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/TestAlarm
+func (c *Lightsail) TestAlarmRequest(input *TestAlarmInput) (req *request.Request, output *TestAlarmOutput) {
+ op := &request.Operation{
+ Name: opTestAlarm,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ // Tolerate a nil input: substitute an empty input struct so the request can still be built.
+ if input == nil {
+ input = &TestAlarmInput{}
+ }
+
+ output = &TestAlarmOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// TestAlarm API operation for Amazon Lightsail.
+//
+// Tests an alarm by displaying a banner on the Amazon Lightsail console. If
+// a notification trigger is configured for the specified alarm, the test also
+// sends a notification to the notification protocol (Email and/or SMS) configured
+// for the alarm.
+//
+// An alarm is used to monitor a single metric for one of your resources. When
+// a metric condition is met, the alarm can notify you by email, SMS text message,
+// and a banner displayed on the Amazon Lightsail console. For more information,
+// see Alarms in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-alarms).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation TestAlarm for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/TestAlarm
+func (c *Lightsail) TestAlarm(input *TestAlarmInput) (*TestAlarmOutput, error) {
+ req, out := c.TestAlarmRequest(input)
+ // out is only valid when the error returned by Send is nil.
+ return out, req.Send()
+}
+
+// TestAlarmWithContext is the same as TestAlarm with the addition of
+// the ability to pass a context and additional request options.
+//
+// See TestAlarm for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) TestAlarmWithContext(ctx aws.Context, input *TestAlarmInput, opts ...request.Option) (*TestAlarmOutput, error) {
+ req, out := c.TestAlarmRequest(input)
+ // NOTE: a nil ctx makes SetContext panic, per the SDK's documented contract.
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// opUnpeerVpc is the operation name placed in request.Operation.Name.
+const opUnpeerVpc = "UnpeerVpc"
+
+// UnpeerVpcRequest generates a "aws/request.Request" representing the
+// client's request for the UnpeerVpc operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UnpeerVpc for more information on using the UnpeerVpc
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the UnpeerVpcRequest method.
+// req, resp := client.UnpeerVpcRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UnpeerVpc
+func (c *Lightsail) UnpeerVpcRequest(input *UnpeerVpcInput) (req *request.Request, output *UnpeerVpcOutput) {
+ op := &request.Operation{
+ Name: opUnpeerVpc,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ // Tolerate a nil input: substitute an empty input struct so the request can still be built.
+ if input == nil {
+ input = &UnpeerVpcInput{}
+ }
+
+ output = &UnpeerVpcOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// UnpeerVpc API operation for Amazon Lightsail.
+//
+// Unpeers the Lightsail VPC from the user's default VPC.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation UnpeerVpc for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UnpeerVpc
+func (c *Lightsail) UnpeerVpc(input *UnpeerVpcInput) (*UnpeerVpcOutput, error) {
+ req, out := c.UnpeerVpcRequest(input)
+ // out is only valid when the error returned by Send is nil.
+ return out, req.Send()
+}
+
+// UnpeerVpcWithContext is the same as UnpeerVpc with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UnpeerVpc for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) UnpeerVpcWithContext(ctx aws.Context, input *UnpeerVpcInput, opts ...request.Option) (*UnpeerVpcOutput, error) {
+ req, out := c.UnpeerVpcRequest(input)
+ // NOTE: a nil ctx makes SetContext panic, per the SDK's documented contract.
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// opUntagResource is the operation name placed in request.Operation.Name.
+const opUntagResource = "UntagResource"
+
+// UntagResourceRequest generates a "aws/request.Request" representing the
+// client's request for the UntagResource operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UntagResource for more information on using the UntagResource
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the UntagResourceRequest method.
+// req, resp := client.UntagResourceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UntagResource
+func (c *Lightsail) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) {
+ op := &request.Operation{
+ Name: opUntagResource,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ // Tolerate a nil input: substitute an empty input struct so the request can still be built.
+ if input == nil {
+ input = &UntagResourceInput{}
+ }
+
+ output = &UntagResourceOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// UntagResource API operation for Amazon Lightsail.
+//
+// Deletes the specified set of tag keys and their values from the specified
+// Amazon Lightsail resource.
+//
+// The untag resource operation supports tag-based access control via request
+// tags and resource tags applied to the resource identified by resource name.
+// For more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation UntagResource for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UntagResource
+func (c *Lightsail) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) {
+ req, out := c.UntagResourceRequest(input)
+ // out is only valid when the error returned by Send is nil.
+ return out, req.Send()
+}
+
+// UntagResourceWithContext is the same as UntagResource with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UntagResource for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) {
+	r, output := c.UntagResourceRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	return output, r.Send()
+}
+
+const opUpdateBucket = "UpdateBucket" // operation name sent in UpdateBucket requests
+
+// UpdateBucketRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateBucket operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UpdateBucket for more information on using the UpdateBucket
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the UpdateBucketRequest method.
+// req, resp := client.UpdateBucketRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateBucket
+func (c *Lightsail) UpdateBucketRequest(input *UpdateBucketInput) (req *request.Request, output *UpdateBucketOutput) {
+	if input == nil {
+		input = &UpdateBucketInput{}
+	}
+
+	operation := &request.Operation{
+		Name:       opUpdateBucket,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	output = &UpdateBucketOutput{}
+	req = c.newRequest(operation, input, output)
+	return req, output
+}
+
+// UpdateBucket API operation for Amazon Lightsail.
+//
+// Updates an existing Amazon Lightsail bucket.
+//
+// Use this action to update the configuration of an existing bucket, such as
+// versioning, public accessibility, and the AWS accounts that can access the
+// bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation UpdateBucket for usage and error information.
+//
+// Returned Error Types:
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * ServiceException
+// A general service exception.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateBucket
+func (c *Lightsail) UpdateBucket(input *UpdateBucketInput) (*UpdateBucketOutput, error) {
+	r, output := c.UpdateBucketRequest(input)
+	return output, r.Send()
+}
+
+// UpdateBucketWithContext is the same as UpdateBucket with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateBucket for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) UpdateBucketWithContext(ctx aws.Context, input *UpdateBucketInput, opts ...request.Option) (*UpdateBucketOutput, error) {
+	r, output := c.UpdateBucketRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	return output, r.Send()
+}
+
+const opUpdateBucketBundle = "UpdateBucketBundle" // operation name sent in UpdateBucketBundle requests
+
+// UpdateBucketBundleRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateBucketBundle operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UpdateBucketBundle for more information on using the UpdateBucketBundle
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the UpdateBucketBundleRequest method.
+// req, resp := client.UpdateBucketBundleRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateBucketBundle
+func (c *Lightsail) UpdateBucketBundleRequest(input *UpdateBucketBundleInput) (req *request.Request, output *UpdateBucketBundleOutput) {
+	if input == nil {
+		input = &UpdateBucketBundleInput{}
+	}
+
+	operation := &request.Operation{
+		Name:       opUpdateBucketBundle,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	output = &UpdateBucketBundleOutput{}
+	req = c.newRequest(operation, input, output)
+	return req, output
+}
+
+// UpdateBucketBundle API operation for Amazon Lightsail.
+//
+// Updates the bundle, or storage plan, of an existing Amazon Lightsail bucket.
+//
+// A bucket bundle specifies the monthly cost, storage space, and data transfer
+// quota for a bucket. You can update a bucket's bundle only one time within
+// a monthly AWS billing cycle. To determine if you can update a bucket's bundle,
+// use the GetBuckets action. The ableToUpdateBundle parameter in the response
+// will indicate whether you can currently update a bucket's bundle.
+//
+// Update a bucket's bundle if it's consistently going over its storage space
+// or data transfer quota, or if a bucket's usage is consistently in the lower
+// range of its storage space or data transfer quota. Due to the unpredictable
+// usage fluctuations that a bucket might experience, we strongly recommend
+// that you update a bucket's bundle only as a long-term strategy, instead of
+// as a short-term, monthly cost-cutting measure. Choose a bucket bundle that
+// will provide the bucket with ample storage space and data transfer for a
+// long time to come.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation UpdateBucketBundle for usage and error information.
+//
+// Returned Error Types:
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * ServiceException
+// A general service exception.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateBucketBundle
+func (c *Lightsail) UpdateBucketBundle(input *UpdateBucketBundleInput) (*UpdateBucketBundleOutput, error) {
+	r, output := c.UpdateBucketBundleRequest(input)
+	return output, r.Send()
+}
+
+// UpdateBucketBundleWithContext is the same as UpdateBucketBundle with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateBucketBundle for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) UpdateBucketBundleWithContext(ctx aws.Context, input *UpdateBucketBundleInput, opts ...request.Option) (*UpdateBucketBundleOutput, error) {
+	r, output := c.UpdateBucketBundleRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	return output, r.Send()
+}
+
+const opUpdateContainerService = "UpdateContainerService" // operation name sent in UpdateContainerService requests
+
+// UpdateContainerServiceRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateContainerService operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UpdateContainerService for more information on using the UpdateContainerService
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the UpdateContainerServiceRequest method.
+// req, resp := client.UpdateContainerServiceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateContainerService
+func (c *Lightsail) UpdateContainerServiceRequest(input *UpdateContainerServiceInput) (req *request.Request, output *UpdateContainerServiceOutput) {
+	if input == nil {
+		input = &UpdateContainerServiceInput{}
+	}
+
+	operation := &request.Operation{
+		Name:       opUpdateContainerService,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	output = &UpdateContainerServiceOutput{}
+	req = c.newRequest(operation, input, output)
+	return req, output
+}
+
+// UpdateContainerService API operation for Amazon Lightsail.
+//
+// Updates the configuration of your Amazon Lightsail container service, such
+// as its power, scale, and public domain names.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation UpdateContainerService for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateContainerService
+func (c *Lightsail) UpdateContainerService(input *UpdateContainerServiceInput) (*UpdateContainerServiceOutput, error) {
+	r, output := c.UpdateContainerServiceRequest(input)
+	return output, r.Send()
+}
+
+// UpdateContainerServiceWithContext is the same as UpdateContainerService with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateContainerService for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) UpdateContainerServiceWithContext(ctx aws.Context, input *UpdateContainerServiceInput, opts ...request.Option) (*UpdateContainerServiceOutput, error) {
+	r, output := c.UpdateContainerServiceRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	return output, r.Send()
+}
+
+const opUpdateDistribution = "UpdateDistribution" // operation name sent in UpdateDistribution requests
+
+// UpdateDistributionRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateDistribution operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UpdateDistribution for more information on using the UpdateDistribution
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the UpdateDistributionRequest method.
+// req, resp := client.UpdateDistributionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateDistribution
+func (c *Lightsail) UpdateDistributionRequest(input *UpdateDistributionInput) (req *request.Request, output *UpdateDistributionOutput) {
+	if input == nil {
+		input = &UpdateDistributionInput{}
+	}
+
+	operation := &request.Operation{
+		Name:       opUpdateDistribution,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	output = &UpdateDistributionOutput{}
+	req = c.newRequest(operation, input, output)
+	return req, output
+}
+
+// UpdateDistribution API operation for Amazon Lightsail.
+//
+// Updates an existing Amazon Lightsail content delivery network (CDN) distribution.
+//
+// Use this action to update the configuration of your existing distribution.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation UpdateDistribution for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateDistribution
+func (c *Lightsail) UpdateDistribution(input *UpdateDistributionInput) (*UpdateDistributionOutput, error) {
+	r, output := c.UpdateDistributionRequest(input)
+	return output, r.Send()
+}
+
+// UpdateDistributionWithContext is the same as UpdateDistribution with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateDistribution for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) UpdateDistributionWithContext(ctx aws.Context, input *UpdateDistributionInput, opts ...request.Option) (*UpdateDistributionOutput, error) {
+	r, output := c.UpdateDistributionRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	return output, r.Send()
+}
+
+const opUpdateDistributionBundle = "UpdateDistributionBundle" // operation name sent in UpdateDistributionBundle requests
+
+// UpdateDistributionBundleRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateDistributionBundle operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UpdateDistributionBundle for more information on using the UpdateDistributionBundle
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the UpdateDistributionBundleRequest method.
+// req, resp := client.UpdateDistributionBundleRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateDistributionBundle
+func (c *Lightsail) UpdateDistributionBundleRequest(input *UpdateDistributionBundleInput) (req *request.Request, output *UpdateDistributionBundleOutput) {
+	if input == nil {
+		input = &UpdateDistributionBundleInput{}
+	}
+
+	operation := &request.Operation{
+		Name:       opUpdateDistributionBundle,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	output = &UpdateDistributionBundleOutput{}
+	req = c.newRequest(operation, input, output)
+	return req, output
+}
+
+// UpdateDistributionBundle API operation for Amazon Lightsail.
+//
+// Updates the bundle of your Amazon Lightsail content delivery network (CDN)
+// distribution.
+//
+// A distribution bundle specifies the monthly network transfer quota and monthly
+// cost of your distribution.
+//
+// Update your distribution's bundle if your distribution is going over its
+// monthly network transfer quota and is incurring an overage fee.
+//
+// You can update your distribution's bundle only one time within your monthly
+// AWS billing cycle. To determine if you can update your distribution's bundle,
+// use the GetDistributions action. The ableToUpdateBundle parameter in the
+// result will indicate whether you can currently update your distribution's
+// bundle.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation UpdateDistributionBundle for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateDistributionBundle
+func (c *Lightsail) UpdateDistributionBundle(input *UpdateDistributionBundleInput) (*UpdateDistributionBundleOutput, error) {
+	r, output := c.UpdateDistributionBundleRequest(input)
+	return output, r.Send()
+}
+
+// UpdateDistributionBundleWithContext is the same as UpdateDistributionBundle with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateDistributionBundle for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) UpdateDistributionBundleWithContext(ctx aws.Context, input *UpdateDistributionBundleInput, opts ...request.Option) (*UpdateDistributionBundleOutput, error) {
+	r, output := c.UpdateDistributionBundleRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	return output, r.Send()
+}
+
+const opUpdateDomainEntry = "UpdateDomainEntry" // operation name sent in UpdateDomainEntry requests
+
+// UpdateDomainEntryRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateDomainEntry operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UpdateDomainEntry for more information on using the UpdateDomainEntry
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the UpdateDomainEntryRequest method.
+// req, resp := client.UpdateDomainEntryRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateDomainEntry
+func (c *Lightsail) UpdateDomainEntryRequest(input *UpdateDomainEntryInput) (req *request.Request, output *UpdateDomainEntryOutput) {
+	if input == nil {
+		input = &UpdateDomainEntryInput{}
+	}
+
+	operation := &request.Operation{
+		Name:       opUpdateDomainEntry,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	output = &UpdateDomainEntryOutput{}
+	req = c.newRequest(operation, input, output)
+	return req, output
+}
+
+// UpdateDomainEntry API operation for Amazon Lightsail.
+//
+// Updates a domain recordset after it is created.
+//
+// The update domain entry operation supports tag-based access control via resource
+// tags applied to the resource identified by domain name. For more information,
+// see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation UpdateDomainEntry for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateDomainEntry
+func (c *Lightsail) UpdateDomainEntry(input *UpdateDomainEntryInput) (*UpdateDomainEntryOutput, error) {
+	r, output := c.UpdateDomainEntryRequest(input)
+	return output, r.Send()
+}
+
+// UpdateDomainEntryWithContext is the same as UpdateDomainEntry with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateDomainEntry for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) UpdateDomainEntryWithContext(ctx aws.Context, input *UpdateDomainEntryInput, opts ...request.Option) (*UpdateDomainEntryOutput, error) {
+	r, output := c.UpdateDomainEntryRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	return output, r.Send()
+}
+
+const opUpdateLoadBalancerAttribute = "UpdateLoadBalancerAttribute" // operation name sent in UpdateLoadBalancerAttribute requests
+
+// UpdateLoadBalancerAttributeRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateLoadBalancerAttribute operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UpdateLoadBalancerAttribute for more information on using the UpdateLoadBalancerAttribute
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the UpdateLoadBalancerAttributeRequest method.
+// req, resp := client.UpdateLoadBalancerAttributeRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateLoadBalancerAttribute
+func (c *Lightsail) UpdateLoadBalancerAttributeRequest(input *UpdateLoadBalancerAttributeInput) (req *request.Request, output *UpdateLoadBalancerAttributeOutput) {
+	if input == nil {
+		input = &UpdateLoadBalancerAttributeInput{}
+	}
+
+	operation := &request.Operation{
+		Name:       opUpdateLoadBalancerAttribute,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	output = &UpdateLoadBalancerAttributeOutput{}
+	req = c.newRequest(operation, input, output)
+	return req, output
+}
+
+// UpdateLoadBalancerAttribute API operation for Amazon Lightsail.
+//
+// Updates the specified attribute for a load balancer. You can only update
+// one attribute at a time.
+//
+// The update load balancer attribute operation supports tag-based access control
+// via resource tags applied to the resource identified by load balancer name.
+// For more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation UpdateLoadBalancerAttribute for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateLoadBalancerAttribute
+func (c *Lightsail) UpdateLoadBalancerAttribute(input *UpdateLoadBalancerAttributeInput) (*UpdateLoadBalancerAttributeOutput, error) {
+	r, output := c.UpdateLoadBalancerAttributeRequest(input)
+	return output, r.Send()
+}
+
+// UpdateLoadBalancerAttributeWithContext is the same as UpdateLoadBalancerAttribute with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateLoadBalancerAttribute for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) UpdateLoadBalancerAttributeWithContext(ctx aws.Context, input *UpdateLoadBalancerAttributeInput, opts ...request.Option) (*UpdateLoadBalancerAttributeOutput, error) {
+	r, output := c.UpdateLoadBalancerAttributeRequest(input)
+	r.SetContext(ctx)
+	r.ApplyOptions(opts...)
+	return output, r.Send()
+}
+
+const opUpdateRelationalDatabase = "UpdateRelationalDatabase" // operation name sent in UpdateRelationalDatabase requests
+
+// UpdateRelationalDatabaseRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateRelationalDatabase operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UpdateRelationalDatabase for more information on using the UpdateRelationalDatabase
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the UpdateRelationalDatabaseRequest method.
+// req, resp := client.UpdateRelationalDatabaseRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateRelationalDatabase
+func (c *Lightsail) UpdateRelationalDatabaseRequest(input *UpdateRelationalDatabaseInput) (req *request.Request, output *UpdateRelationalDatabaseOutput) {
+ op := &request.Operation{
+ Name: opUpdateRelationalDatabase,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UpdateRelationalDatabaseInput{}
+ }
+
+ output = &UpdateRelationalDatabaseOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// UpdateRelationalDatabase API operation for Amazon Lightsail.
+//
+// Allows the update of one or more attributes of a database in Amazon Lightsail.
+//
+// Updates are applied immediately, or in cases where the updates could result
+// in an outage, are applied during the database's predefined maintenance window.
+//
+// The update relational database operation supports tag-based access control
+// via resource tags applied to the resource identified by relationalDatabaseName.
+// For more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation UpdateRelationalDatabase for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateRelationalDatabase
+func (c *Lightsail) UpdateRelationalDatabase(input *UpdateRelationalDatabaseInput) (*UpdateRelationalDatabaseOutput, error) {
+ req, out := c.UpdateRelationalDatabaseRequest(input)
+ return out, req.Send()
+}
+
+// UpdateRelationalDatabaseWithContext is the same as UpdateRelationalDatabase with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateRelationalDatabase for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) UpdateRelationalDatabaseWithContext(ctx aws.Context, input *UpdateRelationalDatabaseInput, opts ...request.Option) (*UpdateRelationalDatabaseOutput, error) {
+ req, out := c.UpdateRelationalDatabaseRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opUpdateRelationalDatabaseParameters = "UpdateRelationalDatabaseParameters"
+
+// UpdateRelationalDatabaseParametersRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateRelationalDatabaseParameters operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See UpdateRelationalDatabaseParameters for more information on using the UpdateRelationalDatabaseParameters
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the UpdateRelationalDatabaseParametersRequest method.
+// req, resp := client.UpdateRelationalDatabaseParametersRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateRelationalDatabaseParameters
+func (c *Lightsail) UpdateRelationalDatabaseParametersRequest(input *UpdateRelationalDatabaseParametersInput) (req *request.Request, output *UpdateRelationalDatabaseParametersOutput) {
+ op := &request.Operation{
+ Name: opUpdateRelationalDatabaseParameters,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UpdateRelationalDatabaseParametersInput{}
+ }
+
+ output = &UpdateRelationalDatabaseParametersOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// UpdateRelationalDatabaseParameters API operation for Amazon Lightsail.
+//
+// Allows the update of one or more parameters of a database in Amazon Lightsail.
+//
+// Parameter updates don't cause outages; therefore, their application is not
+// subject to the preferred maintenance window. However, there are two ways
+// in which parameter updates are applied: dynamic or pending-reboot. Parameters
+// marked with a dynamic apply type are applied immediately. Parameters marked
+// with a pending-reboot apply type are applied only after the database is rebooted
+// using the reboot relational database operation.
+//
+// The update relational database parameters operation supports tag-based access
+// control via resource tags applied to the resource identified by relationalDatabaseName.
+// For more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation UpdateRelationalDatabaseParameters for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * OperationFailureException
+// Lightsail throws this exception when an operation fails to execute.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * AccountSetupInProgressException
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateRelationalDatabaseParameters
+func (c *Lightsail) UpdateRelationalDatabaseParameters(input *UpdateRelationalDatabaseParametersInput) (*UpdateRelationalDatabaseParametersOutput, error) {
+ req, out := c.UpdateRelationalDatabaseParametersRequest(input)
+ return out, req.Send()
+}
+
+// UpdateRelationalDatabaseParametersWithContext is the same as UpdateRelationalDatabaseParameters with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateRelationalDatabaseParameters for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) UpdateRelationalDatabaseParametersWithContext(ctx aws.Context, input *UpdateRelationalDatabaseParametersInput, opts ...request.Option) (*UpdateRelationalDatabaseParametersOutput, error) {
+ req, out := c.UpdateRelationalDatabaseParametersRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+type AccessDeniedException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Code_ *string `locationName:"code" type:"string"`
+
+ Docs *string `locationName:"docs" type:"string"`
+
+ Message_ *string `locationName:"message" type:"string"`
+
+ Tip *string `locationName:"tip" type:"string"`
+}
+
+// String returns the string representation
+func (s AccessDeniedException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AccessDeniedException) GoString() string {
+ return s.String()
+}
+
+func newErrorAccessDeniedException(v protocol.ResponseMetadata) error {
+ return &AccessDeniedException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *AccessDeniedException) Code() string {
+ return "AccessDeniedException"
+}
+
+// Message returns the exception's message.
+func (s *AccessDeniedException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *AccessDeniedException) OrigErr() error {
+ return nil
+}
+
+func (s *AccessDeniedException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *AccessDeniedException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *AccessDeniedException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// Describes an access key for an Amazon Lightsail bucket.
+//
+// Access keys grant full programmatic access to the specified bucket and its
+// objects. You can have a maximum of two access keys per bucket. Use the CreateBucketAccessKey
+// action to create an access key for a specific bucket. For more information
+// about access keys, see Creating access keys for a bucket in Amazon Lightsail
+// (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-creating-bucket-access-keys)
+// in the Amazon Lightsail Developer Guide.
+//
+// The secretAccessKey value is returned only in response to the CreateBucketAccessKey
+// action. You can get a secret access key only when you first create an access
+// key; you cannot get the secret access key later. If you lose the secret access
+// key, you must create a new access key.
+type AccessKey struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the access key.
+ AccessKeyId *string `locationName:"accessKeyId" min:"20" type:"string" sensitive:"true"`
+
+ // The timestamp when the access key was created.
+ CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`
+
+ // An object that describes the last time the access key was used.
+ //
+ // This object does not include data in the response of a CreateBucketAccessKey
+ // action. If the access key has not been used, the region and serviceName values
+ // are N/A, and the lastUsedDate value is null.
+ LastUsed *AccessKeyLastUsed `locationName:"lastUsed" type:"structure"`
+
+ // The secret access key used to sign requests.
+ //
+ // You should store the secret access key in a safe location. We recommend that
+ // you delete the access key if the secret access key is compromised.
+ SecretAccessKey *string `locationName:"secretAccessKey" type:"string"`
+
+ // The status of the access key.
+ //
+ // A status of Active means that the key is valid, while Inactive means it is
+ // not.
+ Status *string `locationName:"status" type:"string" enum:"StatusType"`
+}
+
+// String returns the string representation
+func (s AccessKey) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AccessKey) GoString() string {
+ return s.String()
+}
+
+// SetAccessKeyId sets the AccessKeyId field's value.
+func (s *AccessKey) SetAccessKeyId(v string) *AccessKey {
+ s.AccessKeyId = &v
+ return s
+}
+
+// SetCreatedAt sets the CreatedAt field's value.
+func (s *AccessKey) SetCreatedAt(v time.Time) *AccessKey {
+ s.CreatedAt = &v
+ return s
+}
+
+// SetLastUsed sets the LastUsed field's value.
+func (s *AccessKey) SetLastUsed(v *AccessKeyLastUsed) *AccessKey {
+ s.LastUsed = v
+ return s
+}
+
+// SetSecretAccessKey sets the SecretAccessKey field's value.
+func (s *AccessKey) SetSecretAccessKey(v string) *AccessKey {
+ s.SecretAccessKey = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *AccessKey) SetStatus(v string) *AccessKey {
+ s.Status = &v
+ return s
+}
+
+// Describes the last time an access key was used.
+//
+// This object does not include data in the response of a CreateBucketAccessKey
+// action.
+type AccessKeyLastUsed struct {
+ _ struct{} `type:"structure"`
+
+ // The date and time when the access key was most recently used.
+ //
+ // This value is null if the access key has not been used.
+ LastUsedDate *time.Time `locationName:"lastUsedDate" type:"timestamp"`
+
+ // The AWS Region where this access key was most recently used.
+ //
+ // This value is N/A if the access key has not been used.
+ Region *string `locationName:"region" type:"string"`
+
+ // The name of the AWS service with which this access key was most recently
+ // used.
+ //
+ // This value is N/A if the access key has not been used.
+ ServiceName *string `locationName:"serviceName" type:"string"`
+}
+
+// String returns the string representation
+func (s AccessKeyLastUsed) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AccessKeyLastUsed) GoString() string {
+ return s.String()
+}
+
+// SetLastUsedDate sets the LastUsedDate field's value.
+func (s *AccessKeyLastUsed) SetLastUsedDate(v time.Time) *AccessKeyLastUsed {
+ s.LastUsedDate = &v
+ return s
+}
+
+// SetRegion sets the Region field's value.
+func (s *AccessKeyLastUsed) SetRegion(v string) *AccessKeyLastUsed {
+ s.Region = &v
+ return s
+}
+
+// SetServiceName sets the ServiceName field's value.
+func (s *AccessKeyLastUsed) SetServiceName(v string) *AccessKeyLastUsed {
+ s.ServiceName = &v
+ return s
+}
+
+// Describes the anonymous access permissions for an Amazon Lightsail bucket
+// and its objects.
+//
+// For more information about bucket access permissions, see Understanding bucket
+// permissions in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-understanding-bucket-permissions)
+// in the Amazon Lightsail Developer Guide.
+type AccessRules struct {
+ _ struct{} `type:"structure"`
+
+ // A Boolean value that indicates whether the access control list (ACL) permissions
+ // that are applied to individual objects override the getObject option that
+ // is currently specified.
+ //
+ // When this is true, you can use the PutObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectAcl.html)
+ // Amazon S3 API action to set individual objects to public (read-only) using
+ // the public-read ACL, or to private using the private ACL.
+ AllowPublicOverrides *bool `locationName:"allowPublicOverrides" type:"boolean"`
+
+ // Specifies the anonymous access to all objects in a bucket.
+ //
+ // The following options can be specified:
+ //
+ // * public - Sets all objects in the bucket to public (read-only), making
+ // them readable by anyone in the world. If the getObject value is set to
+ // public, then all objects in the bucket default to public regardless of
+ // the allowPublicOverrides value.
+ //
+ // * private - Sets all objects in the bucket to private, making them readable
+ // only by you or anyone you give access to. If the getObject value is set
+ // to private, and the allowPublicOverrides value is set to true, then all
+ // objects in the bucket default to private unless they are configured with
+ // a public-read ACL. Individual objects with a public-read ACL are readable
+ // by anyone in the world.
+ GetObject *string `locationName:"getObject" type:"string" enum:"AccessType"`
+}
+
+// String returns the string representation
+func (s AccessRules) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AccessRules) GoString() string {
+ return s.String()
+}
+
+// SetAllowPublicOverrides sets the AllowPublicOverrides field's value.
+func (s *AccessRules) SetAllowPublicOverrides(v bool) *AccessRules {
+ s.AllowPublicOverrides = &v
+ return s
+}
+
+// SetGetObject sets the GetObject field's value.
+func (s *AccessRules) SetGetObject(v string) *AccessRules {
+ s.GetObject = &v
+ return s
+}
+
+// Lightsail throws this exception when an account is still in the setup in
+// progress state.
+type AccountSetupInProgressException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Code_ *string `locationName:"code" type:"string"`
+
+ Docs *string `locationName:"docs" type:"string"`
+
+ Message_ *string `locationName:"message" type:"string"`
+
+ Tip *string `locationName:"tip" type:"string"`
+}
+
+// String returns the string representation
+func (s AccountSetupInProgressException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AccountSetupInProgressException) GoString() string {
+ return s.String()
+}
+
+func newErrorAccountSetupInProgressException(v protocol.ResponseMetadata) error {
+ return &AccountSetupInProgressException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *AccountSetupInProgressException) Code() string {
+ return "AccountSetupInProgressException"
+}
+
+// Message returns the exception's message.
+func (s *AccountSetupInProgressException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *AccountSetupInProgressException) OrigErr() error {
+ return nil
+}
+
+func (s *AccountSetupInProgressException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *AccountSetupInProgressException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *AccountSetupInProgressException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// Describes an add-on that is enabled for an Amazon Lightsail resource.
+type AddOn struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the add-on.
+ Name *string `locationName:"name" type:"string"`
+
+ // The next daily time an automatic snapshot will be created.
+ //
+ // The time shown is in HH:00 format, and in Coordinated Universal Time (UTC).
+ //
+ // The snapshot is automatically created between the time shown and up to 45
+ // minutes after.
+ NextSnapshotTimeOfDay *string `locationName:"nextSnapshotTimeOfDay" type:"string"`
+
+ // The daily time when an automatic snapshot is created.
+ //
+ // The time shown is in HH:00 format, and in Coordinated Universal Time (UTC).
+ //
+ // The snapshot is automatically created between the time shown and up to 45
+ // minutes after.
+ SnapshotTimeOfDay *string `locationName:"snapshotTimeOfDay" type:"string"`
+
+ // The status of the add-on.
+ Status *string `locationName:"status" type:"string"`
+}
+
+// String returns the string representation
+func (s AddOn) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddOn) GoString() string {
+ return s.String()
+}
+
+// SetName sets the Name field's value.
+func (s *AddOn) SetName(v string) *AddOn {
+ s.Name = &v
+ return s
+}
+
+// SetNextSnapshotTimeOfDay sets the NextSnapshotTimeOfDay field's value.
+func (s *AddOn) SetNextSnapshotTimeOfDay(v string) *AddOn {
+ s.NextSnapshotTimeOfDay = &v
+ return s
+}
+
+// SetSnapshotTimeOfDay sets the SnapshotTimeOfDay field's value.
+func (s *AddOn) SetSnapshotTimeOfDay(v string) *AddOn {
+ s.SnapshotTimeOfDay = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *AddOn) SetStatus(v string) *AddOn {
+ s.Status = &v
+ return s
+}
+
+// Describes a request to enable, modify, or disable an add-on for an Amazon
+// Lightsail resource.
+//
+// An additional cost may be associated with enabling add-ons. For more information,
+// see the Lightsail pricing page (https://aws.amazon.com/lightsail/pricing/).
+type AddOnRequest struct {
+ _ struct{} `type:"structure"`
+
+ // The add-on type.
+ //
+ // AddOnType is a required field
+ AddOnType *string `locationName:"addOnType" type:"string" required:"true" enum:"AddOnType"`
+
+ // An object that represents additional parameters when enabling or modifying
+ // the automatic snapshot add-on.
+ AutoSnapshotAddOnRequest *AutoSnapshotAddOnRequest `locationName:"autoSnapshotAddOnRequest" type:"structure"`
+}
+
+// String returns the string representation
+func (s AddOnRequest) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddOnRequest) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AddOnRequest) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AddOnRequest"}
+ if s.AddOnType == nil {
+ invalidParams.Add(request.NewErrParamRequired("AddOnType"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAddOnType sets the AddOnType field's value.
+func (s *AddOnRequest) SetAddOnType(v string) *AddOnRequest {
+ s.AddOnType = &v
+ return s
+}
+
+// SetAutoSnapshotAddOnRequest sets the AutoSnapshotAddOnRequest field's value.
+func (s *AddOnRequest) SetAutoSnapshotAddOnRequest(v *AutoSnapshotAddOnRequest) *AddOnRequest {
+ s.AutoSnapshotAddOnRequest = v
+ return s
+}
+
+// Describes an alarm.
+//
+// An alarm is a way to monitor your Lightsail resource metrics. For more information,
+// see Alarms in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-alarms).
+type Alarm struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the alarm.
+ Arn *string `locationName:"arn" type:"string"`
+
+ // The arithmetic operation used when comparing the specified statistic and
+ // threshold.
+ ComparisonOperator *string `locationName:"comparisonOperator" type:"string" enum:"ComparisonOperator"`
+
+ // The contact protocols for the alarm, such as Email, SMS (text messaging),
+ // or both.
+ ContactProtocols []*string `locationName:"contactProtocols" type:"list"`
+
+ // The timestamp when the alarm was created.
+ CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`
+
+ // The number of data points that must not be within the specified threshold
+ // to trigger the alarm.
+ DatapointsToAlarm *int64 `locationName:"datapointsToAlarm" type:"integer"`
+
+ // The number of periods over which data is compared to the specified threshold.
+ EvaluationPeriods *int64 `locationName:"evaluationPeriods" type:"integer"`
+
+ // An object that lists information about the location of the alarm.
+ Location *ResourceLocation `locationName:"location" type:"structure"`
+
+ // The name of the metric associated with the alarm.
+ MetricName *string `locationName:"metricName" type:"string" enum:"MetricName"`
+
+ // An object that lists information about the resource monitored by the alarm.
+ MonitoredResourceInfo *MonitoredResourceInfo `locationName:"monitoredResourceInfo" type:"structure"`
+
+ // The name of the alarm.
+ Name *string `locationName:"name" type:"string"`
+
+ // Indicates whether the alarm is enabled.
+ NotificationEnabled *bool `locationName:"notificationEnabled" type:"boolean"`
+
+ // The alarm states that trigger a notification.
+ NotificationTriggers []*string `locationName:"notificationTriggers" type:"list"`
+
+ // The period, in seconds, over which the statistic is applied.
+ Period *int64 `locationName:"period" min:"60" type:"integer"`
+
+ // The Lightsail resource type (e.g., Alarm).
+ ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"`
+
+ // The current state of the alarm.
+ //
+ // An alarm has the following possible states:
+ //
+ // * ALARM - The metric is outside of the defined threshold.
+ //
+ // * INSUFFICIENT_DATA - The alarm has just started, the metric is not available,
+ // or not enough data is available for the metric to determine the alarm
+ // state.
+ //
+ // * OK - The metric is within the defined threshold.
+ State *string `locationName:"state" type:"string" enum:"AlarmState"`
+
+ // The statistic for the metric associated with the alarm.
+ //
+ // The following statistics are available:
+ //
+ // * Minimum - The lowest value observed during the specified period. Use
+ // this value to determine low volumes of activity for your application.
+ //
+ // * Maximum - The highest value observed during the specified period. Use
+ // this value to determine high volumes of activity for your application.
+ //
+ // * Sum - All values submitted for the matching metric added together. You
+ // can use this statistic to determine the total volume of a metric.
+ //
+ // * Average - The value of Sum / SampleCount during the specified period.
+ // By comparing this statistic with the Minimum and Maximum values, you can
+ // determine the full scope of a metric and how close the average use is
+ // to the Minimum and Maximum values. This comparison helps you to know when
+ // to increase or decrease your resources.
+ //
+ // * SampleCount - The count, or number, of data points used for the statistical
+ // calculation.
+ Statistic *string `locationName:"statistic" type:"string" enum:"MetricStatistic"`
+
+ // The support code. Include this code in your email to support when you have
+ // questions about your Lightsail alarm. This code enables our support team
+ // to look up your Lightsail information more easily.
+ SupportCode *string `locationName:"supportCode" type:"string"`
+
+ // The value against which the specified statistic is compared.
+ Threshold *float64 `locationName:"threshold" type:"double"`
+
+ // Specifies how the alarm handles missing data points.
+ //
+ // An alarm can treat missing data in the following ways:
+ //
+ // * breaching - Assume the missing data is not within the threshold. Missing
+ // data counts towards the number of times the metric is not within the threshold.
+ //
+ // * notBreaching - Assume the missing data is within the threshold. Missing
+ // data does not count towards the number of times the metric is not within
+ // the threshold.
+ //
+ // * ignore - Ignore the missing data. Maintains the current alarm state.
+ //
+ // * missing - Missing data is treated as missing.
+ TreatMissingData *string `locationName:"treatMissingData" type:"string" enum:"TreatMissingData"`
+
+ // The unit of the metric associated with the alarm.
+ Unit *string `locationName:"unit" type:"string" enum:"MetricUnit"`
+}
+
+// String returns the string representation
+func (s Alarm) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Alarm) GoString() string {
+ return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *Alarm) SetArn(v string) *Alarm {
+ s.Arn = &v
+ return s
+}
+
+// SetComparisonOperator sets the ComparisonOperator field's value.
+func (s *Alarm) SetComparisonOperator(v string) *Alarm {
+ s.ComparisonOperator = &v
+ return s
+}
+
+// SetContactProtocols sets the ContactProtocols field's value.
+func (s *Alarm) SetContactProtocols(v []*string) *Alarm {
+ s.ContactProtocols = v
+ return s
+}
+
+// SetCreatedAt sets the CreatedAt field's value.
+func (s *Alarm) SetCreatedAt(v time.Time) *Alarm {
+ s.CreatedAt = &v
+ return s
+}
+
+// SetDatapointsToAlarm sets the DatapointsToAlarm field's value.
+func (s *Alarm) SetDatapointsToAlarm(v int64) *Alarm {
+ s.DatapointsToAlarm = &v
+ return s
+}
+
+// SetEvaluationPeriods sets the EvaluationPeriods field's value.
+func (s *Alarm) SetEvaluationPeriods(v int64) *Alarm {
+ s.EvaluationPeriods = &v
+ return s
+}
+
+// SetLocation sets the Location field's value.
+func (s *Alarm) SetLocation(v *ResourceLocation) *Alarm {
+ s.Location = v
+ return s
+}
+
+// SetMetricName sets the MetricName field's value.
+func (s *Alarm) SetMetricName(v string) *Alarm {
+ s.MetricName = &v
+ return s
+}
+
+// SetMonitoredResourceInfo sets the MonitoredResourceInfo field's value.
+func (s *Alarm) SetMonitoredResourceInfo(v *MonitoredResourceInfo) *Alarm {
+ s.MonitoredResourceInfo = v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *Alarm) SetName(v string) *Alarm {
+ s.Name = &v
+ return s
+}
+
+// SetNotificationEnabled sets the NotificationEnabled field's value.
+func (s *Alarm) SetNotificationEnabled(v bool) *Alarm {
+ s.NotificationEnabled = &v
+ return s
+}
+
+// SetNotificationTriggers sets the NotificationTriggers field's value.
+func (s *Alarm) SetNotificationTriggers(v []*string) *Alarm {
+ s.NotificationTriggers = v
+ return s
+}
+
+// SetPeriod sets the Period field's value.
+func (s *Alarm) SetPeriod(v int64) *Alarm {
+ s.Period = &v
+ return s
+}
+
+// SetResourceType sets the ResourceType field's value.
+func (s *Alarm) SetResourceType(v string) *Alarm {
+ s.ResourceType = &v
+ return s
+}
+
+// SetState sets the State field's value.
+func (s *Alarm) SetState(v string) *Alarm {
+ s.State = &v
+ return s
+}
+
+// SetStatistic sets the Statistic field's value.
+func (s *Alarm) SetStatistic(v string) *Alarm {
+ s.Statistic = &v
+ return s
+}
+
+// SetSupportCode sets the SupportCode field's value.
+func (s *Alarm) SetSupportCode(v string) *Alarm {
+ s.SupportCode = &v
+ return s
+}
+
+// SetThreshold sets the Threshold field's value.
+func (s *Alarm) SetThreshold(v float64) *Alarm {
+ s.Threshold = &v
+ return s
+}
+
+// SetTreatMissingData sets the TreatMissingData field's value.
+func (s *Alarm) SetTreatMissingData(v string) *Alarm {
+ s.TreatMissingData = &v
+ return s
+}
+
+// SetUnit sets the Unit field's value.
+func (s *Alarm) SetUnit(v string) *Alarm {
+ s.Unit = &v
+ return s
+}
+
+// AllocateStaticIpInput contains the parameters for the AllocateStaticIp operation.
+type AllocateStaticIpInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the static IP address.
+ //
+ // StaticIpName is a required field
+ StaticIpName *string `locationName:"staticIpName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AllocateStaticIpInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AllocateStaticIpInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AllocateStaticIpInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AllocateStaticIpInput"}
+ if s.StaticIpName == nil {
+ invalidParams.Add(request.NewErrParamRequired("StaticIpName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetStaticIpName sets the StaticIpName field's value.
+func (s *AllocateStaticIpInput) SetStaticIpName(v string) *AllocateStaticIpInput {
+ s.StaticIpName = &v
+ return s
+}
+
+type AllocateStaticIpOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s AllocateStaticIpOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AllocateStaticIpOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *AllocateStaticIpOutput) SetOperations(v []*Operation) *AllocateStaticIpOutput {
+ s.Operations = v
+ return s
+}
+
+type AttachCertificateToDistributionInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the certificate to attach to a distribution.
+ //
+ // Only certificates with a status of ISSUED can be attached to a distribution.
+ //
+ // Use the GetCertificates action to get a list of certificate names that you
+ // can specify.
+ //
+ // This is the name of the certificate resource type and is used only to reference
+ // the certificate in other API actions. It can be different than the domain
+ // name of the certificate. For example, your certificate name might be WordPress-Blog-Certificate
+ // and the domain name of the certificate might be example.com.
+ //
+ // CertificateName is a required field
+ CertificateName *string `locationName:"certificateName" type:"string" required:"true"`
+
+ // The name of the distribution that the certificate will be attached to.
+ //
+ // Use the GetDistributions action to get a list of distribution names that
+ // you can specify.
+ //
+ // DistributionName is a required field
+ DistributionName *string `locationName:"distributionName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AttachCertificateToDistributionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AttachCertificateToDistributionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AttachCertificateToDistributionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AttachCertificateToDistributionInput"}
+ if s.CertificateName == nil {
+ invalidParams.Add(request.NewErrParamRequired("CertificateName"))
+ }
+ if s.DistributionName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DistributionName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetCertificateName sets the CertificateName field's value.
+func (s *AttachCertificateToDistributionInput) SetCertificateName(v string) *AttachCertificateToDistributionInput {
+ s.CertificateName = &v
+ return s
+}
+
+// SetDistributionName sets the DistributionName field's value.
+func (s *AttachCertificateToDistributionInput) SetDistributionName(v string) *AttachCertificateToDistributionInput {
+ s.DistributionName = &v
+ return s
+}
+
+type AttachCertificateToDistributionOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An object that describes the result of the action, such as the status of
+ // the request, the timestamp of the request, and the resources affected by
+ // the request.
+ Operation *Operation `locationName:"operation" type:"structure"`
+}
+
+// String returns the string representation
+func (s AttachCertificateToDistributionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AttachCertificateToDistributionOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperation sets the Operation field's value.
+func (s *AttachCertificateToDistributionOutput) SetOperation(v *Operation) *AttachCertificateToDistributionOutput {
+ s.Operation = v
+ return s
+}
+
+type AttachDiskInput struct {
+ _ struct{} `type:"structure"`
+
+ // The unique Lightsail disk name (e.g., my-disk).
+ //
+ // DiskName is a required field
+ DiskName *string `locationName:"diskName" type:"string" required:"true"`
+
+ // The disk path to expose to the instance (e.g., /dev/xvdf).
+ //
+ // DiskPath is a required field
+ DiskPath *string `locationName:"diskPath" type:"string" required:"true"`
+
+ // The name of the Lightsail instance where you want to utilize the storage
+ // disk.
+ //
+ // InstanceName is a required field
+ InstanceName *string `locationName:"instanceName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AttachDiskInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AttachDiskInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AttachDiskInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AttachDiskInput"}
+ if s.DiskName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DiskName"))
+ }
+ if s.DiskPath == nil {
+ invalidParams.Add(request.NewErrParamRequired("DiskPath"))
+ }
+ if s.InstanceName == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDiskName sets the DiskName field's value.
+func (s *AttachDiskInput) SetDiskName(v string) *AttachDiskInput {
+ s.DiskName = &v
+ return s
+}
+
+// SetDiskPath sets the DiskPath field's value.
+func (s *AttachDiskInput) SetDiskPath(v string) *AttachDiskInput {
+ s.DiskPath = &v
+ return s
+}
+
+// SetInstanceName sets the InstanceName field's value.
+func (s *AttachDiskInput) SetInstanceName(v string) *AttachDiskInput {
+ s.InstanceName = &v
+ return s
+}
+
+type AttachDiskOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s AttachDiskOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AttachDiskOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *AttachDiskOutput) SetOperations(v []*Operation) *AttachDiskOutput {
+ s.Operations = v
+ return s
+}
+
+type AttachInstancesToLoadBalancerInput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of strings representing the instance name(s) you want to attach
+ // to your load balancer.
+ //
+ // An instance must be running before you can attach it to your load balancer.
+ //
+ // There are no additional limits on the number of instances you can attach
+ // to your load balancer, aside from the limit of Lightsail instances you can
+ // create in your account (20).
+ //
+ // InstanceNames is a required field
+ InstanceNames []*string `locationName:"instanceNames" type:"list" required:"true"`
+
+ // The name of the load balancer.
+ //
+ // LoadBalancerName is a required field
+ LoadBalancerName *string `locationName:"loadBalancerName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AttachInstancesToLoadBalancerInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AttachInstancesToLoadBalancerInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AttachInstancesToLoadBalancerInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AttachInstancesToLoadBalancerInput"}
+ if s.InstanceNames == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceNames"))
+ }
+ if s.LoadBalancerName == nil {
+ invalidParams.Add(request.NewErrParamRequired("LoadBalancerName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetInstanceNames sets the InstanceNames field's value.
+func (s *AttachInstancesToLoadBalancerInput) SetInstanceNames(v []*string) *AttachInstancesToLoadBalancerInput {
+ s.InstanceNames = v
+ return s
+}
+
+// SetLoadBalancerName sets the LoadBalancerName field's value.
+func (s *AttachInstancesToLoadBalancerInput) SetLoadBalancerName(v string) *AttachInstancesToLoadBalancerInput {
+ s.LoadBalancerName = &v
+ return s
+}
+
+type AttachInstancesToLoadBalancerOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s AttachInstancesToLoadBalancerOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AttachInstancesToLoadBalancerOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *AttachInstancesToLoadBalancerOutput) SetOperations(v []*Operation) *AttachInstancesToLoadBalancerOutput {
+ s.Operations = v
+ return s
+}
+
+type AttachLoadBalancerTlsCertificateInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of your SSL/TLS certificate.
+ //
+ // CertificateName is a required field
+ CertificateName *string `locationName:"certificateName" type:"string" required:"true"`
+
+ // The name of the load balancer to which you want to associate the SSL/TLS
+ // certificate.
+ //
+ // LoadBalancerName is a required field
+ LoadBalancerName *string `locationName:"loadBalancerName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AttachLoadBalancerTlsCertificateInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AttachLoadBalancerTlsCertificateInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AttachLoadBalancerTlsCertificateInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AttachLoadBalancerTlsCertificateInput"}
+ if s.CertificateName == nil {
+ invalidParams.Add(request.NewErrParamRequired("CertificateName"))
+ }
+ if s.LoadBalancerName == nil {
+ invalidParams.Add(request.NewErrParamRequired("LoadBalancerName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetCertificateName sets the CertificateName field's value.
+func (s *AttachLoadBalancerTlsCertificateInput) SetCertificateName(v string) *AttachLoadBalancerTlsCertificateInput {
+ s.CertificateName = &v
+ return s
+}
+
+// SetLoadBalancerName sets the LoadBalancerName field's value.
+func (s *AttachLoadBalancerTlsCertificateInput) SetLoadBalancerName(v string) *AttachLoadBalancerTlsCertificateInput {
+ s.LoadBalancerName = &v
+ return s
+}
+
+type AttachLoadBalancerTlsCertificateOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ //
+ // These SSL/TLS certificates are only usable by Lightsail load balancers. You
+ // can't get the certificate and use it for another purpose.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s AttachLoadBalancerTlsCertificateOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AttachLoadBalancerTlsCertificateOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *AttachLoadBalancerTlsCertificateOutput) SetOperations(v []*Operation) *AttachLoadBalancerTlsCertificateOutput {
+ s.Operations = v
+ return s
+}
+
+type AttachStaticIpInput struct {
+ _ struct{} `type:"structure"`
+
+ // The instance name to which you want to attach the static IP address.
+ //
+ // InstanceName is a required field
+ InstanceName *string `locationName:"instanceName" type:"string" required:"true"`
+
+ // The name of the static IP.
+ //
+ // StaticIpName is a required field
+ StaticIpName *string `locationName:"staticIpName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AttachStaticIpInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AttachStaticIpInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AttachStaticIpInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AttachStaticIpInput"}
+ if s.InstanceName == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceName"))
+ }
+ if s.StaticIpName == nil {
+ invalidParams.Add(request.NewErrParamRequired("StaticIpName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetInstanceName sets the InstanceName field's value.
+func (s *AttachStaticIpInput) SetInstanceName(v string) *AttachStaticIpInput {
+ s.InstanceName = &v
+ return s
+}
+
+// SetStaticIpName sets the StaticIpName field's value.
+func (s *AttachStaticIpInput) SetStaticIpName(v string) *AttachStaticIpInput {
+ s.StaticIpName = &v
+ return s
+}
+
+type AttachStaticIpOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s AttachStaticIpOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AttachStaticIpOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *AttachStaticIpOutput) SetOperations(v []*Operation) *AttachStaticIpOutput {
+ s.Operations = v
+ return s
+}
+
+// Describes a block storage disk that is attached to an instance, and is included
+// in an automatic snapshot.
+type AttachedDisk struct {
+ _ struct{} `type:"structure"`
+
+ // The path of the disk (e.g., /dev/xvdf).
+ Path *string `locationName:"path" type:"string"`
+
+ // The size of the disk in GB.
+ SizeInGb *int64 `locationName:"sizeInGb" type:"integer"`
+}
+
+// String returns the string representation
+func (s AttachedDisk) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AttachedDisk) GoString() string {
+ return s.String()
+}
+
+// SetPath sets the Path field's value.
+func (s *AttachedDisk) SetPath(v string) *AttachedDisk {
+ s.Path = &v
+ return s
+}
+
+// SetSizeInGb sets the SizeInGb field's value.
+func (s *AttachedDisk) SetSizeInGb(v int64) *AttachedDisk {
+ s.SizeInGb = &v
+ return s
+}
+
+// Describes a request to enable or modify the automatic snapshot add-on for
+// an Amazon Lightsail instance or disk.
+//
+// When you modify the automatic snapshot time for a resource, it is typically
+// effective immediately except under the following conditions:
+//
+// * If an automatic snapshot has been created for the current day, and you
+// change the snapshot time to a later time of day, then the new snapshot
+// time will be effective the following day. This ensures that two snapshots
+// are not created for the current day.
+//
+// * If an automatic snapshot has not yet been created for the current day,
+// and you change the snapshot time to an earlier time of day, then the new
+// snapshot time will be effective the following day and a snapshot is automatically
+// created at the previously set time for the current day. This ensures that
+// a snapshot is created for the current day.
+//
+// * If an automatic snapshot has not yet been created for the current day,
+// and you change the snapshot time to a time that is within 30 minutes from
+// your current time, then the new snapshot time will be effective the following
+// day and a snapshot is automatically created at the previously set time
+// for the current day. This ensures that a snapshot is created for the current
+// day, because 30 minutes is required between your current time and the
+// new snapshot time that you specify.
+//
+// * If an automatic snapshot is scheduled to be created within 30 minutes
+// from your current time and you change the snapshot time, then the new
+// snapshot time will be effective the following day and a snapshot is automatically
+// created at the previously set time for the current day. This ensures that
+// a snapshot is created for the current day, because 30 minutes is required
+// between your current time and the new snapshot time that you specify.
+type AutoSnapshotAddOnRequest struct {
+ _ struct{} `type:"structure"`
+
+ // The daily time when an automatic snapshot will be created.
+ //
+ // Constraints:
+ //
+ // * Must be in HH:00 format, and in an hourly increment.
+ //
+ // * Specified in Coordinated Universal Time (UTC).
+ //
+ // * The snapshot will be automatically created between the time specified
+ // and up to 45 minutes after.
+ SnapshotTimeOfDay *string `locationName:"snapshotTimeOfDay" type:"string"`
+}
+
+// String returns the string representation
+func (s AutoSnapshotAddOnRequest) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AutoSnapshotAddOnRequest) GoString() string {
+ return s.String()
+}
+
+// SetSnapshotTimeOfDay sets the SnapshotTimeOfDay field's value.
+func (s *AutoSnapshotAddOnRequest) SetSnapshotTimeOfDay(v string) *AutoSnapshotAddOnRequest {
+ s.SnapshotTimeOfDay = &v
+ return s
+}
+
+// Describes an automatic snapshot.
+type AutoSnapshotDetails struct {
+ _ struct{} `type:"structure"`
+
+ // The timestamp when the automatic snapshot was created.
+ CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`
+
+ // The date of the automatic snapshot in YYYY-MM-DD format.
+ Date *string `locationName:"date" type:"string"`
+
+ // An array of objects that describe the block storage disks attached to the
+ // instance when the automatic snapshot was created.
+ FromAttachedDisks []*AttachedDisk `locationName:"fromAttachedDisks" type:"list"`
+
+ // The status of the automatic snapshot.
+ Status *string `locationName:"status" type:"string" enum:"AutoSnapshotStatus"`
+}
+
+// String returns the string representation
+func (s AutoSnapshotDetails) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AutoSnapshotDetails) GoString() string {
+ return s.String()
+}
+
+// SetCreatedAt sets the CreatedAt field's value.
+func (s *AutoSnapshotDetails) SetCreatedAt(v time.Time) *AutoSnapshotDetails {
+ s.CreatedAt = &v
+ return s
+}
+
+// SetDate sets the Date field's value.
+func (s *AutoSnapshotDetails) SetDate(v string) *AutoSnapshotDetails {
+ s.Date = &v
+ return s
+}
+
+// SetFromAttachedDisks sets the FromAttachedDisks field's value.
+func (s *AutoSnapshotDetails) SetFromAttachedDisks(v []*AttachedDisk) *AutoSnapshotDetails {
+ s.FromAttachedDisks = v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *AutoSnapshotDetails) SetStatus(v string) *AutoSnapshotDetails {
+ s.Status = &v
+ return s
+}
+
+// Describes an Availability Zone.
+type AvailabilityZone struct {
+ _ struct{} `type:"structure"`
+
+ // The state of the Availability Zone.
+ State *string `locationName:"state" type:"string"`
+
+ // The name of the Availability Zone. The format is us-east-2a (case-sensitive).
+ ZoneName *string `locationName:"zoneName" type:"string"`
+}
+
+// String returns the string representation
+func (s AvailabilityZone) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AvailabilityZone) GoString() string {
+ return s.String()
+}
+
+// SetState sets the State field's value.
+func (s *AvailabilityZone) SetState(v string) *AvailabilityZone {
+ s.State = &v
+ return s
+}
+
+// SetZoneName sets the ZoneName field's value.
+func (s *AvailabilityZone) SetZoneName(v string) *AvailabilityZone {
+ s.ZoneName = &v
+ return s
+}
+
+// Describes a blueprint (a virtual private server image).
+type Blueprint struct {
+ _ struct{} `type:"structure"`
+
+ // The ID for the virtual private server image (e.g., app_wordpress_4_4 or app_lamp_7_0).
+ BlueprintId *string `locationName:"blueprintId" type:"string"`
+
+ // The description of the blueprint.
+ Description *string `locationName:"description" type:"string"`
+
+ // The group name of the blueprint (e.g., amazon-linux).
+ Group *string `locationName:"group" type:"string"`
+
+ // A Boolean value indicating whether the blueprint is active. Inactive blueprints
+ // are listed to support customers with existing instances but are not necessarily
+ // available for launch of new instances. Blueprints are marked inactive when
+ // they become outdated due to operating system updates or new application releases.
+ IsActive *bool `locationName:"isActive" type:"boolean"`
+
+ // The end-user license agreement URL for the image or blueprint.
+ LicenseUrl *string `locationName:"licenseUrl" type:"string"`
+
+ // The minimum bundle power required to run this blueprint. For example, you
+ // need a bundle with a power value of 500 or more to create an instance that
+ // uses a blueprint with a minimum power value of 500. 0 indicates that the
+ // blueprint runs on all instance sizes.
+ MinPower *int64 `locationName:"minPower" type:"integer"`
+
+ // The friendly name of the blueprint (e.g., Amazon Linux).
+ Name *string `locationName:"name" type:"string"`
+
+ // The operating system platform (either Linux/Unix-based or Windows Server-based)
+ // of the blueprint.
+ Platform *string `locationName:"platform" type:"string" enum:"InstancePlatform"`
+
+ // The product URL to learn more about the image or blueprint.
+ ProductUrl *string `locationName:"productUrl" type:"string"`
+
+ // The type of the blueprint (e.g., os or app).
+ Type *string `locationName:"type" type:"string" enum:"BlueprintType"`
+
+ // The version number of the operating system, application, or stack (e.g.,
+ // 2016.03.0).
+ Version *string `locationName:"version" type:"string"`
+
+ // The version code.
+ VersionCode *string `locationName:"versionCode" type:"string"`
+}
+
+// String returns the string representation
+func (s Blueprint) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Blueprint) GoString() string {
+ return s.String()
+}
+
+// SetBlueprintId sets the BlueprintId field's value.
+func (s *Blueprint) SetBlueprintId(v string) *Blueprint {
+ s.BlueprintId = &v
+ return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *Blueprint) SetDescription(v string) *Blueprint {
+ s.Description = &v
+ return s
+}
+
+// SetGroup sets the Group field's value.
+func (s *Blueprint) SetGroup(v string) *Blueprint {
+ s.Group = &v
+ return s
+}
+
+// SetIsActive sets the IsActive field's value.
+func (s *Blueprint) SetIsActive(v bool) *Blueprint {
+ s.IsActive = &v
+ return s
+}
+
+// SetLicenseUrl sets the LicenseUrl field's value.
+func (s *Blueprint) SetLicenseUrl(v string) *Blueprint {
+ s.LicenseUrl = &v
+ return s
+}
+
+// SetMinPower sets the MinPower field's value.
+func (s *Blueprint) SetMinPower(v int64) *Blueprint {
+ s.MinPower = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *Blueprint) SetName(v string) *Blueprint {
+ s.Name = &v
+ return s
+}
+
+// SetPlatform sets the Platform field's value.
+func (s *Blueprint) SetPlatform(v string) *Blueprint {
+ s.Platform = &v
+ return s
+}
+
+// SetProductUrl sets the ProductUrl field's value.
+func (s *Blueprint) SetProductUrl(v string) *Blueprint {
+ s.ProductUrl = &v
+ return s
+}
+
+// SetType sets the Type field's value.
+func (s *Blueprint) SetType(v string) *Blueprint {
+ s.Type = &v
+ return s
+}
+
+// SetVersion sets the Version field's value.
+func (s *Blueprint) SetVersion(v string) *Blueprint {
+ s.Version = &v
+ return s
+}
+
+// SetVersionCode sets the VersionCode field's value.
+func (s *Blueprint) SetVersionCode(v string) *Blueprint {
+ s.VersionCode = &v
+ return s
+}
+
+// Describes an Amazon Lightsail bucket.
+type Bucket struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether the bundle that is currently applied to a bucket can be
+ // changed to another bundle.
+ //
+ // You can update a bucket's bundle only one time within a monthly AWS billing
+ // cycle.
+ //
+ // Use the UpdateBucketBundle action to change a bucket's bundle.
+ AbleToUpdateBundle *bool `locationName:"ableToUpdateBundle" type:"boolean"`
+
+ // An object that describes the access rules of the bucket.
+ AccessRules *AccessRules `locationName:"accessRules" type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the bucket.
+ Arn *string `locationName:"arn" type:"string"`
+
+ // The ID of the bundle currently applied to the bucket.
+ //
+ // A bucket bundle specifies the monthly cost, storage space, and data transfer
+ // quota for a bucket.
+ //
+ // Use the UpdateBucketBundle action to change the bundle of a bucket.
+ BundleId *string `locationName:"bundleId" type:"string"`
+
+ // The timestamp when the distribution was created.
+ CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`
+
+ // Describes the resource location.
+ Location *ResourceLocation `locationName:"location" type:"structure"`
+
+ // The name of the bucket.
+ Name *string `locationName:"name" min:"3" type:"string"`
+
+ // Indicates whether object versioning is enabled for the bucket.
+ //
+ // The following options can be configured:
+ //
+ // * Enabled - Object versioning is enabled.
+ //
+ // * Suspended - Object versioning was previously enabled but is currently
+ // suspended. Existing object versions are retained.
+ //
+ // * NeverEnabled - Object versioning has never been enabled.
+ ObjectVersioning *string `locationName:"objectVersioning" type:"string"`
+
+ // An array of strings that specify the AWS account IDs that have read-only
+ // access to the bucket.
+ ReadonlyAccessAccounts []*string `locationName:"readonlyAccessAccounts" type:"list"`
+
+ // The Lightsail resource type of the bucket (for example, Bucket).
+ ResourceType *string `locationName:"resourceType" type:"string"`
+
+ // An array of objects that describe Lightsail instances that have access to
+ // the bucket.
+ //
+ // Use the SetResourceAccessForBucket action to update the instances that have
+ // access to a bucket.
+ ResourcesReceivingAccess []*ResourceReceivingAccess `locationName:"resourcesReceivingAccess" type:"list"`
+
+ // An object that describes the state of the bucket.
+ State *BucketState `locationName:"state" type:"structure"`
+
+ // The support code for a bucket. Include this code in your email to support
+ // when you have questions about a Lightsail bucket. This code enables our support
+ // team to look up your Lightsail information more easily.
+ SupportCode *string `locationName:"supportCode" type:"string"`
+
+ // The tag keys and optional values for the bucket. For more information, see
+ // Tags in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-tags)
+ // in the Amazon Lightsail Developer Guide.
+ Tags []*Tag `locationName:"tags" type:"list"`
+
+ // The URL of the bucket.
+ Url *string `locationName:"url" type:"string"`
+}
+
+// String returns the string representation
+func (s Bucket) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Bucket) GoString() string {
+ return s.String()
+}
+
+// SetAbleToUpdateBundle sets the AbleToUpdateBundle field's value.
+func (s *Bucket) SetAbleToUpdateBundle(v bool) *Bucket {
+ s.AbleToUpdateBundle = &v
+ return s
+}
+
+// SetAccessRules sets the AccessRules field's value.
+func (s *Bucket) SetAccessRules(v *AccessRules) *Bucket {
+ s.AccessRules = v
+ return s
+}
+
+// SetArn sets the Arn field's value.
+func (s *Bucket) SetArn(v string) *Bucket {
+ s.Arn = &v
+ return s
+}
+
+// SetBundleId sets the BundleId field's value.
+func (s *Bucket) SetBundleId(v string) *Bucket {
+ s.BundleId = &v
+ return s
+}
+
+// SetCreatedAt sets the CreatedAt field's value.
+func (s *Bucket) SetCreatedAt(v time.Time) *Bucket {
+ s.CreatedAt = &v
+ return s
+}
+
+// SetLocation sets the Location field's value.
+func (s *Bucket) SetLocation(v *ResourceLocation) *Bucket {
+ s.Location = v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *Bucket) SetName(v string) *Bucket {
+ s.Name = &v
+ return s
+}
+
+// SetObjectVersioning sets the ObjectVersioning field's value.
+func (s *Bucket) SetObjectVersioning(v string) *Bucket {
+ s.ObjectVersioning = &v
+ return s
+}
+
+// SetReadonlyAccessAccounts sets the ReadonlyAccessAccounts field's value.
+func (s *Bucket) SetReadonlyAccessAccounts(v []*string) *Bucket {
+ s.ReadonlyAccessAccounts = v
+ return s
+}
+
+// SetResourceType sets the ResourceType field's value.
+func (s *Bucket) SetResourceType(v string) *Bucket {
+ s.ResourceType = &v
+ return s
+}
+
+// SetResourcesReceivingAccess sets the ResourcesReceivingAccess field's value.
+func (s *Bucket) SetResourcesReceivingAccess(v []*ResourceReceivingAccess) *Bucket {
+ s.ResourcesReceivingAccess = v
+ return s
+}
+
+// SetState sets the State field's value.
+func (s *Bucket) SetState(v *BucketState) *Bucket {
+ s.State = v
+ return s
+}
+
+// SetSupportCode sets the SupportCode field's value.
+func (s *Bucket) SetSupportCode(v string) *Bucket {
+ s.SupportCode = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *Bucket) SetTags(v []*Tag) *Bucket {
+ s.Tags = v
+ return s
+}
+
+// SetUrl sets the Url field's value.
+func (s *Bucket) SetUrl(v string) *Bucket {
+ s.Url = &v
+ return s
+}
+
+// Describes the specifications of a bundle that can be applied to an Amazon
+// Lightsail bucket.
+//
+// A bucket bundle specifies the monthly cost, storage space, and data transfer
+// quota for a bucket.
+type BucketBundle struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the bundle.
+ BundleId *string `locationName:"bundleId" type:"string"`
+
+ // Indicates whether the bundle is active. Use for a new or existing bucket.
+ IsActive *bool `locationName:"isActive" type:"boolean"`
+
+ // The name of the bundle.
+ Name *string `locationName:"name" type:"string"`
+
+ // The monthly price of the bundle, in US dollars.
+ Price *float64 `locationName:"price" type:"float"`
+
+ // The storage size of the bundle, in GB.
+ StoragePerMonthInGb *int64 `locationName:"storagePerMonthInGb" type:"integer"`
+
+ // The monthly network transfer quota of the bundle.
+ TransferPerMonthInGb *int64 `locationName:"transferPerMonthInGb" type:"integer"`
+}
+
+// String returns the string representation
+func (s BucketBundle) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BucketBundle) GoString() string {
+ return s.String()
+}
+
+// SetBundleId sets the BundleId field's value.
+func (s *BucketBundle) SetBundleId(v string) *BucketBundle {
+ s.BundleId = &v
+ return s
+}
+
+// SetIsActive sets the IsActive field's value.
+func (s *BucketBundle) SetIsActive(v bool) *BucketBundle {
+ s.IsActive = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *BucketBundle) SetName(v string) *BucketBundle {
+ s.Name = &v
+ return s
+}
+
+// SetPrice sets the Price field's value.
+func (s *BucketBundle) SetPrice(v float64) *BucketBundle {
+ s.Price = &v
+ return s
+}
+
+// SetStoragePerMonthInGb sets the StoragePerMonthInGb field's value.
+func (s *BucketBundle) SetStoragePerMonthInGb(v int64) *BucketBundle {
+ s.StoragePerMonthInGb = &v
+ return s
+}
+
+// SetTransferPerMonthInGb sets the TransferPerMonthInGb field's value.
+func (s *BucketBundle) SetTransferPerMonthInGb(v int64) *BucketBundle {
+ s.TransferPerMonthInGb = &v
+ return s
+}
+
+// Describes the state of an Amazon Lightsail bucket.
+type BucketState struct {
+ _ struct{} `type:"structure"`
+
+ // The state code of the bucket.
+ //
+ // The following codes are possible:
+ //
+ // * OK - The bucket is in a running state.
+ //
+ // * Unknown - Creation of the bucket might have timed-out. You might want
+ // to delete the bucket and create a new one.
+ Code *string `locationName:"code" type:"string"`
+
+ // A message that describes the state of the bucket.
+ Message *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation
+func (s BucketState) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BucketState) GoString() string {
+ return s.String()
+}
+
+// SetCode sets the Code field's value.
+func (s *BucketState) SetCode(v string) *BucketState {
+ s.Code = &v
+ return s
+}
+
+// SetMessage sets the Message field's value.
+func (s *BucketState) SetMessage(v string) *BucketState {
+ s.Message = &v
+ return s
+}
+
+// Describes a bundle, which is a set of specs describing your virtual private
+// server (or instance).
+type Bundle struct {
+ _ struct{} `type:"structure"`
+
+ // The bundle ID (e.g., micro_1_0).
+ BundleId *string `locationName:"bundleId" type:"string"`
+
+ // The number of vCPUs included in the bundle (e.g., 2).
+ CpuCount *int64 `locationName:"cpuCount" type:"integer"`
+
+ // The size of the SSD (e.g., 30).
+ DiskSizeInGb *int64 `locationName:"diskSizeInGb" type:"integer"`
+
+ // The Amazon EC2 instance type (e.g., t2.micro).
+ InstanceType *string `locationName:"instanceType" type:"string"`
+
+ // A Boolean value indicating whether the bundle is active.
+ IsActive *bool `locationName:"isActive" type:"boolean"`
+
+ // A friendly name for the bundle (e.g., Micro).
+ Name *string `locationName:"name" type:"string"`
+
+ // A numeric value that represents the power of the bundle (e.g., 500). You
+ // can use the bundle's power value in conjunction with a blueprint's minimum
+ // power value to determine whether the blueprint will run on the bundle. For
+ // example, you need a bundle with a power value of 500 or more to create an
+ // instance that uses a blueprint with a minimum power value of 500.
+ Power *int64 `locationName:"power" type:"integer"`
+
+ // The price in US dollars (e.g., 5.0) of the bundle.
+ Price *float64 `locationName:"price" type:"float"`
+
+ // The amount of RAM in GB (e.g., 2.0).
+ RamSizeInGb *float64 `locationName:"ramSizeInGb" type:"float"`
+
+ // The operating system platform (Linux/Unix-based or Windows Server-based)
+ // that the bundle supports. You can only launch a WINDOWS bundle on a blueprint
+ // that supports the WINDOWS platform. LINUX_UNIX blueprints require a LINUX_UNIX
+ // bundle.
+ SupportedPlatforms []*string `locationName:"supportedPlatforms" type:"list"`
+
+ // The data transfer rate per month in GB (e.g., 2000).
+ TransferPerMonthInGb *int64 `locationName:"transferPerMonthInGb" type:"integer"`
+}
+
+// String returns the string representation
+func (s Bundle) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Bundle) GoString() string {
+ return s.String()
+}
+
+// SetBundleId sets the BundleId field's value.
+func (s *Bundle) SetBundleId(v string) *Bundle {
+ s.BundleId = &v
+ return s
+}
+
+// SetCpuCount sets the CpuCount field's value.
+func (s *Bundle) SetCpuCount(v int64) *Bundle {
+ s.CpuCount = &v
+ return s
+}
+
+// SetDiskSizeInGb sets the DiskSizeInGb field's value.
+func (s *Bundle) SetDiskSizeInGb(v int64) *Bundle {
+ s.DiskSizeInGb = &v
+ return s
+}
+
+// SetInstanceType sets the InstanceType field's value.
+func (s *Bundle) SetInstanceType(v string) *Bundle {
+ s.InstanceType = &v
+ return s
+}
+
+// SetIsActive sets the IsActive field's value.
+func (s *Bundle) SetIsActive(v bool) *Bundle {
+ s.IsActive = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *Bundle) SetName(v string) *Bundle {
+ s.Name = &v
+ return s
+}
+
+// SetPower sets the Power field's value.
+func (s *Bundle) SetPower(v int64) *Bundle {
+ s.Power = &v
+ return s
+}
+
+// SetPrice sets the Price field's value.
+func (s *Bundle) SetPrice(v float64) *Bundle {
+ s.Price = &v
+ return s
+}
+
+// SetRamSizeInGb sets the RamSizeInGb field's value.
+func (s *Bundle) SetRamSizeInGb(v float64) *Bundle {
+ s.RamSizeInGb = &v
+ return s
+}
+
+// SetSupportedPlatforms sets the SupportedPlatforms field's value.
+func (s *Bundle) SetSupportedPlatforms(v []*string) *Bundle {
+ s.SupportedPlatforms = v
+ return s
+}
+
+// SetTransferPerMonthInGb sets the TransferPerMonthInGb field's value.
+func (s *Bundle) SetTransferPerMonthInGb(v int64) *Bundle {
+ s.TransferPerMonthInGb = &v
+ return s
+}
+
+// Describes the default cache behavior of an Amazon Lightsail content delivery
+// network (CDN) distribution.
+type CacheBehavior struct {
+ _ struct{} `type:"structure"`
+
+ // The cache behavior of the distribution.
+ //
+ // The following cache behaviors can be specified:
+ //
+ // * cache - This option is best for static sites. When specified, your distribution
+ // caches and serves your entire website as static content. This behavior
+ // is ideal for websites with static content that doesn't change depending
+ // on who views it, or for websites that don't use cookies, headers, or query
+ // strings to personalize content.
+ //
+ // * dont-cache - This option is best for sites that serve a mix of static
+	// and dynamic content. When specified, your distribution caches and serves
+ // only the content that is specified in the distribution's CacheBehaviorPerPath
+ // parameter. This behavior is ideal for websites or web applications that
+ // use cookies, headers, and query strings to personalize content for individual
+ // users.
+ Behavior *string `locationName:"behavior" type:"string" enum:"BehaviorEnum"`
+}
+
+// String returns the string representation
+func (s CacheBehavior) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CacheBehavior) GoString() string {
+ return s.String()
+}
+
+// SetBehavior sets the Behavior field's value.
+func (s *CacheBehavior) SetBehavior(v string) *CacheBehavior {
+ s.Behavior = &v
+ return s
+}
+
+// Describes the per-path cache behavior of an Amazon Lightsail content delivery
+// network (CDN) distribution.
+//
+// A per-path cache behavior is used to override, or add an exception to, the
+// default cache behavior of a distribution. For example, if the cacheBehavior
+// is set to cache, then a per-path cache behavior can be used to specify a
+// directory, file, or file type that your distribution will cache. Alternately,
+// if the distribution's cacheBehavior is dont-cache, then a per-path cache
+// behavior can be used to specify a directory, file, or file type that your
+// distribution will not cache.
+//
+// A per-path cache behavior takes precedence over the distribution's default cache behavior.
+type CacheBehaviorPerPath struct {
+ _ struct{} `type:"structure"`
+
+ // The cache behavior for the specified path.
+ //
+ // You can specify one of the following per-path cache behaviors:
+ //
+ // * cache - This behavior caches the specified path.
+ //
+ // * dont-cache - This behavior doesn't cache the specified path.
+ Behavior *string `locationName:"behavior" type:"string" enum:"BehaviorEnum"`
+
+	// The path to a directory or file to cache, or not cache. Use an asterisk
+ // symbol to specify wildcard directories (path/to/assets/*), and file types
+	// (*.html, *.jpg, *.js). Directories and file paths are case-sensitive.
+ //
+ // Examples:
+ //
+ // * Specify the following to cache all files in the document root of an
+ // Apache web server running on a Lightsail instance. var/www/html/
+ //
+ // * Specify the following file to cache only the index page in the document
+ // root of an Apache web server. var/www/html/index.html
+ //
+ // * Specify the following to cache only the .html files in the document
+ // root of an Apache web server. var/www/html/*.html
+ //
+ // * Specify the following to cache only the .jpg, .png, and .gif files in
+ // the images sub-directory of the document root of an Apache web server.
+ // var/www/html/images/*.jpg var/www/html/images/*.png var/www/html/images/*.gif
+ // Specify the following to cache all files in the images sub-directory of
+ // the document root of an Apache web server. var/www/html/images/
+ Path *string `locationName:"path" type:"string"`
+}
+
+// String returns the string representation
+func (s CacheBehaviorPerPath) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CacheBehaviorPerPath) GoString() string {
+ return s.String()
+}
+
+// SetBehavior sets the Behavior field's value.
+func (s *CacheBehaviorPerPath) SetBehavior(v string) *CacheBehaviorPerPath {
+ s.Behavior = &v
+ return s
+}
+
+// SetPath sets the Path field's value.
+func (s *CacheBehaviorPerPath) SetPath(v string) *CacheBehaviorPerPath {
+ s.Path = &v
+ return s
+}
+
+// Describes the cache settings of an Amazon Lightsail content delivery network
+// (CDN) distribution.
+//
+// These settings apply only to your distribution's cacheBehaviors (including
+// the defaultCacheBehavior) that have a behavior of cache.
+type CacheSettings struct {
+ _ struct{} `type:"structure"`
+
+ // The HTTP methods that are processed and forwarded to the distribution's origin.
+ //
+ // You can specify the following options:
+ //
+ // * GET,HEAD - The distribution forwards the GET and HEAD methods.
+ //
+ // * GET,HEAD,OPTIONS - The distribution forwards the GET, HEAD, and OPTIONS
+ // methods.
+ //
+ // * GET,HEAD,OPTIONS,PUT,PATCH,POST,DELETE - The distribution forwards the
+ // GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE methods.
+ //
+ // If you specify the third option, you might need to restrict access to your
+ // distribution's origin so users can't perform operations that you don't want
+ // them to. For example, you might not want users to have permission to delete
+ // objects from your origin.
+ AllowedHTTPMethods *string `locationName:"allowedHTTPMethods" type:"string"`
+
+ // The HTTP method responses that are cached by your distribution.
+ //
+ // You can specify the following options:
+ //
+ // * GET,HEAD - The distribution caches responses to the GET and HEAD methods.
+ //
+ // * GET,HEAD,OPTIONS - The distribution caches responses to the GET, HEAD,
+ // and OPTIONS methods.
+ CachedHTTPMethods *string `locationName:"cachedHTTPMethods" type:"string"`
+
+ // The default amount of time that objects stay in the distribution's cache
+ // before the distribution forwards another request to the origin to determine
+ // whether the content has been updated.
+ //
+ // The value specified applies only when the origin does not add HTTP headers
+ // such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects.
+ DefaultTTL *int64 `locationName:"defaultTTL" type:"long"`
+
+ // An object that describes the cookies that are forwarded to the origin. Your
+ // content is cached based on the cookies that are forwarded.
+ ForwardedCookies *CookieObject `locationName:"forwardedCookies" type:"structure"`
+
+ // An object that describes the headers that are forwarded to the origin. Your
+ // content is cached based on the headers that are forwarded.
+ ForwardedHeaders *HeaderObject `locationName:"forwardedHeaders" type:"structure"`
+
+ // An object that describes the query strings that are forwarded to the origin.
+ // Your content is cached based on the query strings that are forwarded.
+ ForwardedQueryStrings *QueryStringObject `locationName:"forwardedQueryStrings" type:"structure"`
+
+ // The maximum amount of time that objects stay in the distribution's cache
+ // before the distribution forwards another request to the origin to determine
+ // whether the object has been updated.
+ //
+ // The value specified applies only when the origin adds HTTP headers such as
+ // Cache-Control max-age, Cache-Control s-maxage, and Expires to objects.
+ MaximumTTL *int64 `locationName:"maximumTTL" type:"long"`
+
+ // The minimum amount of time that objects stay in the distribution's cache
+ // before the distribution forwards another request to the origin to determine
+ // whether the object has been updated.
+ //
+ // A value of 0 must be specified for minimumTTL if the distribution is configured
+ // to forward all headers to the origin.
+ MinimumTTL *int64 `locationName:"minimumTTL" type:"long"`
+}
+
+// String returns the string representation
+func (s CacheSettings) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CacheSettings) GoString() string {
+ return s.String()
+}
+
+// SetAllowedHTTPMethods sets the AllowedHTTPMethods field's value.
+func (s *CacheSettings) SetAllowedHTTPMethods(v string) *CacheSettings {
+ s.AllowedHTTPMethods = &v
+ return s
+}
+
+// SetCachedHTTPMethods sets the CachedHTTPMethods field's value.
+func (s *CacheSettings) SetCachedHTTPMethods(v string) *CacheSettings {
+ s.CachedHTTPMethods = &v
+ return s
+}
+
+// SetDefaultTTL sets the DefaultTTL field's value.
+func (s *CacheSettings) SetDefaultTTL(v int64) *CacheSettings {
+ s.DefaultTTL = &v
+ return s
+}
+
+// SetForwardedCookies sets the ForwardedCookies field's value.
+func (s *CacheSettings) SetForwardedCookies(v *CookieObject) *CacheSettings {
+ s.ForwardedCookies = v
+ return s
+}
+
+// SetForwardedHeaders sets the ForwardedHeaders field's value.
+func (s *CacheSettings) SetForwardedHeaders(v *HeaderObject) *CacheSettings {
+ s.ForwardedHeaders = v
+ return s
+}
+
+// SetForwardedQueryStrings sets the ForwardedQueryStrings field's value.
+func (s *CacheSettings) SetForwardedQueryStrings(v *QueryStringObject) *CacheSettings {
+ s.ForwardedQueryStrings = v
+ return s
+}
+
+// SetMaximumTTL sets the MaximumTTL field's value.
+func (s *CacheSettings) SetMaximumTTL(v int64) *CacheSettings {
+ s.MaximumTTL = &v
+ return s
+}
+
+// SetMinimumTTL sets the MinimumTTL field's value.
+func (s *CacheSettings) SetMinimumTTL(v int64) *CacheSettings {
+ s.MinimumTTL = &v
+ return s
+}
+
+// Describes the full details of an Amazon Lightsail SSL/TLS certificate.
+//
+// To get a summary of a certificate, use the GetCertificates action and omit
+// includeCertificateDetails from your request. The response will include only
+// the certificate Amazon Resource Name (ARN), certificate name, domain name,
+// and tags.
+type Certificate struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the certificate.
+ Arn *string `locationName:"arn" type:"string"`
+
+ // The timestamp when the certificate was created.
+ CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`
+
+ // The domain name of the certificate.
+ DomainName *string `locationName:"domainName" type:"string"`
+
+ // An array of objects that describe the domain validation records of the certificate.
+ DomainValidationRecords []*DomainValidationRecord `locationName:"domainValidationRecords" type:"list"`
+
+ // The renewal eligibility of the certificate.
+ EligibleToRenew *string `locationName:"eligibleToRenew" type:"string"`
+
+ // The number of Lightsail resources that the certificate is attached to.
+ InUseResourceCount *int64 `locationName:"inUseResourceCount" type:"integer"`
+
+ // The timestamp when the certificate was issued.
+ IssuedAt *time.Time `locationName:"issuedAt" type:"timestamp"`
+
+ // The certificate authority that issued the certificate.
+ IssuerCA *string `locationName:"issuerCA" type:"string"`
+
+ // The algorithm used to generate the key pair (the public and private key)
+ // of the certificate.
+ KeyAlgorithm *string `locationName:"keyAlgorithm" type:"string"`
+
+ // The name of the certificate (e.g., my-certificate).
+ Name *string `locationName:"name" type:"string"`
+
+ // The timestamp when the certificate expires.
+ NotAfter *time.Time `locationName:"notAfter" type:"timestamp"`
+
+ // The timestamp when the certificate is first valid.
+ NotBefore *time.Time `locationName:"notBefore" type:"timestamp"`
+
+ // An object that describes the status of the certificate renewal managed by
+ // Lightsail.
+ RenewalSummary *RenewalSummary `locationName:"renewalSummary" type:"structure"`
+
+ // The validation failure reason, if any, of the certificate.
+ //
+ // The following failure reasons are possible:
+ //
+ // * NO_AVAILABLE_CONTACTS - This failure applies to email validation, which
+ // is not available for Lightsail certificates.
+ //
+ // * ADDITIONAL_VERIFICATION_REQUIRED - Lightsail requires additional information
+ // to process this certificate request. This can happen as a fraud-protection
+ // measure, such as when the domain ranks within the Alexa top 1000 websites.
+ // To provide the required information, use the AWS Support Center (https://console.aws.amazon.com/support/home)
+ // to contact AWS Support. You cannot request a certificate for Amazon-owned
+ // domain names such as those ending in amazonaws.com, cloudfront.net, or
+ // elasticbeanstalk.com.
+ //
+ // * DOMAIN_NOT_ALLOWED - One or more of the domain names in the certificate
+ // request was reported as an unsafe domain by VirusTotal (https://www.virustotal.com/gui/home/url).
+ // To correct the problem, search for your domain name on the VirusTotal
+ // (https://www.virustotal.com/gui/home/url) website. If your domain is reported
+ // as suspicious, see Google Help for Hacked Websites (https://developers.google.com/web/fundamentals/security/hacked)
+ // to learn what you can do. If you believe that the result is a false positive,
+ // notify the organization that is reporting the domain. VirusTotal is an
+ // aggregate of several antivirus and URL scanners and cannot remove your
+ // domain from a block list itself. After you correct the problem and the
+ // VirusTotal registry has been updated, request a new certificate. If you
+ // see this error and your domain is not included in the VirusTotal list,
+ // visit the AWS Support Center (https://console.aws.amazon.com/support/home)
+ // and create a case.
+ //
+ // * INVALID_PUBLIC_DOMAIN - One or more of the domain names in the certificate
+ // request is not valid. Typically, this is because a domain name in the
+ // request is not a valid top-level domain. Try to request a certificate
+ // again, correcting any spelling errors or typos that were in the failed
+ // request, and ensure that all domain names in the request are for valid
+ // top-level domains. For example, you cannot request a certificate for example.invalidpublicdomain
+ // because invalidpublicdomain is not a valid top-level domain.
+ //
+ // * OTHER - Typically, this failure occurs when there is a typographical
+ // error in one or more of the domain names in the certificate request. Try
+ // to request a certificate again, correcting any spelling errors or typos
+ // that were in the failed request.
+ RequestFailureReason *string `locationName:"requestFailureReason" type:"string"`
+
+ // The reason the certificate was revoked. This value is present only when the
+ // certificate status is REVOKED.
+ RevocationReason *string `locationName:"revocationReason" type:"string"`
+
+ // The timestamp when the certificate was revoked. This value is present only
+ // when the certificate status is REVOKED.
+ RevokedAt *time.Time `locationName:"revokedAt" type:"timestamp"`
+
+ // The serial number of the certificate.
+ SerialNumber *string `locationName:"serialNumber" type:"string"`
+
+ // The validation status of the certificate.
+ Status *string `locationName:"status" type:"string" enum:"CertificateStatus"`
+
+ // An array of strings that specify the alternate domains (e.g., example2.com)
+ // and subdomains (e.g., blog.example.com) of the certificate.
+ SubjectAlternativeNames []*string `locationName:"subjectAlternativeNames" type:"list"`
+
+ // The support code. Include this code in your email to support when you have
+ // questions about your Lightsail certificate. This code enables our support
+ // team to look up your Lightsail information more easily.
+ SupportCode *string `locationName:"supportCode" type:"string"`
+
+ // The tag keys and optional values for the resource. For more information about
+ // tags in Lightsail, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-tags).
+ Tags []*Tag `locationName:"tags" type:"list"`
+}
+
+// String returns the string representation
+func (s Certificate) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Certificate) GoString() string {
+ return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *Certificate) SetArn(v string) *Certificate {
+ s.Arn = &v
+ return s
+}
+
+// SetCreatedAt sets the CreatedAt field's value.
+func (s *Certificate) SetCreatedAt(v time.Time) *Certificate {
+ s.CreatedAt = &v
+ return s
+}
+
+// SetDomainName sets the DomainName field's value.
+func (s *Certificate) SetDomainName(v string) *Certificate {
+ s.DomainName = &v
+ return s
+}
+
+// SetDomainValidationRecords sets the DomainValidationRecords field's value.
+func (s *Certificate) SetDomainValidationRecords(v []*DomainValidationRecord) *Certificate {
+ s.DomainValidationRecords = v
+ return s
+}
+
+// SetEligibleToRenew sets the EligibleToRenew field's value.
+func (s *Certificate) SetEligibleToRenew(v string) *Certificate {
+ s.EligibleToRenew = &v
+ return s
+}
+
+// SetInUseResourceCount sets the InUseResourceCount field's value.
+func (s *Certificate) SetInUseResourceCount(v int64) *Certificate {
+ s.InUseResourceCount = &v
+ return s
+}
+
+// SetIssuedAt sets the IssuedAt field's value.
+func (s *Certificate) SetIssuedAt(v time.Time) *Certificate {
+ s.IssuedAt = &v
+ return s
+}
+
+// SetIssuerCA sets the IssuerCA field's value.
+func (s *Certificate) SetIssuerCA(v string) *Certificate {
+ s.IssuerCA = &v
+ return s
+}
+
+// SetKeyAlgorithm sets the KeyAlgorithm field's value.
+func (s *Certificate) SetKeyAlgorithm(v string) *Certificate {
+ s.KeyAlgorithm = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *Certificate) SetName(v string) *Certificate {
+ s.Name = &v
+ return s
+}
+
+// SetNotAfter sets the NotAfter field's value.
+func (s *Certificate) SetNotAfter(v time.Time) *Certificate {
+ s.NotAfter = &v
+ return s
+}
+
+// SetNotBefore sets the NotBefore field's value.
+func (s *Certificate) SetNotBefore(v time.Time) *Certificate {
+ s.NotBefore = &v
+ return s
+}
+
+// SetRenewalSummary sets the RenewalSummary field's value.
+func (s *Certificate) SetRenewalSummary(v *RenewalSummary) *Certificate {
+ s.RenewalSummary = v
+ return s
+}
+
+// SetRequestFailureReason sets the RequestFailureReason field's value.
+func (s *Certificate) SetRequestFailureReason(v string) *Certificate {
+ s.RequestFailureReason = &v
+ return s
+}
+
+// SetRevocationReason sets the RevocationReason field's value.
+func (s *Certificate) SetRevocationReason(v string) *Certificate {
+ s.RevocationReason = &v
+ return s
+}
+
+// SetRevokedAt sets the RevokedAt field's value.
+func (s *Certificate) SetRevokedAt(v time.Time) *Certificate {
+ s.RevokedAt = &v
+ return s
+}
+
+// SetSerialNumber sets the SerialNumber field's value.
+func (s *Certificate) SetSerialNumber(v string) *Certificate {
+ s.SerialNumber = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *Certificate) SetStatus(v string) *Certificate {
+ s.Status = &v
+ return s
+}
+
+// SetSubjectAlternativeNames sets the SubjectAlternativeNames field's value.
+func (s *Certificate) SetSubjectAlternativeNames(v []*string) *Certificate {
+ s.SubjectAlternativeNames = v
+ return s
+}
+
+// SetSupportCode sets the SupportCode field's value.
+func (s *Certificate) SetSupportCode(v string) *Certificate {
+ s.SupportCode = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *Certificate) SetTags(v []*Tag) *Certificate {
+ s.Tags = v
+ return s
+}
+
+// Describes an Amazon Lightsail SSL/TLS certificate.
+type CertificateSummary struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the certificate.
+ CertificateArn *string `locationName:"certificateArn" type:"string"`
+
+ // An object that describes a certificate in detail.
+ CertificateDetail *Certificate `locationName:"certificateDetail" type:"structure"`
+
+ // The name of the certificate.
+ CertificateName *string `locationName:"certificateName" type:"string"`
+
+ // The domain name of the certificate.
+ DomainName *string `locationName:"domainName" type:"string"`
+
+ // The tag keys and optional values for the resource. For more information about
+ // tags in Lightsail, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-tags).
+ Tags []*Tag `locationName:"tags" type:"list"`
+}
+
+// String returns the string representation
+func (s CertificateSummary) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CertificateSummary) GoString() string {
+ return s.String()
+}
+
+// SetCertificateArn sets the CertificateArn field's value.
+func (s *CertificateSummary) SetCertificateArn(v string) *CertificateSummary {
+ s.CertificateArn = &v
+ return s
+}
+
+// SetCertificateDetail sets the CertificateDetail field's value.
+func (s *CertificateSummary) SetCertificateDetail(v *Certificate) *CertificateSummary {
+ s.CertificateDetail = v
+ return s
+}
+
+// SetCertificateName sets the CertificateName field's value.
+func (s *CertificateSummary) SetCertificateName(v string) *CertificateSummary {
+ s.CertificateName = &v
+ return s
+}
+
+// SetDomainName sets the DomainName field's value.
+func (s *CertificateSummary) SetDomainName(v string) *CertificateSummary {
+ s.DomainName = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *CertificateSummary) SetTags(v []*Tag) *CertificateSummary {
+ s.Tags = v
+ return s
+}
+
+type CloseInstancePublicPortsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the instance for which to close ports.
+ //
+ // InstanceName is a required field
+ InstanceName *string `locationName:"instanceName" type:"string" required:"true"`
+
+ // An object to describe the ports to close for the specified instance.
+ //
+ // PortInfo is a required field
+ PortInfo *PortInfo `locationName:"portInfo" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s CloseInstancePublicPortsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CloseInstancePublicPortsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CloseInstancePublicPortsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CloseInstancePublicPortsInput"}
+ if s.InstanceName == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceName"))
+ }
+ if s.PortInfo == nil {
+ invalidParams.Add(request.NewErrParamRequired("PortInfo"))
+ }
+ if s.PortInfo != nil {
+ if err := s.PortInfo.Validate(); err != nil {
+ invalidParams.AddNested("PortInfo", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetInstanceName sets the InstanceName field's value.
+func (s *CloseInstancePublicPortsInput) SetInstanceName(v string) *CloseInstancePublicPortsInput {
+ s.InstanceName = &v
+ return s
+}
+
+// SetPortInfo sets the PortInfo field's value.
+func (s *CloseInstancePublicPortsInput) SetPortInfo(v *PortInfo) *CloseInstancePublicPortsInput {
+ s.PortInfo = v
+ return s
+}
+
+type CloseInstancePublicPortsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An object that describes the result of the action, such as the status of
+ // the request, the timestamp of the request, and the resources affected by
+ // the request.
+ Operation *Operation `locationName:"operation" type:"structure"`
+}
+
+// String returns the string representation
+func (s CloseInstancePublicPortsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CloseInstancePublicPortsOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperation sets the Operation field's value.
+func (s *CloseInstancePublicPortsOutput) SetOperation(v *Operation) *CloseInstancePublicPortsOutput {
+ s.Operation = v
+ return s
+}
+
+// Describes a CloudFormation stack record created as a result of the create
+// cloud formation stack action.
+//
+// A CloudFormation stack record provides information about the AWS CloudFormation
+// stack used to create a new Amazon Elastic Compute Cloud instance from an
+// exported Lightsail instance snapshot.
+type CloudFormationStackRecord struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the CloudFormation stack record.
+ Arn *string `locationName:"arn" type:"string"`
+
+ // The date when the CloudFormation stack record was created.
+ CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`
+
+ // A list of objects describing the destination service, which is AWS CloudFormation,
+ // and the Amazon Resource Name (ARN) of the AWS CloudFormation stack.
+ DestinationInfo *DestinationInfo `locationName:"destinationInfo" type:"structure"`
+
+ // A list of objects describing the Availability Zone and AWS Region of the
+ // CloudFormation stack record.
+ Location *ResourceLocation `locationName:"location" type:"structure"`
+
+ // The name of the CloudFormation stack record. It starts with CloudFormationStackRecord
+ // followed by a GUID.
+ Name *string `locationName:"name" type:"string"`
+
+ // The Lightsail resource type (e.g., CloudFormationStackRecord).
+ ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"`
+
+ // A list of objects describing the source of the CloudFormation stack record.
+ SourceInfo []*CloudFormationStackRecordSourceInfo `locationName:"sourceInfo" type:"list"`
+
+ // The current state of the CloudFormation stack record.
+ State *string `locationName:"state" type:"string" enum:"RecordState"`
+}
+
+// String returns the string representation
+func (s CloudFormationStackRecord) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CloudFormationStackRecord) GoString() string {
+ return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *CloudFormationStackRecord) SetArn(v string) *CloudFormationStackRecord {
+ s.Arn = &v
+ return s
+}
+
+// SetCreatedAt sets the CreatedAt field's value.
+func (s *CloudFormationStackRecord) SetCreatedAt(v time.Time) *CloudFormationStackRecord {
+ s.CreatedAt = &v
+ return s
+}
+
+// SetDestinationInfo sets the DestinationInfo field's value.
+func (s *CloudFormationStackRecord) SetDestinationInfo(v *DestinationInfo) *CloudFormationStackRecord {
+ s.DestinationInfo = v
+ return s
+}
+
+// SetLocation sets the Location field's value.
+func (s *CloudFormationStackRecord) SetLocation(v *ResourceLocation) *CloudFormationStackRecord {
+ s.Location = v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *CloudFormationStackRecord) SetName(v string) *CloudFormationStackRecord {
+ s.Name = &v
+ return s
+}
+
+// SetResourceType sets the ResourceType field's value.
+func (s *CloudFormationStackRecord) SetResourceType(v string) *CloudFormationStackRecord {
+ s.ResourceType = &v
+ return s
+}
+
+// SetSourceInfo sets the SourceInfo field's value.
+func (s *CloudFormationStackRecord) SetSourceInfo(v []*CloudFormationStackRecordSourceInfo) *CloudFormationStackRecord {
+ s.SourceInfo = v
+ return s
+}
+
+// SetState sets the State field's value.
+func (s *CloudFormationStackRecord) SetState(v string) *CloudFormationStackRecord {
+ s.State = &v
+ return s
+}
+
+// Describes the source of a CloudFormation stack record (i.e., the export snapshot
+// record).
+type CloudFormationStackRecordSourceInfo struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the export snapshot record.
+ Arn *string `locationName:"arn" type:"string"`
+
+ // The name of the record.
+ Name *string `locationName:"name" type:"string"`
+
+ // The Lightsail resource type (e.g., ExportSnapshotRecord).
+ ResourceType *string `locationName:"resourceType" type:"string" enum:"CloudFormationStackRecordSourceType"`
+}
+
+// String returns the string representation
+func (s CloudFormationStackRecordSourceInfo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CloudFormationStackRecordSourceInfo) GoString() string {
+ return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *CloudFormationStackRecordSourceInfo) SetArn(v string) *CloudFormationStackRecordSourceInfo {
+ s.Arn = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *CloudFormationStackRecordSourceInfo) SetName(v string) *CloudFormationStackRecordSourceInfo {
+ s.Name = &v
+ return s
+}
+
+// SetResourceType sets the ResourceType field's value.
+func (s *CloudFormationStackRecordSourceInfo) SetResourceType(v string) *CloudFormationStackRecordSourceInfo {
+ s.ResourceType = &v
+ return s
+}
+
+// Describes a contact method.
+//
+// A contact method is a way to send you notifications. For more information,
+// see Notifications in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-notifications).
+type ContactMethod struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the contact method.
+ Arn *string `locationName:"arn" type:"string"`
+
+ // The destination of the contact method, such as an email address or a mobile
+ // phone number.
+ ContactEndpoint *string `locationName:"contactEndpoint" type:"string"`
+
+ // The timestamp when the contact method was created.
+ CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`
+
+ // Describes the resource location.
+ Location *ResourceLocation `locationName:"location" type:"structure"`
+
+ // The name of the contact method.
+ Name *string `locationName:"name" type:"string"`
+
+ // The protocol of the contact method, such as email or SMS (text messaging).
+ Protocol *string `locationName:"protocol" type:"string" enum:"ContactProtocol"`
+
+ // The Lightsail resource type (e.g., ContactMethod).
+ ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"`
+
+ // The current status of the contact method.
+ //
+ // A contact method has the following possible status:
+ //
+ // * PendingVerification - The contact method has not yet been verified,
+ // and the verification has not yet expired.
+ //
+ // * Valid - The contact method has been verified.
+ //
+ // * InValid - An attempt was made to verify the contact method, but the
+ // verification has expired.
+ Status *string `locationName:"status" type:"string" enum:"ContactMethodStatus"`
+
+ // The support code. Include this code in your email to support when you have
+ // questions about your Lightsail contact method. This code enables our support
+ // team to look up your Lightsail information more easily.
+ SupportCode *string `locationName:"supportCode" type:"string"`
+}
+
+// String returns the string representation
+func (s ContactMethod) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ContactMethod) GoString() string {
+ return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *ContactMethod) SetArn(v string) *ContactMethod {
+ s.Arn = &v
+ return s
+}
+
+// SetContactEndpoint sets the ContactEndpoint field's value.
+func (s *ContactMethod) SetContactEndpoint(v string) *ContactMethod {
+ s.ContactEndpoint = &v
+ return s
+}
+
+// SetCreatedAt sets the CreatedAt field's value.
+func (s *ContactMethod) SetCreatedAt(v time.Time) *ContactMethod {
+ s.CreatedAt = &v
+ return s
+}
+
+// SetLocation sets the Location field's value.
+func (s *ContactMethod) SetLocation(v *ResourceLocation) *ContactMethod {
+ s.Location = v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *ContactMethod) SetName(v string) *ContactMethod {
+ s.Name = &v
+ return s
+}
+
+// SetProtocol sets the Protocol field's value.
+func (s *ContactMethod) SetProtocol(v string) *ContactMethod {
+ s.Protocol = &v
+ return s
+}
+
+// SetResourceType sets the ResourceType field's value.
+func (s *ContactMethod) SetResourceType(v string) *ContactMethod {
+ s.ResourceType = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *ContactMethod) SetStatus(v string) *ContactMethod {
+ s.Status = &v
+ return s
+}
+
+// SetSupportCode sets the SupportCode field's value.
+func (s *ContactMethod) SetSupportCode(v string) *ContactMethod {
+ s.SupportCode = &v
+ return s
+}
+
+// Describes the settings of a container that will be launched, or that is launched,
+// to an Amazon Lightsail container service.
+type Container struct {
+ _ struct{} `type:"structure"`
+
+ // The launch command for the container.
+ Command []*string `locationName:"command" type:"list"`
+
+ // The environment variables of the container.
+ Environment map[string]*string `locationName:"environment" type:"map"`
+
+ // The name of the image used for the container.
+ //
+ // Container images sourced from your Lightsail container service, that are
+ // registered and stored on your service, start with a colon (:). For example,
+ // :container-service-1.mystaticwebsite.1. Container images sourced from a public
+ // registry like Docker Hub don't start with a colon. For example, nginx:latest
+ // or nginx.
+ Image *string `locationName:"image" type:"string"`
+
+ // The open firewall ports of the container.
+ Ports map[string]*string `locationName:"ports" type:"map"`
+}
+
+// String returns the string representation
+func (s Container) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Container) GoString() string {
+ return s.String()
+}
+
+// SetCommand sets the Command field's value.
+func (s *Container) SetCommand(v []*string) *Container {
+ s.Command = v
+ return s
+}
+
+// SetEnvironment sets the Environment field's value.
+func (s *Container) SetEnvironment(v map[string]*string) *Container {
+ s.Environment = v
+ return s
+}
+
+// SetImage sets the Image field's value.
+func (s *Container) SetImage(v string) *Container {
+ s.Image = &v
+ return s
+}
+
+// SetPorts sets the Ports field's value.
+func (s *Container) SetPorts(v map[string]*string) *Container {
+ s.Ports = v
+ return s
+}
+
+// Describes a container image that is registered to an Amazon Lightsail container
+// service.
+type ContainerImage struct {
+ _ struct{} `type:"structure"`
+
+ // The timestamp when the container image was created.
+ CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`
+
+ // The digest of the container image.
+ Digest *string `locationName:"digest" type:"string"`
+
+ // The name of the container image.
+ Image *string `locationName:"image" type:"string"`
+}
+
+// String returns the string representation
+func (s ContainerImage) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ContainerImage) GoString() string {
+ return s.String()
+}
+
+// SetCreatedAt sets the CreatedAt field's value.
+func (s *ContainerImage) SetCreatedAt(v time.Time) *ContainerImage {
+ s.CreatedAt = &v
+ return s
+}
+
+// SetDigest sets the Digest field's value.
+func (s *ContainerImage) SetDigest(v string) *ContainerImage {
+ s.Digest = &v
+ return s
+}
+
+// SetImage sets the Image field's value.
+func (s *ContainerImage) SetImage(v string) *ContainerImage {
+ s.Image = &v
+ return s
+}
+
+// Describes an Amazon Lightsail container service.
+type ContainerService struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the container service.
+ Arn *string `locationName:"arn" type:"string"`
+
+ // The name of the container service.
+ ContainerServiceName *string `locationName:"containerServiceName" min:"1" type:"string"`
+
+ // The timestamp when the container service was created.
+ CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`
+
+ // An object that describes the current container deployment of the container
+ // service.
+ CurrentDeployment *ContainerServiceDeployment `locationName:"currentDeployment" type:"structure"`
+
+ // A Boolean value indicating whether the container service is disabled.
+ IsDisabled *bool `locationName:"isDisabled" type:"boolean"`
+
+ // An object that describes the location of the container service, such as the
+ // AWS Region and Availability Zone.
+ Location *ResourceLocation `locationName:"location" type:"structure"`
+
+ // An object that describes the next deployment of the container service.
+ //
+ // This value is null when there is no deployment in a pending state.
+ NextDeployment *ContainerServiceDeployment `locationName:"nextDeployment" type:"structure"`
+
+ // The power specification of the container service.
+ //
+ // The power specifies the amount of RAM, the number of vCPUs, and the base
+ // price of the container service.
+ Power *string `locationName:"power" type:"string" enum:"ContainerServicePowerName"`
+
+ // The ID of the power of the container service.
+ PowerId *string `locationName:"powerId" type:"string"`
+
+ // The principal ARN of the container service.
+ //
+ // The principal ARN can be used to create a trust relationship between your
+ // standard AWS account and your Lightsail container service. This allows you
+ // to give your service permission to access resources in your standard AWS
+ // account.
+ PrincipalArn *string `locationName:"principalArn" type:"string"`
+
+ // The private domain name of the container service.
+ //
+ // The private domain name is accessible only by other resources within the
+ // default virtual private cloud (VPC) of your Lightsail account.
+ PrivateDomainName *string `locationName:"privateDomainName" type:"string"`
+
+ // The public domain name of the container service, such as example.com and
+ // www.example.com.
+ //
+ // You can specify up to four public domain names for a container service. The
+ // domain names that you specify are used when you create a deployment with
+ // a container configured as the public endpoint of your container service.
+ //
+ // If you don't specify public domain names, then you can use the default domain
+ // of the container service.
+ //
+ // You must create and validate an SSL/TLS certificate before you can use public
+ // domain names with your container service. Use the CreateCertificate action
+ // to create a certificate for the public domain names you want to use with
+ // your container service.
+ //
+ // See CreateContainerService or UpdateContainerService for information about
+ // how to specify public domain names for your Lightsail container service.
+ PublicDomainNames map[string][]*string `locationName:"publicDomainNames" type:"map"`
+
+ // The Lightsail resource type of the container service (i.e., ContainerService).
+ ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"`
+
+ // The scale specification of the container service.
+ //
+ // The scale specifies the allocated compute nodes of the container service.
+ Scale *int64 `locationName:"scale" min:"1" type:"integer"`
+
+ // The current state of the container service.
+ //
+ // The following container service states are possible:
+ //
+ // * PENDING - The container service is being created.
+ //
+ // * READY - The container service is running but it does not have an active
+ // container deployment.
+ //
+ // * DEPLOYING - The container service is launching a container deployment.
+ //
+ // * RUNNING - The container service is running and it has an active container
+ // deployment.
+ //
+ // * UPDATING - The container service capacity or its custom domains are
+ // being updated.
+ //
+ // * DELETING - The container service is being deleted.
+ //
+ // * DISABLED - The container service is disabled, and its active deployment
+ // and containers, if any, are shut down.
+ State *string `locationName:"state" type:"string" enum:"ContainerServiceState"`
+
+ // An object that describes the current state of the container service.
+ //
+ // The state detail is populated only when a container service is in a PENDING,
+ // DEPLOYING, or UPDATING state.
+ StateDetail *ContainerServiceStateDetail `locationName:"stateDetail" type:"structure"`
+
+ // The tag keys and optional values for the resource. For more information about
+ // tags in Lightsail, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-tags).
+ Tags []*Tag `locationName:"tags" type:"list"`
+
+ // The publicly accessible URL of the container service.
+ //
+ // If no public endpoint is specified in the currentDeployment, this URL returns
+ // a 404 response.
+ Url *string `locationName:"url" type:"string"`
+}
+
+// String returns the string representation
+func (s ContainerService) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ContainerService) GoString() string {
+ return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *ContainerService) SetArn(v string) *ContainerService {
+ s.Arn = &v
+ return s
+}
+
+// SetContainerServiceName sets the ContainerServiceName field's value.
+func (s *ContainerService) SetContainerServiceName(v string) *ContainerService {
+ s.ContainerServiceName = &v
+ return s
+}
+
+// SetCreatedAt sets the CreatedAt field's value.
+func (s *ContainerService) SetCreatedAt(v time.Time) *ContainerService {
+ s.CreatedAt = &v
+ return s
+}
+
+// SetCurrentDeployment sets the CurrentDeployment field's value.
+func (s *ContainerService) SetCurrentDeployment(v *ContainerServiceDeployment) *ContainerService {
+ s.CurrentDeployment = v
+ return s
+}
+
+// SetIsDisabled sets the IsDisabled field's value.
+func (s *ContainerService) SetIsDisabled(v bool) *ContainerService {
+ s.IsDisabled = &v
+ return s
+}
+
+// SetLocation sets the Location field's value.
+func (s *ContainerService) SetLocation(v *ResourceLocation) *ContainerService {
+ s.Location = v
+ return s
+}
+
+// SetNextDeployment sets the NextDeployment field's value.
+func (s *ContainerService) SetNextDeployment(v *ContainerServiceDeployment) *ContainerService {
+ s.NextDeployment = v
+ return s
+}
+
+// SetPower sets the Power field's value.
+func (s *ContainerService) SetPower(v string) *ContainerService {
+ s.Power = &v
+ return s
+}
+
+// SetPowerId sets the PowerId field's value.
+func (s *ContainerService) SetPowerId(v string) *ContainerService {
+ s.PowerId = &v
+ return s
+}
+
+// SetPrincipalArn sets the PrincipalArn field's value.
+func (s *ContainerService) SetPrincipalArn(v string) *ContainerService {
+ s.PrincipalArn = &v
+ return s
+}
+
+// SetPrivateDomainName sets the PrivateDomainName field's value.
+func (s *ContainerService) SetPrivateDomainName(v string) *ContainerService {
+ s.PrivateDomainName = &v
+ return s
+}
+
+// SetPublicDomainNames sets the PublicDomainNames field's value.
+func (s *ContainerService) SetPublicDomainNames(v map[string][]*string) *ContainerService {
+ s.PublicDomainNames = v
+ return s
+}
+
+// SetResourceType sets the ResourceType field's value.
+func (s *ContainerService) SetResourceType(v string) *ContainerService {
+ s.ResourceType = &v
+ return s
+}
+
+// SetScale sets the Scale field's value.
+func (s *ContainerService) SetScale(v int64) *ContainerService {
+ s.Scale = &v
+ return s
+}
+
+// SetState sets the State field's value.
+func (s *ContainerService) SetState(v string) *ContainerService {
+ s.State = &v
+ return s
+}
+
+// SetStateDetail sets the StateDetail field's value.
+func (s *ContainerService) SetStateDetail(v *ContainerServiceStateDetail) *ContainerService {
+ s.StateDetail = v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *ContainerService) SetTags(v []*Tag) *ContainerService {
+ s.Tags = v
+ return s
+}
+
+// SetUrl sets the Url field's value.
+func (s *ContainerService) SetUrl(v string) *ContainerService {
+ s.Url = &v
+ return s
+}
+
+// Describes a container deployment configuration of an Amazon Lightsail container
+// service.
+//
+// A deployment specifies the settings, such as the ports and launch command,
+// of containers that are deployed to your container service.
+type ContainerServiceDeployment struct {
+ _ struct{} `type:"structure"`
+
+ // An object that describes the configuration for the containers of the deployment.
+ Containers map[string]*Container `locationName:"containers" type:"map"`
+
+ // The timestamp when the deployment was created.
+ CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`
+
+ // An object that describes the endpoint of the deployment.
+ PublicEndpoint *ContainerServiceEndpoint `locationName:"publicEndpoint" type:"structure"`
+
+ // The state of the deployment.
+ //
+ // A deployment can be in one of the following states:
+ //
+ // * Activating - The deployment is being created.
+ //
+ // * Active - The deployment was successfully created, and it's currently
+ // running on the container service. The container service can have only
+ // one deployment in an active state at a time.
+ //
+ // * Inactive - The deployment was previously successfully created, but it
+ // is not currently running on the container service.
+ //
+ // * Failed - The deployment failed. Use the GetContainerLog action to view
+ // the log events for the containers in the deployment to try to determine
+ // the reason for the failure.
+ State *string `locationName:"state" type:"string" enum:"ContainerServiceDeploymentState"`
+
+ // The version number of the deployment.
+ Version *int64 `locationName:"version" type:"integer"`
+}
+
+// String returns the string representation
+func (s ContainerServiceDeployment) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ContainerServiceDeployment) GoString() string {
+ return s.String()
+}
+
+// SetContainers sets the Containers field's value.
+func (s *ContainerServiceDeployment) SetContainers(v map[string]*Container) *ContainerServiceDeployment {
+ s.Containers = v
+ return s
+}
+
+// SetCreatedAt sets the CreatedAt field's value.
+func (s *ContainerServiceDeployment) SetCreatedAt(v time.Time) *ContainerServiceDeployment {
+ s.CreatedAt = &v
+ return s
+}
+
+// SetPublicEndpoint sets the PublicEndpoint field's value.
+func (s *ContainerServiceDeployment) SetPublicEndpoint(v *ContainerServiceEndpoint) *ContainerServiceDeployment {
+ s.PublicEndpoint = v
+ return s
+}
+
+// SetState sets the State field's value.
+func (s *ContainerServiceDeployment) SetState(v string) *ContainerServiceDeployment {
+ s.State = &v
+ return s
+}
+
+// SetVersion sets the Version field's value.
+func (s *ContainerServiceDeployment) SetVersion(v int64) *ContainerServiceDeployment {
+ s.Version = &v
+ return s
+}
+
+// Describes a container deployment configuration of an Amazon Lightsail container
+// service.
+//
+// A deployment specifies the settings, such as the ports and launch command,
+// of containers that are deployed to your container service.
+type ContainerServiceDeploymentRequest struct {
+ _ struct{} `type:"structure"`
+
+ // An object that describes the configuration for the containers of the deployment.
+ Containers map[string]*Container `locationName:"containers" type:"map"`
+
+ // An object that describes the endpoint of the deployment.
+ PublicEndpoint *EndpointRequest `locationName:"publicEndpoint" type:"structure"`
+}
+
+// String returns the string representation
+func (s ContainerServiceDeploymentRequest) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ContainerServiceDeploymentRequest) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ContainerServiceDeploymentRequest) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ContainerServiceDeploymentRequest"}
+ if s.PublicEndpoint != nil {
+ if err := s.PublicEndpoint.Validate(); err != nil {
+ invalidParams.AddNested("PublicEndpoint", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetContainers sets the Containers field's value.
+func (s *ContainerServiceDeploymentRequest) SetContainers(v map[string]*Container) *ContainerServiceDeploymentRequest {
+ s.Containers = v
+ return s
+}
+
+// SetPublicEndpoint sets the PublicEndpoint field's value.
+func (s *ContainerServiceDeploymentRequest) SetPublicEndpoint(v *EndpointRequest) *ContainerServiceDeploymentRequest {
+ s.PublicEndpoint = v
+ return s
+}
+
+// Describes the public endpoint configuration of a deployment of an Amazon
+// Lightsail container service.
+type ContainerServiceEndpoint struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the container entry of the deployment that the endpoint configuration
+ // applies to.
+ ContainerName *string `locationName:"containerName" type:"string"`
+
+ // The port of the specified container to which traffic is forwarded to.
+ ContainerPort *int64 `locationName:"containerPort" type:"integer"`
+
+ // An object that describes the health check configuration of the container.
+ HealthCheck *ContainerServiceHealthCheckConfig `locationName:"healthCheck" type:"structure"`
+}
+
+// String returns the string representation
+func (s ContainerServiceEndpoint) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ContainerServiceEndpoint) GoString() string {
+ return s.String()
+}
+
+// SetContainerName sets the ContainerName field's value.
+func (s *ContainerServiceEndpoint) SetContainerName(v string) *ContainerServiceEndpoint {
+ s.ContainerName = &v
+ return s
+}
+
+// SetContainerPort sets the ContainerPort field's value.
+func (s *ContainerServiceEndpoint) SetContainerPort(v int64) *ContainerServiceEndpoint {
+ s.ContainerPort = &v
+ return s
+}
+
+// SetHealthCheck sets the HealthCheck field's value.
+func (s *ContainerServiceEndpoint) SetHealthCheck(v *ContainerServiceHealthCheckConfig) *ContainerServiceEndpoint {
+ s.HealthCheck = v
+ return s
+}
+
+// Describes the health check configuration of an Amazon Lightsail container
+// service.
+type ContainerServiceHealthCheckConfig struct {
+ _ struct{} `type:"structure"`
+
+ // The number of consecutive health checks successes required before moving
+ // the container to the Healthy state. The default value is 2.
+ HealthyThreshold *int64 `locationName:"healthyThreshold" type:"integer"`
+
+ // The approximate interval, in seconds, between health checks of an individual
+ // container. You can specify between 5 and 300 seconds. The default value is
+ // 5.
+ IntervalSeconds *int64 `locationName:"intervalSeconds" type:"integer"`
+
+ // The path on the container on which to perform the health check. The default
+ // value is /.
+ Path *string `locationName:"path" type:"string"`
+
+ // The HTTP codes to use when checking for a successful response from a container.
+ // You can specify values between 200 and 499. You can specify multiple values
+ // (for example, 200,202) or a range of values (for example, 200-299).
+ SuccessCodes *string `locationName:"successCodes" type:"string"`
+
+ // The amount of time, in seconds, during which no response means a failed health
+ // check. You can specify between 2 and 60 seconds. The default value is 2.
+ TimeoutSeconds *int64 `locationName:"timeoutSeconds" type:"integer"`
+
+ // The number of consecutive health check failures required before moving the
+ // container to the Unhealthy state. The default value is 2.
+ UnhealthyThreshold *int64 `locationName:"unhealthyThreshold" type:"integer"`
+}
+
+// String returns the string representation
+func (s ContainerServiceHealthCheckConfig) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ContainerServiceHealthCheckConfig) GoString() string {
+ return s.String()
+}
+
+// SetHealthyThreshold sets the HealthyThreshold field's value.
+func (s *ContainerServiceHealthCheckConfig) SetHealthyThreshold(v int64) *ContainerServiceHealthCheckConfig {
+ s.HealthyThreshold = &v
+ return s
+}
+
+// SetIntervalSeconds sets the IntervalSeconds field's value.
+func (s *ContainerServiceHealthCheckConfig) SetIntervalSeconds(v int64) *ContainerServiceHealthCheckConfig {
+ s.IntervalSeconds = &v
+ return s
+}
+
+// SetPath sets the Path field's value.
+func (s *ContainerServiceHealthCheckConfig) SetPath(v string) *ContainerServiceHealthCheckConfig {
+ s.Path = &v
+ return s
+}
+
+// SetSuccessCodes sets the SuccessCodes field's value.
+func (s *ContainerServiceHealthCheckConfig) SetSuccessCodes(v string) *ContainerServiceHealthCheckConfig {
+ s.SuccessCodes = &v
+ return s
+}
+
+// SetTimeoutSeconds sets the TimeoutSeconds field's value.
+func (s *ContainerServiceHealthCheckConfig) SetTimeoutSeconds(v int64) *ContainerServiceHealthCheckConfig {
+ s.TimeoutSeconds = &v
+ return s
+}
+
+// SetUnhealthyThreshold sets the UnhealthyThreshold field's value.
+func (s *ContainerServiceHealthCheckConfig) SetUnhealthyThreshold(v int64) *ContainerServiceHealthCheckConfig {
+ s.UnhealthyThreshold = &v
+ return s
+}
+
+// Describes the log events of a container of an Amazon Lightsail container
+// service.
+type ContainerServiceLogEvent struct {
+ _ struct{} `type:"structure"`
+
+ // The timestamp when the container service log event was created.
+ CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`
+
+ // The message of the container service log event.
+ Message *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation
+func (s ContainerServiceLogEvent) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ContainerServiceLogEvent) GoString() string {
+ return s.String()
+}
+
+// SetCreatedAt sets the CreatedAt field's value.
+func (s *ContainerServiceLogEvent) SetCreatedAt(v time.Time) *ContainerServiceLogEvent {
+ s.CreatedAt = &v
+ return s
+}
+
+// SetMessage sets the Message field's value.
+func (s *ContainerServiceLogEvent) SetMessage(v string) *ContainerServiceLogEvent {
+ s.Message = &v
+ return s
+}
+
+// Describes the powers that can be specified for an Amazon Lightsail container
+// service.
+//
+// The power specifies the amount of RAM, the number of vCPUs, and the base
+// price of the container service.
+type ContainerServicePower struct {
+ _ struct{} `type:"structure"`
+
+ // The number of vCPUs included in the power.
+ CpuCount *float64 `locationName:"cpuCount" type:"float"`
+
+ // A Boolean value indicating whether the power is active and can be specified
+ // for container services.
+ IsActive *bool `locationName:"isActive" type:"boolean"`
+
+ // The friendly name of the power (e.g., nano).
+ Name *string `locationName:"name" type:"string"`
+
+ // The ID of the power (e.g., nano-1).
+ PowerId *string `locationName:"powerId" type:"string"`
+
+ // The monthly price of the power in USD.
+ Price *float64 `locationName:"price" type:"float"`
+
+ // The amount of RAM (in GB) of the power.
+ RamSizeInGb *float64 `locationName:"ramSizeInGb" type:"float"`
+}
+
+// String returns the string representation
+func (s ContainerServicePower) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ContainerServicePower) GoString() string {
+ return s.String()
+}
+
+// SetCpuCount sets the CpuCount field's value.
+func (s *ContainerServicePower) SetCpuCount(v float64) *ContainerServicePower {
+ s.CpuCount = &v
+ return s
+}
+
+// SetIsActive sets the IsActive field's value.
+func (s *ContainerServicePower) SetIsActive(v bool) *ContainerServicePower {
+ s.IsActive = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *ContainerServicePower) SetName(v string) *ContainerServicePower {
+ s.Name = &v
+ return s
+}
+
+// SetPowerId sets the PowerId field's value.
+func (s *ContainerServicePower) SetPowerId(v string) *ContainerServicePower {
+ s.PowerId = &v
+ return s
+}
+
+// SetPrice sets the Price field's value.
+func (s *ContainerServicePower) SetPrice(v float64) *ContainerServicePower {
+ s.Price = &v
+ return s
+}
+
+// SetRamSizeInGb sets the RamSizeInGb field's value.
+func (s *ContainerServicePower) SetRamSizeInGb(v float64) *ContainerServicePower {
+ s.RamSizeInGb = &v
+ return s
+}
+
+// Describes the login information for the container image registry of an Amazon
+// Lightsail account.
+type ContainerServiceRegistryLogin struct {
+ _ struct{} `type:"structure"`
+
+ // The timestamp of when the container image registry username and password
+ // expire.
+ //
+ // The log in credentials expire 12 hours after they are created, at which point
+ // you will need to create a new set of log in credentials using the CreateContainerServiceRegistryLogin
+ // action.
+ ExpiresAt *time.Time `locationName:"expiresAt" type:"timestamp"`
+
+ // The container service registry password to use to push container images to
+ // the container image registry of a Lightsail account
+ Password *string `locationName:"password" type:"string"`
+
+ // The address to use to push container images to the container image registry
+ // of a Lightsail account.
+ Registry *string `locationName:"registry" type:"string"`
+
+ // The container service registry username to use to push container images to
+ // the container image registry of a Lightsail account.
+ Username *string `locationName:"username" type:"string"`
+}
+
+// String returns the string representation
+func (s ContainerServiceRegistryLogin) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ContainerServiceRegistryLogin) GoString() string {
+ return s.String()
+}
+
+// SetExpiresAt sets the ExpiresAt field's value.
+func (s *ContainerServiceRegistryLogin) SetExpiresAt(v time.Time) *ContainerServiceRegistryLogin {
+ s.ExpiresAt = &v
+ return s
+}
+
+// SetPassword sets the Password field's value.
+func (s *ContainerServiceRegistryLogin) SetPassword(v string) *ContainerServiceRegistryLogin {
+ s.Password = &v
+ return s
+}
+
+// SetRegistry sets the Registry field's value.
+func (s *ContainerServiceRegistryLogin) SetRegistry(v string) *ContainerServiceRegistryLogin {
+ s.Registry = &v
+ return s
+}
+
+// SetUsername sets the Username field's value.
+func (s *ContainerServiceRegistryLogin) SetUsername(v string) *ContainerServiceRegistryLogin {
+ s.Username = &v
+ return s
+}
+
+// Describes the current state of a container service.
+type ContainerServiceStateDetail struct {
+ _ struct{} `type:"structure"`
+
+ // The state code of the container service.
+ //
+ // The following state codes are possible:
+ //
+ // * The following state codes are possible if your container service is
+ // in a DEPLOYING or UPDATING state: CREATING_SYSTEM_RESOURCES - The system
+ // resources for your container service are being created. CREATING_NETWORK_INFRASTRUCTURE
+ // - The network infrastructure for your container service are being created.
+ // PROVISIONING_CERTIFICATE - The SSL/TLS certificate for your container
+ // service is being created. PROVISIONING_SERVICE - Your container service
+ // is being provisioned. CREATING_DEPLOYMENT - Your deployment is being created
+ // on your container service. EVALUATING_HEALTH_CHECK - The health of your
+ // deployment is being evaluated. ACTIVATING_DEPLOYMENT - Your deployment
+ // is being activated.
+ //
+ // * The following state codes are possible if your container service is
+ // in a PENDING state: CERTIFICATE_LIMIT_EXCEEDED - The SSL/TLS certificate
+ // required for your container service exceeds the maximum number of certificates
+ // allowed for your account. UNKNOWN_ERROR - An error was experienced when
+ // your container service was being created.
+ Code *string `locationName:"code" type:"string" enum:"ContainerServiceStateDetailCode"`
+
+ // A message that provides more information for the state code.
+ //
+ // The state detail is populated only when a container service is in a PENDING,
+ // DEPLOYING, or UPDATING state.
+ Message *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation
+func (s ContainerServiceStateDetail) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ContainerServiceStateDetail) GoString() string {
+ return s.String()
+}
+
+// SetCode sets the Code field's value.
+func (s *ContainerServiceStateDetail) SetCode(v string) *ContainerServiceStateDetail {
+ s.Code = &v
+ return s
+}
+
+// SetMessage sets the Message field's value.
+func (s *ContainerServiceStateDetail) SetMessage(v string) *ContainerServiceStateDetail {
+ s.Message = &v
+ return s
+}
+
+// Describes whether an Amazon Lightsail content delivery network (CDN) distribution
+// forwards cookies to the origin and, if so, which ones.
+//
+// For the cookies that you specify, your distribution caches separate versions
+// of the specified content based on the cookie values in viewer requests.
+type CookieObject struct {
+ _ struct{} `type:"structure"`
+
+ // The specific cookies to forward to your distribution's origin.
+ CookiesAllowList []*string `locationName:"cookiesAllowList" type:"list"`
+
+ // Specifies which cookies to forward to the distribution's origin for a cache
+ // behavior: all, none, or allow-list to forward only the cookies specified
+ // in the cookiesAllowList parameter.
+ Option *string `locationName:"option" type:"string" enum:"ForwardValues"`
+}
+
+// String returns the string representation
+func (s CookieObject) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CookieObject) GoString() string {
+ return s.String()
+}
+
+// SetCookiesAllowList sets the CookiesAllowList field's value.
+func (s *CookieObject) SetCookiesAllowList(v []*string) *CookieObject {
+ s.CookiesAllowList = v
+ return s
+}
+
+// SetOption sets the Option field's value.
+func (s *CookieObject) SetOption(v string) *CookieObject {
+ s.Option = &v
+ return s
+}
+
+type CopySnapshotInput struct {
+ _ struct{} `type:"structure"`
+
+ // The date of the source automatic snapshot to copy. Use the get auto snapshots
+ // operation to identify the dates of the available automatic snapshots.
+ //
+ // Constraints:
+ //
+ // * Must be specified in YYYY-MM-DD format.
+ //
+ // * This parameter cannot be defined together with the use latest restorable
+ // auto snapshot parameter. The restore date and use latest restorable auto
+ // snapshot parameters are mutually exclusive.
+ //
+ // * Define this parameter only when copying an automatic snapshot as a manual
+ // snapshot. For more information, see the Amazon Lightsail Developer Guide
+ // (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-keeping-automatic-snapshots).
+ RestoreDate *string `locationName:"restoreDate" type:"string"`
+
+ // The AWS Region where the source manual or automatic snapshot is located.
+ //
+ // SourceRegion is a required field
+ SourceRegion *string `locationName:"sourceRegion" type:"string" required:"true" enum:"RegionName"`
+
+ // The name of the source instance or disk from which the source automatic snapshot
+ // was created.
+ //
+ // Constraint:
+ //
+ // * Define this parameter only when copying an automatic snapshot as a manual
+ // snapshot. For more information, see the Amazon Lightsail Developer Guide
+ // (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-keeping-automatic-snapshots).
+ SourceResourceName *string `locationName:"sourceResourceName" type:"string"`
+
+ // The name of the source manual snapshot to copy.
+ //
+ // Constraint:
+ //
+ // * Define this parameter only when copying a manual snapshot as another
+ // manual snapshot.
+ SourceSnapshotName *string `locationName:"sourceSnapshotName" type:"string"`
+
+ // The name of the new manual snapshot to be created as a copy.
+ //
+ // TargetSnapshotName is a required field
+ TargetSnapshotName *string `locationName:"targetSnapshotName" type:"string" required:"true"`
+
+ // A Boolean value to indicate whether to use the latest available automatic
+ // snapshot of the specified source instance or disk.
+ //
+ // Constraints:
+ //
+ // * This parameter cannot be defined together with the restore date parameter.
+ // The use latest restorable auto snapshot and restore date parameters are
+ // mutually exclusive.
+ //
+ // * Define this parameter only when copying an automatic snapshot as a manual
+ // snapshot. For more information, see the Amazon Lightsail Developer Guide
+ // (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-keeping-automatic-snapshots).
+ UseLatestRestorableAutoSnapshot *bool `locationName:"useLatestRestorableAutoSnapshot" type:"boolean"`
+}
+
+// String returns the string representation
+func (s CopySnapshotInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CopySnapshotInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CopySnapshotInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CopySnapshotInput"}
+ if s.SourceRegion == nil {
+ invalidParams.Add(request.NewErrParamRequired("SourceRegion"))
+ }
+ if s.TargetSnapshotName == nil {
+ invalidParams.Add(request.NewErrParamRequired("TargetSnapshotName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetRestoreDate sets the RestoreDate field's value.
+func (s *CopySnapshotInput) SetRestoreDate(v string) *CopySnapshotInput {
+ s.RestoreDate = &v
+ return s
+}
+
+// SetSourceRegion sets the SourceRegion field's value.
+func (s *CopySnapshotInput) SetSourceRegion(v string) *CopySnapshotInput {
+ s.SourceRegion = &v
+ return s
+}
+
+// SetSourceResourceName sets the SourceResourceName field's value.
+func (s *CopySnapshotInput) SetSourceResourceName(v string) *CopySnapshotInput {
+ s.SourceResourceName = &v
+ return s
+}
+
+// SetSourceSnapshotName sets the SourceSnapshotName field's value.
+func (s *CopySnapshotInput) SetSourceSnapshotName(v string) *CopySnapshotInput {
+ s.SourceSnapshotName = &v
+ return s
+}
+
+// SetTargetSnapshotName sets the TargetSnapshotName field's value.
+func (s *CopySnapshotInput) SetTargetSnapshotName(v string) *CopySnapshotInput {
+ s.TargetSnapshotName = &v
+ return s
+}
+
+// SetUseLatestRestorableAutoSnapshot sets the UseLatestRestorableAutoSnapshot field's value.
+func (s *CopySnapshotInput) SetUseLatestRestorableAutoSnapshot(v bool) *CopySnapshotInput {
+ s.UseLatestRestorableAutoSnapshot = &v
+ return s
+}
+
+type CopySnapshotOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s CopySnapshotOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CopySnapshotOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *CopySnapshotOutput) SetOperations(v []*Operation) *CopySnapshotOutput {
+ s.Operations = v
+ return s
+}
+
+type CreateBucketAccessKeyInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the bucket that the new access key will belong to, and grant
+ // access to.
+ //
+ // BucketName is a required field
+ BucketName *string `locationName:"bucketName" min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateBucketAccessKeyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateBucketAccessKeyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateBucketAccessKeyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateBucketAccessKeyInput"}
+ if s.BucketName == nil {
+ invalidParams.Add(request.NewErrParamRequired("BucketName"))
+ }
+ if s.BucketName != nil && len(*s.BucketName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("BucketName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucketName sets the BucketName field's value.
+func (s *CreateBucketAccessKeyInput) SetBucketName(v string) *CreateBucketAccessKeyInput {
+ s.BucketName = &v
+ return s
+}
+
+type CreateBucketAccessKeyOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An object that describes the access key that is created.
+ AccessKey *AccessKey `locationName:"accessKey" type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateBucketAccessKeyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateBucketAccessKeyOutput) GoString() string {
+ return s.String()
+}
+
+// SetAccessKey sets the AccessKey field's value.
+func (s *CreateBucketAccessKeyOutput) SetAccessKey(v *AccessKey) *CreateBucketAccessKeyOutput {
+ s.AccessKey = v
+ return s
+}
+
+// SetOperations sets the Operations field's value.
+func (s *CreateBucketAccessKeyOutput) SetOperations(v []*Operation) *CreateBucketAccessKeyOutput {
+ s.Operations = v
+ return s
+}
+
+type CreateBucketInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name for the bucket.
+ //
+ // For more information about bucket names, see Bucket naming rules in Amazon
+ // Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/bucket-naming-rules-in-amazon-lightsail)
+ // in the Amazon Lightsail Developer Guide.
+ //
+ // BucketName is a required field
+ BucketName *string `locationName:"bucketName" min:"3" type:"string" required:"true"`
+
+ // The ID of the bundle to use for the bucket.
+ //
+ // A bucket bundle specifies the monthly cost, storage space, and data transfer
+ // quota for a bucket.
+ //
+ // Use the GetBucketBundles action to get a list of bundle IDs that you can
+ // specify.
+ //
+ // Use the UpdateBucketBundle action to change the bundle after the bucket is
+ // created.
+ //
+ // BundleId is a required field
+ BundleId *string `locationName:"bundleId" type:"string" required:"true"`
+
+ // A Boolean value that indicates whether to enable versioning of objects in
+ // the bucket.
+ //
+ // For more information about versioning, see Enabling and suspending object
+ // versioning in a bucket in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-managing-bucket-object-versioning)
+ // in the Amazon Lightsail Developer Guide.
+ EnableObjectVersioning *bool `locationName:"enableObjectVersioning" type:"boolean"`
+
+ // The tag keys and optional values to add to the bucket during creation.
+ //
+ // Use the TagResource action to tag the bucket after it's created.
+ Tags []*Tag `locationName:"tags" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateBucketInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateBucketInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateBucketInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateBucketInput"}
+ if s.BucketName == nil {
+ invalidParams.Add(request.NewErrParamRequired("BucketName"))
+ }
+ if s.BucketName != nil && len(*s.BucketName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("BucketName", 3))
+ }
+ if s.BundleId == nil {
+ invalidParams.Add(request.NewErrParamRequired("BundleId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucketName sets the BucketName field's value.
+func (s *CreateBucketInput) SetBucketName(v string) *CreateBucketInput {
+ s.BucketName = &v
+ return s
+}
+
+// SetBundleId sets the BundleId field's value.
+func (s *CreateBucketInput) SetBundleId(v string) *CreateBucketInput {
+ s.BundleId = &v
+ return s
+}
+
+// SetEnableObjectVersioning sets the EnableObjectVersioning field's value.
+func (s *CreateBucketInput) SetEnableObjectVersioning(v bool) *CreateBucketInput {
+ s.EnableObjectVersioning = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *CreateBucketInput) SetTags(v []*Tag) *CreateBucketInput {
+ s.Tags = v
+ return s
+}
+
+type CreateBucketOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An object that describes the bucket that is created.
+ Bucket *Bucket `locationName:"bucket" type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateBucketOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateBucketOutput) GoString() string {
+ return s.String()
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CreateBucketOutput) SetBucket(v *Bucket) *CreateBucketOutput {
+ s.Bucket = v
+ return s
+}
+
+// SetOperations sets the Operations field's value.
+func (s *CreateBucketOutput) SetOperations(v []*Operation) *CreateBucketOutput {
+ s.Operations = v
+ return s
+}
+
+type CreateCertificateInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name for the certificate.
+ //
+ // CertificateName is a required field
+ CertificateName *string `locationName:"certificateName" type:"string" required:"true"`
+
+ // The domain name (e.g., example.com) for the certificate.
+ //
+ // DomainName is a required field
+ DomainName *string `locationName:"domainName" type:"string" required:"true"`
+
+ // An array of strings that specify the alternate domains (e.g., example2.com)
+ // and subdomains (e.g., blog.example.com) for the certificate.
+ //
+ // You can specify a maximum of nine alternate domains (in addition to the primary
+ // domain name).
+ //
+ // Wildcard domain entries (e.g., *.example.com) are not supported.
+ SubjectAlternativeNames []*string `locationName:"subjectAlternativeNames" type:"list"`
+
+ // The tag keys and optional values to add to the certificate during create.
+ //
+ // Use the TagResource action to tag a resource after it's created.
+ Tags []*Tag `locationName:"tags" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateCertificateInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateCertificateInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateCertificateInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateCertificateInput"}
+ if s.CertificateName == nil {
+ invalidParams.Add(request.NewErrParamRequired("CertificateName"))
+ }
+ if s.DomainName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DomainName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetCertificateName sets the CertificateName field's value.
+func (s *CreateCertificateInput) SetCertificateName(v string) *CreateCertificateInput {
+ s.CertificateName = &v
+ return s
+}
+
+// SetDomainName sets the DomainName field's value.
+func (s *CreateCertificateInput) SetDomainName(v string) *CreateCertificateInput {
+ s.DomainName = &v
+ return s
+}
+
+// SetSubjectAlternativeNames sets the SubjectAlternativeNames field's value.
+func (s *CreateCertificateInput) SetSubjectAlternativeNames(v []*string) *CreateCertificateInput {
+ s.SubjectAlternativeNames = v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *CreateCertificateInput) SetTags(v []*Tag) *CreateCertificateInput {
+ s.Tags = v
+ return s
+}
+
+type CreateCertificateOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An object that describes the certificate created.
+ Certificate *CertificateSummary `locationName:"certificate" type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateCertificateOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateCertificateOutput) GoString() string {
+ return s.String()
+}
+
+// SetCertificate sets the Certificate field's value.
+func (s *CreateCertificateOutput) SetCertificate(v *CertificateSummary) *CreateCertificateOutput {
+ s.Certificate = v
+ return s
+}
+
+// SetOperations sets the Operations field's value.
+func (s *CreateCertificateOutput) SetOperations(v []*Operation) *CreateCertificateOutput {
+ s.Operations = v
+ return s
+}
+
+type CreateCloudFormationStackInput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of parameters that will be used to create the new Amazon EC2 instance.
+ // You can only pass one instance entry at a time in this array. You will get
+ // an invalid parameter error if you pass more than one instance entry in this
+ // array.
+ //
+ // Instances is a required field
+ Instances []*InstanceEntry `locationName:"instances" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateCloudFormationStackInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateCloudFormationStackInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateCloudFormationStackInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateCloudFormationStackInput"}
+ if s.Instances == nil {
+ invalidParams.Add(request.NewErrParamRequired("Instances"))
+ }
+ if s.Instances != nil {
+ for i, v := range s.Instances {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Instances", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetInstances sets the Instances field's value.
+func (s *CreateCloudFormationStackInput) SetInstances(v []*InstanceEntry) *CreateCloudFormationStackInput {
+ s.Instances = v
+ return s
+}
+
+type CreateCloudFormationStackOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateCloudFormationStackOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateCloudFormationStackOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *CreateCloudFormationStackOutput) SetOperations(v []*Operation) *CreateCloudFormationStackOutput {
+ s.Operations = v
+ return s
+}
+
+type CreateContactMethodInput struct {
+ _ struct{} `type:"structure"`
+
+ // The destination of the contact method, such as an email address or a mobile
+ // phone number.
+ //
+ // Use the E.164 format when specifying a mobile phone number. E.164 is a standard
+ // for the phone number structure used for international telecommunication.
+ // Phone numbers that follow this format can have a maximum of 15 digits, and
+ // they are prefixed with the plus character (+) and the country code. For example,
+ // a U.S. phone number in E.164 format would be specified as +1XXX5550100. For
+ // more information, see E.164 (https://en.wikipedia.org/wiki/E.164) on Wikipedia.
+ //
+ // ContactEndpoint is a required field
+ ContactEndpoint *string `locationName:"contactEndpoint" min:"1" type:"string" required:"true"`
+
+ // The protocol of the contact method, such as Email or SMS (text messaging).
+ //
+ // The SMS protocol is supported only in the following AWS Regions.
+ //
+ // * US East (N. Virginia) (us-east-1)
+ //
+ // * US West (Oregon) (us-west-2)
+ //
+ // * Europe (Ireland) (eu-west-1)
+ //
+ // * Asia Pacific (Tokyo) (ap-northeast-1)
+ //
+ // * Asia Pacific (Singapore) (ap-southeast-1)
+ //
+ // * Asia Pacific (Sydney) (ap-southeast-2)
+ //
+ // For a list of countries/regions where SMS text messages can be sent, and
+ // the latest AWS Regions where SMS text messaging is supported, see Supported
+ // Regions and Countries (https://docs.aws.amazon.com/sns/latest/dg/sns-supported-regions-countries.html)
+ // in the Amazon SNS Developer Guide.
+ //
+ // For more information about notifications in Amazon Lightsail, see Notifications
+ // in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-notifications).
+ //
+ // Protocol is a required field
+ Protocol *string `locationName:"protocol" type:"string" required:"true" enum:"ContactProtocol"`
+}
+
+// String returns the string representation
+func (s CreateContactMethodInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateContactMethodInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateContactMethodInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateContactMethodInput"}
+ if s.ContactEndpoint == nil {
+ invalidParams.Add(request.NewErrParamRequired("ContactEndpoint"))
+ }
+ if s.ContactEndpoint != nil && len(*s.ContactEndpoint) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ContactEndpoint", 1))
+ }
+ if s.Protocol == nil {
+ invalidParams.Add(request.NewErrParamRequired("Protocol"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetContactEndpoint sets the ContactEndpoint field's value.
+func (s *CreateContactMethodInput) SetContactEndpoint(v string) *CreateContactMethodInput {
+ s.ContactEndpoint = &v
+ return s
+}
+
+// SetProtocol sets the Protocol field's value.
+func (s *CreateContactMethodInput) SetProtocol(v string) *CreateContactMethodInput {
+ s.Protocol = &v
+ return s
+}
+
+type CreateContactMethodOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateContactMethodOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateContactMethodOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *CreateContactMethodOutput) SetOperations(v []*Operation) *CreateContactMethodOutput {
+ s.Operations = v
+ return s
+}
+
+type CreateContainerServiceDeploymentInput struct {
+ _ struct{} `type:"structure"`
+
+ // An object that describes the settings of the containers that will be launched
+ // on the container service.
+ Containers map[string]*Container `locationName:"containers" type:"map"`
+
+ // An object that describes the settings of the public endpoint for the container
+ // service.
+ PublicEndpoint *EndpointRequest `locationName:"publicEndpoint" type:"structure"`
+
+ // The name of the container service for which to create the deployment.
+ //
+ // ServiceName is a required field
+ ServiceName *string `locationName:"serviceName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateContainerServiceDeploymentInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateContainerServiceDeploymentInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateContainerServiceDeploymentInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateContainerServiceDeploymentInput"}
+ if s.ServiceName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ServiceName"))
+ }
+ if s.ServiceName != nil && len(*s.ServiceName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ServiceName", 1))
+ }
+ if s.PublicEndpoint != nil {
+ if err := s.PublicEndpoint.Validate(); err != nil {
+ invalidParams.AddNested("PublicEndpoint", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetContainers sets the Containers field's value.
+func (s *CreateContainerServiceDeploymentInput) SetContainers(v map[string]*Container) *CreateContainerServiceDeploymentInput {
+ s.Containers = v
+ return s
+}
+
+// SetPublicEndpoint sets the PublicEndpoint field's value.
+func (s *CreateContainerServiceDeploymentInput) SetPublicEndpoint(v *EndpointRequest) *CreateContainerServiceDeploymentInput {
+ s.PublicEndpoint = v
+ return s
+}
+
+// SetServiceName sets the ServiceName field's value.
+func (s *CreateContainerServiceDeploymentInput) SetServiceName(v string) *CreateContainerServiceDeploymentInput {
+ s.ServiceName = &v
+ return s
+}
+
+type CreateContainerServiceDeploymentOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An object that describes a container service.
+ ContainerService *ContainerService `locationName:"containerService" type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateContainerServiceDeploymentOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateContainerServiceDeploymentOutput) GoString() string {
+ return s.String()
+}
+
+// SetContainerService sets the ContainerService field's value.
+func (s *CreateContainerServiceDeploymentOutput) SetContainerService(v *ContainerService) *CreateContainerServiceDeploymentOutput {
+ s.ContainerService = v
+ return s
+}
+
+type CreateContainerServiceInput struct {
+ _ struct{} `type:"structure"`
+
+ // An object that describes a deployment for the container service.
+ //
+ // A deployment specifies the containers that will be launched on the container
+ // service and their settings, such as the ports to open, the environment variables
+ // to apply, and the launch command to run. It also specifies the container
+ // that will serve as the public endpoint of the deployment and its settings,
+ // such as the HTTP or HTTPS port to use, and the health check configuration.
+ Deployment *ContainerServiceDeploymentRequest `locationName:"deployment" type:"structure"`
+
+ // The power specification for the container service.
+ //
+ // The power specifies the amount of memory, vCPUs, and base monthly cost of
+ // each node of the container service. The power and scale of a container service
+ // makes up its configured capacity. To determine the monthly price of your
+ // container service, multiply the base price of the power with the scale (the
+ // number of nodes) of the service.
+ //
+ // Use the GetContainerServicePowers action to get a list of power options that
+ // you can specify using this parameter, and their base monthly cost.
+ //
+ // Power is a required field
+ Power *string `locationName:"power" type:"string" required:"true" enum:"ContainerServicePowerName"`
+
+ // The public domain names to use with the container service, such as example.com
+ // and www.example.com.
+ //
+ // You can specify up to four public domain names for a container service. The
+ // domain names that you specify are used when you create a deployment with
+ // a container configured as the public endpoint of your container service.
+ //
+ // If you don't specify public domain names, then you can use the default domain
+ // of the container service.
+ //
+ // You must create and validate an SSL/TLS certificate before you can use public
+ // domain names with your container service. Use the CreateCertificate action
+ // to create a certificate for the public domain names you want to use with
+ // your container service.
+ //
+ // You can specify public domain names using a string to array map as shown
+ // in the example later on this page.
+ PublicDomainNames map[string][]*string `locationName:"publicDomainNames" type:"map"`
+
+ // The scale specification for the container service.
+ //
+ // The scale specifies the allocated compute nodes of the container service.
+ // The power and scale of a container service makes up its configured capacity.
+ // To determine the monthly price of your container service, multiply the base
+ // price of the power with the scale (the number of nodes) of the service.
+ //
+ // Scale is a required field
+ Scale *int64 `locationName:"scale" min:"1" type:"integer" required:"true"`
+
+ // The name for the container service.
+ //
+ // The name that you specify for your container service will make up part of
+ // its default domain. The default domain of a container service is typically
+ // https://<ServiceName>.<RandomGUID>.<AWSRegion>.cs.amazonlightsail.com. If
+ // the name of your container service is container-service-1, and it's located
+ // in the US East (Ohio) AWS region (us-east-2), then the domain for your container
+ // service will be like the following example: https://container-service-1.ur4EXAMPLE2uq.us-east-2.cs.amazonlightsail.com
+ //
+ // The following are the requirements for container service names:
+ //
+ // * Must be unique within each AWS Region in your Lightsail account.
+ //
+ // * Must contain 1 to 63 characters.
+ //
+ // * Must contain only alphanumeric characters and hyphens.
+ //
+ // * A hyphen (-) can separate words but cannot be at the start or end of
+ // the name.
+ //
+ // ServiceName is a required field
+ ServiceName *string `locationName:"serviceName" min:"1" type:"string" required:"true"`
+
+ // The tag keys and optional values to add to the certificate during create.
+ //
+ // Use the TagResource action to tag a resource after it's created.
+ //
+ // For more information about tags in Lightsail, see the Amazon Lightsail Developer
+ // Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-tags).
+ Tags []*Tag `locationName:"tags" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateContainerServiceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateContainerServiceInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateContainerServiceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateContainerServiceInput"}
+ if s.Power == nil {
+ invalidParams.Add(request.NewErrParamRequired("Power"))
+ }
+ if s.Scale == nil {
+ invalidParams.Add(request.NewErrParamRequired("Scale"))
+ }
+ if s.Scale != nil && *s.Scale < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("Scale", 1))
+ }
+ if s.ServiceName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ServiceName"))
+ }
+ if s.ServiceName != nil && len(*s.ServiceName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ServiceName", 1))
+ }
+ if s.Deployment != nil {
+ if err := s.Deployment.Validate(); err != nil {
+ invalidParams.AddNested("Deployment", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDeployment sets the Deployment field's value.
+func (s *CreateContainerServiceInput) SetDeployment(v *ContainerServiceDeploymentRequest) *CreateContainerServiceInput {
+ s.Deployment = v
+ return s
+}
+
+// SetPower sets the Power field's value.
+func (s *CreateContainerServiceInput) SetPower(v string) *CreateContainerServiceInput {
+ s.Power = &v
+ return s
+}
+
+// SetPublicDomainNames sets the PublicDomainNames field's value.
+func (s *CreateContainerServiceInput) SetPublicDomainNames(v map[string][]*string) *CreateContainerServiceInput {
+ s.PublicDomainNames = v
+ return s
+}
+
+// SetScale sets the Scale field's value.
+func (s *CreateContainerServiceInput) SetScale(v int64) *CreateContainerServiceInput {
+ s.Scale = &v
+ return s
+}
+
+// SetServiceName sets the ServiceName field's value.
+func (s *CreateContainerServiceInput) SetServiceName(v string) *CreateContainerServiceInput {
+ s.ServiceName = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *CreateContainerServiceInput) SetTags(v []*Tag) *CreateContainerServiceInput {
+ s.Tags = v
+ return s
+}
+
+type CreateContainerServiceOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An object that describes a container service.
+ ContainerService *ContainerService `locationName:"containerService" type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateContainerServiceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateContainerServiceOutput) GoString() string {
+ return s.String()
+}
+
+// SetContainerService sets the ContainerService field's value.
+func (s *CreateContainerServiceOutput) SetContainerService(v *ContainerService) *CreateContainerServiceOutput {
+ s.ContainerService = v
+ return s
+}
+
+type CreateContainerServiceRegistryLoginInput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateContainerServiceRegistryLoginInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateContainerServiceRegistryLoginInput) GoString() string {
+ return s.String()
+}
+
+type CreateContainerServiceRegistryLoginOutput struct {
+ _ struct{} `type:"structure"`
+
+	// An object that describes the login information for the container service
+	// registry of your Lightsail account.
+ RegistryLogin *ContainerServiceRegistryLogin `locationName:"registryLogin" type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateContainerServiceRegistryLoginOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateContainerServiceRegistryLoginOutput) GoString() string {
+ return s.String()
+}
+
+// SetRegistryLogin sets the RegistryLogin field's value.
+func (s *CreateContainerServiceRegistryLoginOutput) SetRegistryLogin(v *ContainerServiceRegistryLogin) *CreateContainerServiceRegistryLoginOutput {
+ s.RegistryLogin = v
+ return s
+}
+
+type CreateDiskFromSnapshotInput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that represent the add-ons to enable for the new disk.
+ AddOns []*AddOnRequest `locationName:"addOns" type:"list"`
+
+ // The Availability Zone where you want to create the disk (e.g., us-east-2a).
+ // Choose the same Availability Zone as the Lightsail instance where you want
+ // to create the disk.
+ //
+ // Use the GetRegions operation to list the Availability Zones where Lightsail
+ // is currently available.
+ //
+ // AvailabilityZone is a required field
+ AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"`
+
+ // The unique Lightsail disk name (e.g., my-disk).
+ //
+ // DiskName is a required field
+ DiskName *string `locationName:"diskName" type:"string" required:"true"`
+
+ // The name of the disk snapshot (e.g., my-snapshot) from which to create the
+ // new storage disk.
+ //
+ // Constraint:
+ //
+ // * This parameter cannot be defined together with the source disk name
+ // parameter. The disk snapshot name and source disk name parameters are
+ // mutually exclusive.
+ DiskSnapshotName *string `locationName:"diskSnapshotName" type:"string"`
+
+ // The date of the automatic snapshot to use for the new disk. Use the get auto
+ // snapshots operation to identify the dates of the available automatic snapshots.
+ //
+ // Constraints:
+ //
+ // * Must be specified in YYYY-MM-DD format.
+ //
+ // * This parameter cannot be defined together with the use latest restorable
+ // auto snapshot parameter. The restore date and use latest restorable auto
+ // snapshot parameters are mutually exclusive.
+ //
+ // * Define this parameter only when creating a new disk from an automatic
+ // snapshot. For more information, see the Amazon Lightsail Developer Guide
+ // (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots).
+ RestoreDate *string `locationName:"restoreDate" type:"string"`
+
+ // The size of the disk in GB (e.g., 32).
+ //
+ // SizeInGb is a required field
+ SizeInGb *int64 `locationName:"sizeInGb" type:"integer" required:"true"`
+
+ // The name of the source disk from which the source automatic snapshot was
+ // created.
+ //
+ // Constraints:
+ //
+ // * This parameter cannot be defined together with the disk snapshot name
+ // parameter. The source disk name and disk snapshot name parameters are
+ // mutually exclusive.
+ //
+ // * Define this parameter only when creating a new disk from an automatic
+ // snapshot. For more information, see the Amazon Lightsail Developer Guide
+ // (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots).
+ SourceDiskName *string `locationName:"sourceDiskName" type:"string"`
+
+ // The tag keys and optional values to add to the resource during create.
+ //
+ // Use the TagResource action to tag a resource after it's created.
+ Tags []*Tag `locationName:"tags" type:"list"`
+
+ // A Boolean value to indicate whether to use the latest available automatic
+ // snapshot.
+ //
+ // Constraints:
+ //
+ // * This parameter cannot be defined together with the restore date parameter.
+ // The use latest restorable auto snapshot and restore date parameters are
+ // mutually exclusive.
+ //
+ // * Define this parameter only when creating a new disk from an automatic
+ // snapshot. For more information, see the Amazon Lightsail Developer Guide
+ // (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots).
+ UseLatestRestorableAutoSnapshot *bool `locationName:"useLatestRestorableAutoSnapshot" type:"boolean"`
+}
+
+// String returns the string representation
+func (s CreateDiskFromSnapshotInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDiskFromSnapshotInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateDiskFromSnapshotInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateDiskFromSnapshotInput"}
+ if s.AvailabilityZone == nil {
+ invalidParams.Add(request.NewErrParamRequired("AvailabilityZone"))
+ }
+ if s.DiskName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DiskName"))
+ }
+ if s.SizeInGb == nil {
+ invalidParams.Add(request.NewErrParamRequired("SizeInGb"))
+ }
+ if s.AddOns != nil {
+ for i, v := range s.AddOns {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AddOns", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAddOns sets the AddOns field's value.
+func (s *CreateDiskFromSnapshotInput) SetAddOns(v []*AddOnRequest) *CreateDiskFromSnapshotInput {
+ s.AddOns = v
+ return s
+}
+
+// SetAvailabilityZone sets the AvailabilityZone field's value.
+func (s *CreateDiskFromSnapshotInput) SetAvailabilityZone(v string) *CreateDiskFromSnapshotInput {
+ s.AvailabilityZone = &v
+ return s
+}
+
+// SetDiskName sets the DiskName field's value.
+func (s *CreateDiskFromSnapshotInput) SetDiskName(v string) *CreateDiskFromSnapshotInput {
+ s.DiskName = &v
+ return s
+}
+
+// SetDiskSnapshotName sets the DiskSnapshotName field's value.
+func (s *CreateDiskFromSnapshotInput) SetDiskSnapshotName(v string) *CreateDiskFromSnapshotInput {
+ s.DiskSnapshotName = &v
+ return s
+}
+
+// SetRestoreDate sets the RestoreDate field's value.
+func (s *CreateDiskFromSnapshotInput) SetRestoreDate(v string) *CreateDiskFromSnapshotInput {
+ s.RestoreDate = &v
+ return s
+}
+
+// SetSizeInGb sets the SizeInGb field's value.
+func (s *CreateDiskFromSnapshotInput) SetSizeInGb(v int64) *CreateDiskFromSnapshotInput {
+ s.SizeInGb = &v
+ return s
+}
+
+// SetSourceDiskName sets the SourceDiskName field's value.
+func (s *CreateDiskFromSnapshotInput) SetSourceDiskName(v string) *CreateDiskFromSnapshotInput {
+ s.SourceDiskName = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *CreateDiskFromSnapshotInput) SetTags(v []*Tag) *CreateDiskFromSnapshotInput {
+ s.Tags = v
+ return s
+}
+
+// SetUseLatestRestorableAutoSnapshot sets the UseLatestRestorableAutoSnapshot field's value.
+func (s *CreateDiskFromSnapshotInput) SetUseLatestRestorableAutoSnapshot(v bool) *CreateDiskFromSnapshotInput {
+ s.UseLatestRestorableAutoSnapshot = &v
+ return s
+}
+
+type CreateDiskFromSnapshotOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateDiskFromSnapshotOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDiskFromSnapshotOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *CreateDiskFromSnapshotOutput) SetOperations(v []*Operation) *CreateDiskFromSnapshotOutput {
+ s.Operations = v
+ return s
+}
+
+type CreateDiskInput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that represent the add-ons to enable for the new disk.
+ AddOns []*AddOnRequest `locationName:"addOns" type:"list"`
+
+ // The Availability Zone where you want to create the disk (e.g., us-east-2a).
+ // Use the same Availability Zone as the Lightsail instance to which you want
+ // to attach the disk.
+ //
+ // Use the get regions operation to list the Availability Zones where Lightsail
+ // is currently available.
+ //
+ // AvailabilityZone is a required field
+ AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"`
+
+ // The unique Lightsail disk name (e.g., my-disk).
+ //
+ // DiskName is a required field
+ DiskName *string `locationName:"diskName" type:"string" required:"true"`
+
+ // The size of the disk in GB (e.g., 32).
+ //
+ // SizeInGb is a required field
+ SizeInGb *int64 `locationName:"sizeInGb" type:"integer" required:"true"`
+
+ // The tag keys and optional values to add to the resource during create.
+ //
+ // Use the TagResource action to tag a resource after it's created.
+ Tags []*Tag `locationName:"tags" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateDiskInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDiskInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateDiskInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateDiskInput"}
+ if s.AvailabilityZone == nil {
+ invalidParams.Add(request.NewErrParamRequired("AvailabilityZone"))
+ }
+ if s.DiskName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DiskName"))
+ }
+ if s.SizeInGb == nil {
+ invalidParams.Add(request.NewErrParamRequired("SizeInGb"))
+ }
+ if s.AddOns != nil {
+ for i, v := range s.AddOns {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AddOns", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAddOns sets the AddOns field's value.
+func (s *CreateDiskInput) SetAddOns(v []*AddOnRequest) *CreateDiskInput {
+ s.AddOns = v
+ return s
+}
+
+// SetAvailabilityZone sets the AvailabilityZone field's value.
+func (s *CreateDiskInput) SetAvailabilityZone(v string) *CreateDiskInput {
+ s.AvailabilityZone = &v
+ return s
+}
+
+// SetDiskName sets the DiskName field's value.
+func (s *CreateDiskInput) SetDiskName(v string) *CreateDiskInput {
+ s.DiskName = &v
+ return s
+}
+
+// SetSizeInGb sets the SizeInGb field's value.
+func (s *CreateDiskInput) SetSizeInGb(v int64) *CreateDiskInput {
+ s.SizeInGb = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *CreateDiskInput) SetTags(v []*Tag) *CreateDiskInput {
+ s.Tags = v
+ return s
+}
+
+type CreateDiskOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateDiskOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDiskOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *CreateDiskOutput) SetOperations(v []*Operation) *CreateDiskOutput {
+ s.Operations = v
+ return s
+}
+
+type CreateDiskSnapshotInput struct {
+ _ struct{} `type:"structure"`
+
+ // The unique name of the source disk (e.g., Disk-Virginia-1).
+ //
+ // This parameter cannot be defined together with the instance name parameter.
+ // The disk name and instance name parameters are mutually exclusive.
+ DiskName *string `locationName:"diskName" type:"string"`
+
+ // The name of the destination disk snapshot (e.g., my-disk-snapshot) based
+ // on the source disk.
+ //
+ // DiskSnapshotName is a required field
+ DiskSnapshotName *string `locationName:"diskSnapshotName" type:"string" required:"true"`
+
+ // The unique name of the source instance (e.g., Amazon_Linux-512MB-Virginia-1).
+ // When this is defined, a snapshot of the instance's system volume is created.
+ //
+ // This parameter cannot be defined together with the disk name parameter. The
+ // instance name and disk name parameters are mutually exclusive.
+ InstanceName *string `locationName:"instanceName" type:"string"`
+
+ // The tag keys and optional values to add to the resource during create.
+ //
+ // Use the TagResource action to tag a resource after it's created.
+ Tags []*Tag `locationName:"tags" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateDiskSnapshotInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDiskSnapshotInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateDiskSnapshotInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateDiskSnapshotInput"}
+ if s.DiskSnapshotName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DiskSnapshotName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDiskName sets the DiskName field's value.
+func (s *CreateDiskSnapshotInput) SetDiskName(v string) *CreateDiskSnapshotInput {
+ s.DiskName = &v
+ return s
+}
+
+// SetDiskSnapshotName sets the DiskSnapshotName field's value.
+func (s *CreateDiskSnapshotInput) SetDiskSnapshotName(v string) *CreateDiskSnapshotInput {
+ s.DiskSnapshotName = &v
+ return s
+}
+
+// SetInstanceName sets the InstanceName field's value.
+func (s *CreateDiskSnapshotInput) SetInstanceName(v string) *CreateDiskSnapshotInput {
+ s.InstanceName = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *CreateDiskSnapshotInput) SetTags(v []*Tag) *CreateDiskSnapshotInput {
+ s.Tags = v
+ return s
+}
+
+type CreateDiskSnapshotOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateDiskSnapshotOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDiskSnapshotOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *CreateDiskSnapshotOutput) SetOperations(v []*Operation) *CreateDiskSnapshotOutput {
+ s.Operations = v
+ return s
+}
+
+type CreateDistributionInput struct {
+ _ struct{} `type:"structure"`
+
+ // The bundle ID to use for the distribution.
+ //
+ // A distribution bundle describes the specifications of your distribution,
+ // such as the monthly cost and monthly network transfer quota.
+ //
+ // Use the GetDistributionBundles action to get a list of distribution bundle
+ // IDs that you can specify.
+ //
+ // BundleId is a required field
+ BundleId *string `locationName:"bundleId" type:"string" required:"true"`
+
+ // An object that describes the cache behavior settings for the distribution.
+ CacheBehaviorSettings *CacheSettings `locationName:"cacheBehaviorSettings" type:"structure"`
+
+ // An array of objects that describe the per-path cache behavior for the distribution.
+ CacheBehaviors []*CacheBehaviorPerPath `locationName:"cacheBehaviors" type:"list"`
+
+ // An object that describes the default cache behavior for the distribution.
+ //
+ // DefaultCacheBehavior is a required field
+ DefaultCacheBehavior *CacheBehavior `locationName:"defaultCacheBehavior" type:"structure" required:"true"`
+
+ // The name for the distribution.
+ //
+ // DistributionName is a required field
+ DistributionName *string `locationName:"distributionName" type:"string" required:"true"`
+
+ // The IP address type for the distribution.
+ //
+ // The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6.
+ //
+ // The default value is dualstack.
+ IpAddressType *string `locationName:"ipAddressType" type:"string" enum:"IpAddressType"`
+
+ // An object that describes the origin resource for the distribution, such as
+ // a Lightsail instance or load balancer.
+ //
+ // The distribution pulls, caches, and serves content from the origin.
+ //
+ // Origin is a required field
+ Origin *InputOrigin `locationName:"origin" type:"structure" required:"true"`
+
+ // The tag keys and optional values to add to the distribution during create.
+ //
+ // Use the TagResource action to tag a resource after it's created.
+ Tags []*Tag `locationName:"tags" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateDistributionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDistributionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateDistributionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateDistributionInput"}
+ if s.BundleId == nil {
+ invalidParams.Add(request.NewErrParamRequired("BundleId"))
+ }
+ if s.DefaultCacheBehavior == nil {
+ invalidParams.Add(request.NewErrParamRequired("DefaultCacheBehavior"))
+ }
+ if s.DistributionName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DistributionName"))
+ }
+ if s.Origin == nil {
+ invalidParams.Add(request.NewErrParamRequired("Origin"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBundleId sets the BundleId field's value.
+func (s *CreateDistributionInput) SetBundleId(v string) *CreateDistributionInput {
+ s.BundleId = &v
+ return s
+}
+
+// SetCacheBehaviorSettings sets the CacheBehaviorSettings field's value.
+func (s *CreateDistributionInput) SetCacheBehaviorSettings(v *CacheSettings) *CreateDistributionInput {
+ s.CacheBehaviorSettings = v
+ return s
+}
+
+// SetCacheBehaviors sets the CacheBehaviors field's value.
+func (s *CreateDistributionInput) SetCacheBehaviors(v []*CacheBehaviorPerPath) *CreateDistributionInput {
+ s.CacheBehaviors = v
+ return s
+}
+
+// SetDefaultCacheBehavior sets the DefaultCacheBehavior field's value.
+func (s *CreateDistributionInput) SetDefaultCacheBehavior(v *CacheBehavior) *CreateDistributionInput {
+ s.DefaultCacheBehavior = v
+ return s
+}
+
+// SetDistributionName sets the DistributionName field's value.
+func (s *CreateDistributionInput) SetDistributionName(v string) *CreateDistributionInput {
+ s.DistributionName = &v
+ return s
+}
+
+// SetIpAddressType sets the IpAddressType field's value.
+func (s *CreateDistributionInput) SetIpAddressType(v string) *CreateDistributionInput {
+ s.IpAddressType = &v
+ return s
+}
+
+// SetOrigin sets the Origin field's value.
+func (s *CreateDistributionInput) SetOrigin(v *InputOrigin) *CreateDistributionInput {
+ s.Origin = v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *CreateDistributionInput) SetTags(v []*Tag) *CreateDistributionInput {
+ s.Tags = v
+ return s
+}
+
+type CreateDistributionOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An object that describes the distribution created.
+ Distribution *LightsailDistribution `locationName:"distribution" type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operation *Operation `locationName:"operation" type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateDistributionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDistributionOutput) GoString() string {
+ return s.String()
+}
+
+// SetDistribution sets the Distribution field's value.
+func (s *CreateDistributionOutput) SetDistribution(v *LightsailDistribution) *CreateDistributionOutput {
+ s.Distribution = v
+ return s
+}
+
+// SetOperation sets the Operation field's value.
+func (s *CreateDistributionOutput) SetOperation(v *Operation) *CreateDistributionOutput {
+ s.Operation = v
+ return s
+}
+
+type CreateDomainEntryInput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of key-value pairs containing information about the domain entry
+ // request.
+ //
+ // DomainEntry is a required field
+ DomainEntry *DomainEntry `locationName:"domainEntry" type:"structure" required:"true"`
+
+ // The domain name (e.g., example.com) for which you want to create the domain
+ // entry.
+ //
+ // DomainName is a required field
+ DomainName *string `locationName:"domainName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateDomainEntryInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDomainEntryInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateDomainEntryInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateDomainEntryInput"}
+ if s.DomainEntry == nil {
+ invalidParams.Add(request.NewErrParamRequired("DomainEntry"))
+ }
+ if s.DomainName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DomainName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDomainEntry sets the DomainEntry field's value.
+func (s *CreateDomainEntryInput) SetDomainEntry(v *DomainEntry) *CreateDomainEntryInput {
+ s.DomainEntry = v
+ return s
+}
+
+// SetDomainName sets the DomainName field's value.
+func (s *CreateDomainEntryInput) SetDomainName(v string) *CreateDomainEntryInput {
+ s.DomainName = &v
+ return s
+}
+
+type CreateDomainEntryOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operation *Operation `locationName:"operation" type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateDomainEntryOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDomainEntryOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperation sets the Operation field's value.
+func (s *CreateDomainEntryOutput) SetOperation(v *Operation) *CreateDomainEntryOutput {
+ s.Operation = v
+ return s
+}
+
+type CreateDomainInput struct {
+ _ struct{} `type:"structure"`
+
+ // The domain name to manage (e.g., example.com).
+ //
+ // You cannot register a new domain name using Lightsail. You must register
+ // a domain name using Amazon Route 53 or another domain name registrar. If
+ // you have already registered your domain, you can enter its name in this parameter
+ // to manage the DNS records for that domain using Lightsail.
+ //
+ // DomainName is a required field
+ DomainName *string `locationName:"domainName" type:"string" required:"true"`
+
+ // The tag keys and optional values to add to the resource during create.
+ //
+ // Use the TagResource action to tag a resource after it's created.
+ Tags []*Tag `locationName:"tags" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateDomainInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDomainInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateDomainInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateDomainInput"}
+ if s.DomainName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DomainName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDomainName sets the DomainName field's value.
+func (s *CreateDomainInput) SetDomainName(v string) *CreateDomainInput {
+ s.DomainName = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *CreateDomainInput) SetTags(v []*Tag) *CreateDomainInput {
+ s.Tags = v
+ return s
+}
+
+type CreateDomainOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operation *Operation `locationName:"operation" type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateDomainOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDomainOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperation sets the Operation field's value.
+func (s *CreateDomainOutput) SetOperation(v *Operation) *CreateDomainOutput {
+ s.Operation = v
+ return s
+}
+
+type CreateInstanceSnapshotInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Lightsail instance on which to base your snapshot.
+ //
+ // InstanceName is a required field
+ InstanceName *string `locationName:"instanceName" type:"string" required:"true"`
+
+ // The name for your new snapshot.
+ //
+ // InstanceSnapshotName is a required field
+ InstanceSnapshotName *string `locationName:"instanceSnapshotName" type:"string" required:"true"`
+
+ // The tag keys and optional values to add to the resource during create.
+ //
+ // Use the TagResource action to tag a resource after it's created.
+ Tags []*Tag `locationName:"tags" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateInstanceSnapshotInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateInstanceSnapshotInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateInstanceSnapshotInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateInstanceSnapshotInput"}
+ if s.InstanceName == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceName"))
+ }
+ if s.InstanceSnapshotName == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceSnapshotName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetInstanceName sets the InstanceName field's value.
+func (s *CreateInstanceSnapshotInput) SetInstanceName(v string) *CreateInstanceSnapshotInput {
+ s.InstanceName = &v
+ return s
+}
+
+// SetInstanceSnapshotName sets the InstanceSnapshotName field's value.
+func (s *CreateInstanceSnapshotInput) SetInstanceSnapshotName(v string) *CreateInstanceSnapshotInput {
+ s.InstanceSnapshotName = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *CreateInstanceSnapshotInput) SetTags(v []*Tag) *CreateInstanceSnapshotInput {
+ s.Tags = v
+ return s
+}
+
+type CreateInstanceSnapshotOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateInstanceSnapshotOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateInstanceSnapshotOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *CreateInstanceSnapshotOutput) SetOperations(v []*Operation) *CreateInstanceSnapshotOutput {
+ s.Operations = v
+ return s
+}
+
+type CreateInstancesFromSnapshotInput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects representing the add-ons to enable for the new instance.
+ AddOns []*AddOnRequest `locationName:"addOns" type:"list"`
+
+ // An object containing information about one or more disk mappings.
+ AttachedDiskMapping map[string][]*DiskMap `locationName:"attachedDiskMapping" type:"map"`
+
+ // The Availability Zone where you want to create your instances. Use the following
+ // formatting: us-east-2a (case sensitive). You can get a list of Availability
+ // Zones by using the get regions (http://docs.aws.amazon.com/lightsail/2016-11-28/api-reference/API_GetRegions.html)
+ // operation. Be sure to add the include Availability Zones parameter to your
+ // request.
+ //
+ // AvailabilityZone is a required field
+ AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"`
+
+ // The bundle of specification information for your virtual private server (or
+ // instance), including the pricing plan (e.g., micro_1_0).
+ //
+ // BundleId is a required field
+ BundleId *string `locationName:"bundleId" type:"string" required:"true"`
+
+ // The names for your new instances.
+ //
+ // InstanceNames is a required field
+ InstanceNames []*string `locationName:"instanceNames" type:"list" required:"true"`
+
+ // The name of the instance snapshot on which you are basing your new instances.
+ // Use the get instance snapshots operation to return information about your
+ // existing snapshots.
+ //
+ // Constraint:
+ //
+ // * This parameter cannot be defined together with the source instance name
+ // parameter. The instance snapshot name and source instance name parameters
+ // are mutually exclusive.
+ InstanceSnapshotName *string `locationName:"instanceSnapshotName" type:"string"`
+
+ // The IP address type for the instance.
+ //
+ // The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6.
+ //
+ // The default value is dualstack.
+ IpAddressType *string `locationName:"ipAddressType" type:"string" enum:"IpAddressType"`
+
+ // The name for your key pair.
+ KeyPairName *string `locationName:"keyPairName" type:"string"`
+
+ // The date of the automatic snapshot to use for the new instance. Use the get
+ // auto snapshots operation to identify the dates of the available automatic
+ // snapshots.
+ //
+ // Constraints:
+ //
+ // * Must be specified in YYYY-MM-DD format.
+ //
+ // * This parameter cannot be defined together with the use latest restorable
+ // auto snapshot parameter. The restore date and use latest restorable auto
+ // snapshot parameters are mutually exclusive.
+ //
+ // * Define this parameter only when creating a new instance from an automatic
+ // snapshot. For more information, see the Amazon Lightsail Developer Guide
+ // (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots).
+ RestoreDate *string `locationName:"restoreDate" type:"string"`
+
+ // The name of the source instance from which the source automatic snapshot
+ // was created.
+ //
+ // Constraints:
+ //
+ // * This parameter cannot be defined together with the instance snapshot
+ // name parameter. The source instance name and instance snapshot name parameters
+ // are mutually exclusive.
+ //
+ // * Define this parameter only when creating a new instance from an automatic
+ // snapshot. For more information, see the Amazon Lightsail Developer Guide
+ // (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots).
+ SourceInstanceName *string `locationName:"sourceInstanceName" type:"string"`
+
+ // The tag keys and optional values to add to the resource during create.
+ //
+ // Use the TagResource action to tag a resource after it's created.
+ Tags []*Tag `locationName:"tags" type:"list"`
+
+ // A Boolean value to indicate whether to use the latest available automatic
+ // snapshot.
+ //
+ // Constraints:
+ //
+ // * This parameter cannot be defined together with the restore date parameter.
+ // The use latest restorable auto snapshot and restore date parameters are
+ // mutually exclusive.
+ //
+ // * Define this parameter only when creating a new instance from an automatic
+ // snapshot. For more information, see the Amazon Lightsail Developer Guide
+ // (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots).
+ UseLatestRestorableAutoSnapshot *bool `locationName:"useLatestRestorableAutoSnapshot" type:"boolean"`
+
+ // You can create a launch script that configures a server with additional user
+ // data. For example, apt-get -y update.
+ //
+ // Depending on the machine image you choose, the command to get software on
+ // your instance varies. Amazon Linux and CentOS use yum, Debian and Ubuntu
+ // use apt-get, and FreeBSD uses pkg. For a complete list, see the Amazon Lightsail
+ // Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/compare-options-choose-lightsail-instance-image).
+ UserData *string `locationName:"userData" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateInstancesFromSnapshotInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateInstancesFromSnapshotInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateInstancesFromSnapshotInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateInstancesFromSnapshotInput"}
+ if s.AvailabilityZone == nil {
+ invalidParams.Add(request.NewErrParamRequired("AvailabilityZone"))
+ }
+ if s.BundleId == nil {
+ invalidParams.Add(request.NewErrParamRequired("BundleId"))
+ }
+ if s.InstanceNames == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceNames"))
+ }
+ if s.AddOns != nil {
+ for i, v := range s.AddOns {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AddOns", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAddOns sets the AddOns field's value.
+func (s *CreateInstancesFromSnapshotInput) SetAddOns(v []*AddOnRequest) *CreateInstancesFromSnapshotInput {
+ s.AddOns = v
+ return s
+}
+
+// SetAttachedDiskMapping sets the AttachedDiskMapping field's value.
+func (s *CreateInstancesFromSnapshotInput) SetAttachedDiskMapping(v map[string][]*DiskMap) *CreateInstancesFromSnapshotInput {
+ s.AttachedDiskMapping = v
+ return s
+}
+
+// SetAvailabilityZone sets the AvailabilityZone field's value.
+func (s *CreateInstancesFromSnapshotInput) SetAvailabilityZone(v string) *CreateInstancesFromSnapshotInput {
+ s.AvailabilityZone = &v
+ return s
+}
+
+// SetBundleId sets the BundleId field's value.
+func (s *CreateInstancesFromSnapshotInput) SetBundleId(v string) *CreateInstancesFromSnapshotInput {
+ s.BundleId = &v
+ return s
+}
+
+// SetInstanceNames sets the InstanceNames field's value.
+func (s *CreateInstancesFromSnapshotInput) SetInstanceNames(v []*string) *CreateInstancesFromSnapshotInput {
+ s.InstanceNames = v
+ return s
+}
+
+// SetInstanceSnapshotName sets the InstanceSnapshotName field's value.
+func (s *CreateInstancesFromSnapshotInput) SetInstanceSnapshotName(v string) *CreateInstancesFromSnapshotInput {
+ s.InstanceSnapshotName = &v
+ return s
+}
+
+// SetIpAddressType sets the IpAddressType field's value.
+func (s *CreateInstancesFromSnapshotInput) SetIpAddressType(v string) *CreateInstancesFromSnapshotInput {
+ s.IpAddressType = &v
+ return s
+}
+
+// SetKeyPairName sets the KeyPairName field's value.
+func (s *CreateInstancesFromSnapshotInput) SetKeyPairName(v string) *CreateInstancesFromSnapshotInput {
+ s.KeyPairName = &v
+ return s
+}
+
+// SetRestoreDate sets the RestoreDate field's value.
+func (s *CreateInstancesFromSnapshotInput) SetRestoreDate(v string) *CreateInstancesFromSnapshotInput {
+ s.RestoreDate = &v
+ return s
+}
+
+// SetSourceInstanceName sets the SourceInstanceName field's value.
+func (s *CreateInstancesFromSnapshotInput) SetSourceInstanceName(v string) *CreateInstancesFromSnapshotInput {
+ s.SourceInstanceName = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *CreateInstancesFromSnapshotInput) SetTags(v []*Tag) *CreateInstancesFromSnapshotInput {
+ s.Tags = v
+ return s
+}
+
+// SetUseLatestRestorableAutoSnapshot sets the UseLatestRestorableAutoSnapshot field's value.
+func (s *CreateInstancesFromSnapshotInput) SetUseLatestRestorableAutoSnapshot(v bool) *CreateInstancesFromSnapshotInput {
+ s.UseLatestRestorableAutoSnapshot = &v
+ return s
+}
+
+// SetUserData sets the UserData field's value.
+func (s *CreateInstancesFromSnapshotInput) SetUserData(v string) *CreateInstancesFromSnapshotInput {
+ s.UserData = &v
+ return s
+}
+
+type CreateInstancesFromSnapshotOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateInstancesFromSnapshotOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateInstancesFromSnapshotOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *CreateInstancesFromSnapshotOutput) SetOperations(v []*Operation) *CreateInstancesFromSnapshotOutput {
+ s.Operations = v
+ return s
+}
+
+type CreateInstancesInput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects representing the add-ons to enable for the new instance.
+ AddOns []*AddOnRequest `locationName:"addOns" type:"list"`
+
+ // The Availability Zone in which to create your instance. Use the following
+ // format: us-east-2a (case sensitive). You can get a list of Availability Zones
+ // by using the get regions (http://docs.aws.amazon.com/lightsail/2016-11-28/api-reference/API_GetRegions.html)
+ // operation. Be sure to add the include Availability Zones parameter to your
+ // request.
+ //
+ // AvailabilityZone is a required field
+ AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"`
+
+ // The ID for a virtual private server image (e.g., app_wordpress_4_4 or app_lamp_7_0).
+ // Use the get blueprints operation to return a list of available images (or
+ // blueprints).
+ //
+ // Use active blueprints when creating new instances. Inactive blueprints are
+ // listed to support customers with existing instances and are not necessarily
+ // available to create new instances. Blueprints are marked inactive when they
+ // become outdated due to operating system updates or new application releases.
+ //
+ // BlueprintId is a required field
+ BlueprintId *string `locationName:"blueprintId" type:"string" required:"true"`
+
+ // The bundle of specification information for your virtual private server (or
+ // instance), including the pricing plan (e.g., micro_1_0).
+ //
+ // BundleId is a required field
+ BundleId *string `locationName:"bundleId" type:"string" required:"true"`
+
+ // (Deprecated) The name for your custom image.
+ //
+ // In releases prior to June 12, 2017, this parameter was ignored by the API.
+ // It is now deprecated.
+ //
+ // Deprecated: CustomImageName has been deprecated
+ CustomImageName *string `locationName:"customImageName" deprecated:"true" type:"string"`
+
+ // The names to use for your new Lightsail instances. Separate multiple values
+ // using quotation marks and commas, for example: ["MyFirstInstance","MySecondInstance"]
+ //
+ // InstanceNames is a required field
+ InstanceNames []*string `locationName:"instanceNames" type:"list" required:"true"`
+
+ // The IP address type for the instance.
+ //
+ // The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6.
+ //
+ // The default value is dualstack.
+ IpAddressType *string `locationName:"ipAddressType" type:"string" enum:"IpAddressType"`
+
+ // The name of your key pair.
+ KeyPairName *string `locationName:"keyPairName" type:"string"`
+
+ // The tag keys and optional values to add to the resource during create.
+ //
+ // Use the TagResource action to tag a resource after it's created.
+ Tags []*Tag `locationName:"tags" type:"list"`
+
+ // A launch script you can create that configures a server with additional user
+ // data. For example, you might want to run apt-get -y update.
+ //
+ // Depending on the machine image you choose, the command to get software on
+ // your instance varies. Amazon Linux and CentOS use yum, Debian and Ubuntu
+ // use apt-get, and FreeBSD uses pkg. For a complete list, see the Amazon Lightsail
+ // Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/compare-options-choose-lightsail-instance-image).
+ UserData *string `locationName:"userData" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateInstancesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateInstancesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateInstancesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateInstancesInput"}
+ if s.AvailabilityZone == nil {
+ invalidParams.Add(request.NewErrParamRequired("AvailabilityZone"))
+ }
+ if s.BlueprintId == nil {
+ invalidParams.Add(request.NewErrParamRequired("BlueprintId"))
+ }
+ if s.BundleId == nil {
+ invalidParams.Add(request.NewErrParamRequired("BundleId"))
+ }
+ if s.InstanceNames == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceNames"))
+ }
+ if s.AddOns != nil {
+ for i, v := range s.AddOns {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AddOns", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAddOns sets the AddOns field's value.
+func (s *CreateInstancesInput) SetAddOns(v []*AddOnRequest) *CreateInstancesInput {
+ s.AddOns = v
+ return s
+}
+
+// SetAvailabilityZone sets the AvailabilityZone field's value.
+func (s *CreateInstancesInput) SetAvailabilityZone(v string) *CreateInstancesInput {
+ s.AvailabilityZone = &v
+ return s
+}
+
+// SetBlueprintId sets the BlueprintId field's value.
+func (s *CreateInstancesInput) SetBlueprintId(v string) *CreateInstancesInput {
+ s.BlueprintId = &v
+ return s
+}
+
+// SetBundleId sets the BundleId field's value.
+func (s *CreateInstancesInput) SetBundleId(v string) *CreateInstancesInput {
+ s.BundleId = &v
+ return s
+}
+
+// SetCustomImageName sets the CustomImageName field's value.
+func (s *CreateInstancesInput) SetCustomImageName(v string) *CreateInstancesInput {
+ s.CustomImageName = &v
+ return s
+}
+
+// SetInstanceNames sets the InstanceNames field's value.
+func (s *CreateInstancesInput) SetInstanceNames(v []*string) *CreateInstancesInput {
+ s.InstanceNames = v
+ return s
+}
+
+// SetIpAddressType sets the IpAddressType field's value.
+func (s *CreateInstancesInput) SetIpAddressType(v string) *CreateInstancesInput {
+ s.IpAddressType = &v
+ return s
+}
+
+// SetKeyPairName sets the KeyPairName field's value.
+func (s *CreateInstancesInput) SetKeyPairName(v string) *CreateInstancesInput {
+ s.KeyPairName = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *CreateInstancesInput) SetTags(v []*Tag) *CreateInstancesInput {
+ s.Tags = v
+ return s
+}
+
+// SetUserData sets the UserData field's value.
+func (s *CreateInstancesInput) SetUserData(v string) *CreateInstancesInput {
+ s.UserData = &v
+ return s
+}
+
+type CreateInstancesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateInstancesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateInstancesOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *CreateInstancesOutput) SetOperations(v []*Operation) *CreateInstancesOutput {
+ s.Operations = v
+ return s
+}
+
+type CreateKeyPairInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name for your new key pair.
+ //
+ // KeyPairName is a required field
+ KeyPairName *string `locationName:"keyPairName" type:"string" required:"true"`
+
+ // The tag keys and optional values to add to the resource during create.
+ //
+ // Use the TagResource action to tag a resource after it's created.
+ Tags []*Tag `locationName:"tags" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateKeyPairInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateKeyPairInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateKeyPairInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateKeyPairInput"}
+ if s.KeyPairName == nil {
+ invalidParams.Add(request.NewErrParamRequired("KeyPairName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetKeyPairName sets the KeyPairName field's value.
+func (s *CreateKeyPairInput) SetKeyPairName(v string) *CreateKeyPairInput {
+ s.KeyPairName = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *CreateKeyPairInput) SetTags(v []*Tag) *CreateKeyPairInput {
+ s.Tags = v
+ return s
+}
+
+type CreateKeyPairOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of key-value pairs containing information about the new key pair
+ // you just created.
+ KeyPair *KeyPair `locationName:"keyPair" type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operation *Operation `locationName:"operation" type:"structure"`
+
+ // A base64-encoded RSA private key.
+ PrivateKeyBase64 *string `locationName:"privateKeyBase64" type:"string"`
+
+ // A base64-encoded public key of the ssh-rsa type.
+ PublicKeyBase64 *string `locationName:"publicKeyBase64" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateKeyPairOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateKeyPairOutput) GoString() string {
+ return s.String()
+}
+
+// SetKeyPair sets the KeyPair field's value.
+func (s *CreateKeyPairOutput) SetKeyPair(v *KeyPair) *CreateKeyPairOutput {
+ s.KeyPair = v
+ return s
+}
+
+// SetOperation sets the Operation field's value.
+func (s *CreateKeyPairOutput) SetOperation(v *Operation) *CreateKeyPairOutput {
+ s.Operation = v
+ return s
+}
+
+// SetPrivateKeyBase64 sets the PrivateKeyBase64 field's value.
+func (s *CreateKeyPairOutput) SetPrivateKeyBase64(v string) *CreateKeyPairOutput {
+ s.PrivateKeyBase64 = &v
+ return s
+}
+
+// SetPublicKeyBase64 sets the PublicKeyBase64 field's value.
+func (s *CreateKeyPairOutput) SetPublicKeyBase64(v string) *CreateKeyPairOutput {
+ s.PublicKeyBase64 = &v
+ return s
+}
+
+type CreateLoadBalancerInput struct {
+ _ struct{} `type:"structure"`
+
+ // The optional alternative domains and subdomains to use with your SSL/TLS
+ // certificate (e.g., www.example.com, example.com, m.example.com, blog.example.com).
+ CertificateAlternativeNames []*string `locationName:"certificateAlternativeNames" type:"list"`
+
+ // The domain name with which your certificate is associated (e.g., example.com).
+ //
+ // If you specify certificateDomainName, then certificateName is required (and
+ // vice-versa).
+ CertificateDomainName *string `locationName:"certificateDomainName" type:"string"`
+
+ // The name of the SSL/TLS certificate.
+ //
+ // If you specify certificateName, then certificateDomainName is required (and
+ // vice-versa).
+ CertificateName *string `locationName:"certificateName" type:"string"`
+
+ // The path you provided to perform the load balancer health check. If you didn't
+ // specify a health check path, Lightsail uses the root path of your website
+ // (e.g., "/").
+ //
+ // You may want to specify a custom health check path other than the root of
+ // your application if your home page loads slowly or has a lot of media or
+ // scripting on it.
+ HealthCheckPath *string `locationName:"healthCheckPath" type:"string"`
+
+ // The instance port where you're creating your load balancer.
+ //
+ // InstancePort is a required field
+ InstancePort *int64 `locationName:"instancePort" type:"integer" required:"true"`
+
+ // The IP address type for the load balancer.
+ //
+ // The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6.
+ //
+ // The default value is dualstack.
+ IpAddressType *string `locationName:"ipAddressType" type:"string" enum:"IpAddressType"`
+
+ // The name of your load balancer.
+ //
+ // LoadBalancerName is a required field
+ LoadBalancerName *string `locationName:"loadBalancerName" type:"string" required:"true"`
+
+ // The tag keys and optional values to add to the resource during create.
+ //
+ // Use the TagResource action to tag a resource after it's created.
+ Tags []*Tag `locationName:"tags" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateLoadBalancerInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateLoadBalancerInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateLoadBalancerInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateLoadBalancerInput"}
+ if s.InstancePort == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstancePort"))
+ }
+ if s.InstancePort != nil && *s.InstancePort < -1 {
+ invalidParams.Add(request.NewErrParamMinValue("InstancePort", -1))
+ }
+ if s.LoadBalancerName == nil {
+ invalidParams.Add(request.NewErrParamRequired("LoadBalancerName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetCertificateAlternativeNames sets the CertificateAlternativeNames field's value.
+func (s *CreateLoadBalancerInput) SetCertificateAlternativeNames(v []*string) *CreateLoadBalancerInput {
+ s.CertificateAlternativeNames = v
+ return s
+}
+
+// SetCertificateDomainName sets the CertificateDomainName field's value.
+func (s *CreateLoadBalancerInput) SetCertificateDomainName(v string) *CreateLoadBalancerInput {
+ s.CertificateDomainName = &v
+ return s
+}
+
+// SetCertificateName sets the CertificateName field's value.
+func (s *CreateLoadBalancerInput) SetCertificateName(v string) *CreateLoadBalancerInput {
+ s.CertificateName = &v
+ return s
+}
+
+// SetHealthCheckPath sets the HealthCheckPath field's value.
+func (s *CreateLoadBalancerInput) SetHealthCheckPath(v string) *CreateLoadBalancerInput {
+ s.HealthCheckPath = &v
+ return s
+}
+
+// SetInstancePort sets the InstancePort field's value.
+func (s *CreateLoadBalancerInput) SetInstancePort(v int64) *CreateLoadBalancerInput {
+ s.InstancePort = &v
+ return s
+}
+
+// SetIpAddressType sets the IpAddressType field's value.
+func (s *CreateLoadBalancerInput) SetIpAddressType(v string) *CreateLoadBalancerInput {
+ s.IpAddressType = &v
+ return s
+}
+
+// SetLoadBalancerName sets the LoadBalancerName field's value.
+func (s *CreateLoadBalancerInput) SetLoadBalancerName(v string) *CreateLoadBalancerInput {
+ s.LoadBalancerName = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *CreateLoadBalancerInput) SetTags(v []*Tag) *CreateLoadBalancerInput {
+ s.Tags = v
+ return s
+}
+
+type CreateLoadBalancerOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateLoadBalancerOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateLoadBalancerOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *CreateLoadBalancerOutput) SetOperations(v []*Operation) *CreateLoadBalancerOutput {
+ s.Operations = v
+ return s
+}
+
+type CreateLoadBalancerTlsCertificateInput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of strings listing alternative domains and subdomains for your SSL/TLS
+ // certificate. Lightsail will de-dupe the names for you. You can have a maximum
+ // of 9 alternative names (in addition to the 1 primary domain). We do not support
+ // wildcards (e.g., *.example.com).
+ CertificateAlternativeNames []*string `locationName:"certificateAlternativeNames" type:"list"`
+
+ // The domain name (e.g., example.com) for your SSL/TLS certificate.
+ //
+ // CertificateDomainName is a required field
+ CertificateDomainName *string `locationName:"certificateDomainName" type:"string" required:"true"`
+
+ // The SSL/TLS certificate name.
+ //
+ // You can have up to 10 certificates in your account at one time. Each Lightsail
+ // load balancer can have up to 2 certificates associated with it at one time.
+ // There is also an overall limit to the number of certificates that can be
+ // issue in a 365-day period. For more information, see Limits (http://docs.aws.amazon.com/acm/latest/userguide/acm-limits.html).
+ //
+ // CertificateName is a required field
+ CertificateName *string `locationName:"certificateName" type:"string" required:"true"`
+
+ // The load balancer name where you want to create the SSL/TLS certificate.
+ //
+ // LoadBalancerName is a required field
+ LoadBalancerName *string `locationName:"loadBalancerName" type:"string" required:"true"`
+
+ // The tag keys and optional values to add to the resource during create.
+ //
+ // Use the TagResource action to tag a resource after it's created.
+ Tags []*Tag `locationName:"tags" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateLoadBalancerTlsCertificateInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateLoadBalancerTlsCertificateInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateLoadBalancerTlsCertificateInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateLoadBalancerTlsCertificateInput"}
+ if s.CertificateDomainName == nil {
+ invalidParams.Add(request.NewErrParamRequired("CertificateDomainName"))
+ }
+ if s.CertificateName == nil {
+ invalidParams.Add(request.NewErrParamRequired("CertificateName"))
+ }
+ if s.LoadBalancerName == nil {
+ invalidParams.Add(request.NewErrParamRequired("LoadBalancerName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetCertificateAlternativeNames sets the CertificateAlternativeNames field's value.
+func (s *CreateLoadBalancerTlsCertificateInput) SetCertificateAlternativeNames(v []*string) *CreateLoadBalancerTlsCertificateInput {
+ s.CertificateAlternativeNames = v
+ return s
+}
+
+// SetCertificateDomainName sets the CertificateDomainName field's value.
+func (s *CreateLoadBalancerTlsCertificateInput) SetCertificateDomainName(v string) *CreateLoadBalancerTlsCertificateInput {
+ s.CertificateDomainName = &v
+ return s
+}
+
+// SetCertificateName sets the CertificateName field's value.
+func (s *CreateLoadBalancerTlsCertificateInput) SetCertificateName(v string) *CreateLoadBalancerTlsCertificateInput {
+ s.CertificateName = &v
+ return s
+}
+
+// SetLoadBalancerName sets the LoadBalancerName field's value.
+func (s *CreateLoadBalancerTlsCertificateInput) SetLoadBalancerName(v string) *CreateLoadBalancerTlsCertificateInput {
+ s.LoadBalancerName = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *CreateLoadBalancerTlsCertificateInput) SetTags(v []*Tag) *CreateLoadBalancerTlsCertificateInput {
+ s.Tags = v
+ return s
+}
+
+type CreateLoadBalancerTlsCertificateOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateLoadBalancerTlsCertificateOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateLoadBalancerTlsCertificateOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *CreateLoadBalancerTlsCertificateOutput) SetOperations(v []*Operation) *CreateLoadBalancerTlsCertificateOutput {
+ s.Operations = v
+ return s
+}
+
+type CreateRelationalDatabaseFromSnapshotInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Availability Zone in which to create your new database. Use the us-east-2a
+ // case-sensitive format.
+ //
+ // You can get a list of Availability Zones by using the get regions operation.
+ // Be sure to add the include relational database Availability Zones parameter
+ // to your request.
+ AvailabilityZone *string `locationName:"availabilityZone" type:"string"`
+
+ // Specifies the accessibility options for your new database. A value of true
+ // specifies a database that is available to resources outside of your Lightsail
+ // account. A value of false specifies a database that is available only to
+ // your Lightsail resources in the same region as your database.
+ PubliclyAccessible *bool `locationName:"publiclyAccessible" type:"boolean"`
+
+ // The bundle ID for your new database. A bundle describes the performance specifications
+ // for your database.
+ //
+ // You can get a list of database bundle IDs by using the get relational database
+ // bundles operation.
+ //
+ // When creating a new database from a snapshot, you cannot choose a bundle
+ // that is smaller than the bundle of the source database.
+ RelationalDatabaseBundleId *string `locationName:"relationalDatabaseBundleId" type:"string"`
+
+ // The name to use for your new Lightsail database resource.
+ //
+ // Constraints:
+ //
+ // * Must contain from 2 to 255 alphanumeric characters, or hyphens.
+ //
+ // * The first and last character must be a letter or number.
+ //
+ // RelationalDatabaseName is a required field
+ RelationalDatabaseName *string `locationName:"relationalDatabaseName" type:"string" required:"true"`
+
+ // The name of the database snapshot from which to create your new database.
+ RelationalDatabaseSnapshotName *string `locationName:"relationalDatabaseSnapshotName" type:"string"`
+
+ // The date and time to restore your database from.
+ //
+ // Constraints:
+ //
+ // * Must be before the latest restorable time for the database.
+ //
+ // * Cannot be specified if the use latest restorable time parameter is true.
+ //
+ // * Specified in Coordinated Universal Time (UTC).
+ //
+ // * Specified in the Unix time format. For example, if you wish to use a
+ // restore time of October 1, 2018, at 8 PM UTC, then you input 1538424000
+ // as the restore time.
+ RestoreTime *time.Time `locationName:"restoreTime" type:"timestamp"`
+
+ // The name of the source database.
+ SourceRelationalDatabaseName *string `locationName:"sourceRelationalDatabaseName" type:"string"`
+
+ // The tag keys and optional values to add to the resource during create.
+ //
+ // Use the TagResource action to tag a resource after it's created.
+ Tags []*Tag `locationName:"tags" type:"list"`
+
+ // Specifies whether your database is restored from the latest backup time.
+ // A value of true restores from the latest backup time.
+ //
+ // Default: false
+ //
+ // Constraints: Cannot be specified if the restore time parameter is provided.
+ UseLatestRestorableTime *bool `locationName:"useLatestRestorableTime" type:"boolean"`
+}
+
+// String returns the string representation
+func (s CreateRelationalDatabaseFromSnapshotInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateRelationalDatabaseFromSnapshotInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateRelationalDatabaseFromSnapshotInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateRelationalDatabaseFromSnapshotInput"}
+ if s.RelationalDatabaseName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAvailabilityZone sets the AvailabilityZone field's value.
+func (s *CreateRelationalDatabaseFromSnapshotInput) SetAvailabilityZone(v string) *CreateRelationalDatabaseFromSnapshotInput {
+ s.AvailabilityZone = &v
+ return s
+}
+
+// SetPubliclyAccessible sets the PubliclyAccessible field's value.
+func (s *CreateRelationalDatabaseFromSnapshotInput) SetPubliclyAccessible(v bool) *CreateRelationalDatabaseFromSnapshotInput {
+ s.PubliclyAccessible = &v
+ return s
+}
+
+// SetRelationalDatabaseBundleId sets the RelationalDatabaseBundleId field's value.
+func (s *CreateRelationalDatabaseFromSnapshotInput) SetRelationalDatabaseBundleId(v string) *CreateRelationalDatabaseFromSnapshotInput {
+ s.RelationalDatabaseBundleId = &v
+ return s
+}
+
+// SetRelationalDatabaseName sets the RelationalDatabaseName field's value.
+func (s *CreateRelationalDatabaseFromSnapshotInput) SetRelationalDatabaseName(v string) *CreateRelationalDatabaseFromSnapshotInput {
+ s.RelationalDatabaseName = &v
+ return s
+}
+
+// SetRelationalDatabaseSnapshotName sets the RelationalDatabaseSnapshotName field's value.
+func (s *CreateRelationalDatabaseFromSnapshotInput) SetRelationalDatabaseSnapshotName(v string) *CreateRelationalDatabaseFromSnapshotInput {
+ s.RelationalDatabaseSnapshotName = &v
+ return s
+}
+
+// SetRestoreTime sets the RestoreTime field's value.
+func (s *CreateRelationalDatabaseFromSnapshotInput) SetRestoreTime(v time.Time) *CreateRelationalDatabaseFromSnapshotInput {
+ s.RestoreTime = &v
+ return s
+}
+
+// SetSourceRelationalDatabaseName sets the SourceRelationalDatabaseName field's value.
+func (s *CreateRelationalDatabaseFromSnapshotInput) SetSourceRelationalDatabaseName(v string) *CreateRelationalDatabaseFromSnapshotInput {
+ s.SourceRelationalDatabaseName = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *CreateRelationalDatabaseFromSnapshotInput) SetTags(v []*Tag) *CreateRelationalDatabaseFromSnapshotInput {
+ s.Tags = v
+ return s
+}
+
+// SetUseLatestRestorableTime sets the UseLatestRestorableTime field's value.
+func (s *CreateRelationalDatabaseFromSnapshotInput) SetUseLatestRestorableTime(v bool) *CreateRelationalDatabaseFromSnapshotInput {
+ s.UseLatestRestorableTime = &v
+ return s
+}
+
+type CreateRelationalDatabaseFromSnapshotOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateRelationalDatabaseFromSnapshotOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateRelationalDatabaseFromSnapshotOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *CreateRelationalDatabaseFromSnapshotOutput) SetOperations(v []*Operation) *CreateRelationalDatabaseFromSnapshotOutput {
+ s.Operations = v
+ return s
+}
+
+type CreateRelationalDatabaseInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Availability Zone in which to create your new database. Use the us-east-2a
+ // case-sensitive format.
+ //
+ // You can get a list of Availability Zones by using the get regions operation.
+ // Be sure to add the include relational database Availability Zones parameter
+ // to your request.
+ AvailabilityZone *string `locationName:"availabilityZone" type:"string"`
+
+ // The meaning of this parameter differs according to the database engine you
+ // use.
+ //
+ // MySQL
+ //
+ // The name of the database to create when the Lightsail database resource is
+ // created. If this parameter isn't specified, no database is created in the
+ // database resource.
+ //
+ // Constraints:
+ //
+ // * Must contain 1 to 64 letters or numbers.
+ //
+ // * Must begin with a letter. Subsequent characters can be letters, underscores,
+ // or digits (0- 9).
+ //
+ // * Can't be a word reserved by the specified database engine. For more
+ // information about reserved words in MySQL, see the Keywords and Reserved
+ // Words articles for MySQL 5.6 (https://dev.mysql.com/doc/refman/5.6/en/keywords.html),
+ // MySQL 5.7 (https://dev.mysql.com/doc/refman/5.7/en/keywords.html), and
+ // MySQL 8.0 (https://dev.mysql.com/doc/refman/8.0/en/keywords.html).
+ //
+ // PostgreSQL
+ //
+ // The name of the database to create when the Lightsail database resource is
+ // created. If this parameter isn't specified, a database named postgres is
+ // created in the database resource.
+ //
+ // Constraints:
+ //
+ // * Must contain 1 to 63 letters or numbers.
+ //
+ // * Must begin with a letter. Subsequent characters can be letters, underscores,
+ // or digits (0- 9).
+ //
+ // * Can't be a word reserved by the specified database engine. For more
+ // information about reserved words in PostgreSQL, see the SQL Key Words
+ // articles for PostgreSQL 9.6 (https://www.postgresql.org/docs/9.6/sql-keywords-appendix.html),
+ // PostgreSQL 10 (https://www.postgresql.org/docs/10/sql-keywords-appendix.html),
+ // PostgreSQL 11 (https://www.postgresql.org/docs/11/sql-keywords-appendix.html),
+ // and PostgreSQL 12 (https://www.postgresql.org/docs/12/sql-keywords-appendix.html).
+ //
+ // MasterDatabaseName is a required field
+ MasterDatabaseName *string `locationName:"masterDatabaseName" type:"string" required:"true"`
+
+ // The password for the master user. The password can include any printable
+ // ASCII character except "/", """, or "@". It cannot contain spaces.
+ //
+ // MySQL
+ //
+ // Constraints: Must contain from 8 to 41 characters.
+ //
+ // PostgreSQL
+ //
+ // Constraints: Must contain from 8 to 128 characters.
+ MasterUserPassword *string `locationName:"masterUserPassword" type:"string" sensitive:"true"`
+
+ // The name for the master user.
+ //
+ // MySQL
+ //
+ // Constraints:
+ //
+ // * Required for MySQL.
+ //
+ // * Must be 1 to 16 letters or numbers. Can contain underscores.
+ //
+ // * First character must be a letter.
+ //
+ // * Can't be a reserved word for the chosen database engine. For more information
+ // about reserved words in MySQL 5.6 or 5.7, see the Keywords and Reserved
+ // Words articles for MySQL 5.6 (https://dev.mysql.com/doc/refman/5.6/en/keywords.html),
+ // MySQL 5.7 (https://dev.mysql.com/doc/refman/5.7/en/keywords.html), or
+ // MySQL 8.0 (https://dev.mysql.com/doc/refman/8.0/en/keywords.html).
+ //
+ // PostgreSQL
+ //
+ // Constraints:
+ //
+ // * Required for PostgreSQL.
+ //
+ // * Must be 1 to 63 letters or numbers. Can contain underscores.
+ //
+ // * First character must be a letter.
+ //
+	// * Can't be a reserved word for the chosen database engine. For more
+	// information about reserved words in PostgreSQL, see the SQL Key Words
+	// articles for PostgreSQL 9.6 (https://www.postgresql.org/docs/9.6/sql-keywords-appendix.html),
+ // PostgreSQL 10 (https://www.postgresql.org/docs/10/sql-keywords-appendix.html),
+ // PostgreSQL 11 (https://www.postgresql.org/docs/11/sql-keywords-appendix.html),
+ // and PostgreSQL 12 (https://www.postgresql.org/docs/12/sql-keywords-appendix.html).
+ //
+ // MasterUsername is a required field
+ MasterUsername *string `locationName:"masterUsername" type:"string" required:"true"`
+
+ // The daily time range during which automated backups are created for your
+ // new database if automated backups are enabled.
+ //
+ // The default is a 30-minute window selected at random from an 8-hour block
+ // of time for each AWS Region. For more information about the preferred backup
+ // window time blocks for each region, see the Working With Backups (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithAutomatedBackups.html#USER_WorkingWithAutomatedBackups.BackupWindow)
+ // guide in the Amazon Relational Database Service (Amazon RDS) documentation.
+ //
+ // Constraints:
+ //
+ // * Must be in the hh24:mi-hh24:mi format. Example: 16:00-16:30
+ //
+ // * Specified in Coordinated Universal Time (UTC).
+ //
+ // * Must not conflict with the preferred maintenance window.
+ //
+ // * Must be at least 30 minutes.
+ PreferredBackupWindow *string `locationName:"preferredBackupWindow" type:"string"`
+
+ // The weekly time range during which system maintenance can occur on your new
+ // database.
+ //
+ // The default is a 30-minute window selected at random from an 8-hour block
+ // of time for each AWS Region, occurring on a random day of the week.
+ //
+ // Constraints:
+ //
+ // * Must be in the ddd:hh24:mi-ddd:hh24:mi format.
+ //
+ // * Valid days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.
+ //
+ // * Must be at least 30 minutes.
+ //
+ // * Specified in Coordinated Universal Time (UTC).
+ //
+ // * Example: Tue:17:00-Tue:17:30
+ PreferredMaintenanceWindow *string `locationName:"preferredMaintenanceWindow" type:"string"`
+
+ // Specifies the accessibility options for your new database. A value of true
+ // specifies a database that is available to resources outside of your Lightsail
+ // account. A value of false specifies a database that is available only to
+ // your Lightsail resources in the same region as your database.
+ PubliclyAccessible *bool `locationName:"publiclyAccessible" type:"boolean"`
+
+ // The blueprint ID for your new database. A blueprint describes the major engine
+ // version of a database.
+ //
+ // You can get a list of database blueprints IDs by using the get relational
+ // database blueprints operation.
+ //
+ // RelationalDatabaseBlueprintId is a required field
+ RelationalDatabaseBlueprintId *string `locationName:"relationalDatabaseBlueprintId" type:"string" required:"true"`
+
+ // The bundle ID for your new database. A bundle describes the performance specifications
+ // for your database.
+ //
+ // You can get a list of database bundle IDs by using the get relational database
+ // bundles operation.
+ //
+ // RelationalDatabaseBundleId is a required field
+ RelationalDatabaseBundleId *string `locationName:"relationalDatabaseBundleId" type:"string" required:"true"`
+
+ // The name to use for your new Lightsail database resource.
+ //
+ // Constraints:
+ //
+ // * Must contain from 2 to 255 alphanumeric characters, or hyphens.
+ //
+ // * The first and last character must be a letter or number.
+ //
+ // RelationalDatabaseName is a required field
+ RelationalDatabaseName *string `locationName:"relationalDatabaseName" type:"string" required:"true"`
+
+ // The tag keys and optional values to add to the resource during create.
+ //
+ // Use the TagResource action to tag a resource after it's created.
+ Tags []*Tag `locationName:"tags" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateRelationalDatabaseInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateRelationalDatabaseInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateRelationalDatabaseInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateRelationalDatabaseInput"}
+ if s.MasterDatabaseName == nil {
+ invalidParams.Add(request.NewErrParamRequired("MasterDatabaseName"))
+ }
+ if s.MasterUsername == nil {
+ invalidParams.Add(request.NewErrParamRequired("MasterUsername"))
+ }
+ if s.RelationalDatabaseBlueprintId == nil {
+ invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseBlueprintId"))
+ }
+ if s.RelationalDatabaseBundleId == nil {
+ invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseBundleId"))
+ }
+ if s.RelationalDatabaseName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAvailabilityZone sets the AvailabilityZone field's value.
+func (s *CreateRelationalDatabaseInput) SetAvailabilityZone(v string) *CreateRelationalDatabaseInput {
+ s.AvailabilityZone = &v
+ return s
+}
+
+// SetMasterDatabaseName sets the MasterDatabaseName field's value.
+func (s *CreateRelationalDatabaseInput) SetMasterDatabaseName(v string) *CreateRelationalDatabaseInput {
+ s.MasterDatabaseName = &v
+ return s
+}
+
+// SetMasterUserPassword sets the MasterUserPassword field's value.
+func (s *CreateRelationalDatabaseInput) SetMasterUserPassword(v string) *CreateRelationalDatabaseInput {
+ s.MasterUserPassword = &v
+ return s
+}
+
+// SetMasterUsername sets the MasterUsername field's value.
+func (s *CreateRelationalDatabaseInput) SetMasterUsername(v string) *CreateRelationalDatabaseInput {
+ s.MasterUsername = &v
+ return s
+}
+
+// SetPreferredBackupWindow sets the PreferredBackupWindow field's value.
+func (s *CreateRelationalDatabaseInput) SetPreferredBackupWindow(v string) *CreateRelationalDatabaseInput {
+ s.PreferredBackupWindow = &v
+ return s
+}
+
+// SetPreferredMaintenanceWindow sets the PreferredMaintenanceWindow field's value.
+func (s *CreateRelationalDatabaseInput) SetPreferredMaintenanceWindow(v string) *CreateRelationalDatabaseInput {
+ s.PreferredMaintenanceWindow = &v
+ return s
+}
+
+// SetPubliclyAccessible sets the PubliclyAccessible field's value.
+func (s *CreateRelationalDatabaseInput) SetPubliclyAccessible(v bool) *CreateRelationalDatabaseInput {
+ s.PubliclyAccessible = &v
+ return s
+}
+
+// SetRelationalDatabaseBlueprintId sets the RelationalDatabaseBlueprintId field's value.
+func (s *CreateRelationalDatabaseInput) SetRelationalDatabaseBlueprintId(v string) *CreateRelationalDatabaseInput {
+ s.RelationalDatabaseBlueprintId = &v
+ return s
+}
+
+// SetRelationalDatabaseBundleId sets the RelationalDatabaseBundleId field's value.
+func (s *CreateRelationalDatabaseInput) SetRelationalDatabaseBundleId(v string) *CreateRelationalDatabaseInput {
+ s.RelationalDatabaseBundleId = &v
+ return s
+}
+
+// SetRelationalDatabaseName sets the RelationalDatabaseName field's value.
+func (s *CreateRelationalDatabaseInput) SetRelationalDatabaseName(v string) *CreateRelationalDatabaseInput {
+ s.RelationalDatabaseName = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *CreateRelationalDatabaseInput) SetTags(v []*Tag) *CreateRelationalDatabaseInput {
+ s.Tags = v
+ return s
+}
+
+type CreateRelationalDatabaseOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateRelationalDatabaseOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateRelationalDatabaseOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *CreateRelationalDatabaseOutput) SetOperations(v []*Operation) *CreateRelationalDatabaseOutput {
+ s.Operations = v
+ return s
+}
+
+type CreateRelationalDatabaseSnapshotInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the database on which to base your new snapshot.
+ //
+ // RelationalDatabaseName is a required field
+ RelationalDatabaseName *string `locationName:"relationalDatabaseName" type:"string" required:"true"`
+
+ // The name for your new database snapshot.
+ //
+ // Constraints:
+ //
+ // * Must contain from 2 to 255 alphanumeric characters, or hyphens.
+ //
+ // * The first and last character must be a letter or number.
+ //
+ // RelationalDatabaseSnapshotName is a required field
+ RelationalDatabaseSnapshotName *string `locationName:"relationalDatabaseSnapshotName" type:"string" required:"true"`
+
+ // The tag keys and optional values to add to the resource during create.
+ //
+ // Use the TagResource action to tag a resource after it's created.
+ Tags []*Tag `locationName:"tags" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateRelationalDatabaseSnapshotInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateRelationalDatabaseSnapshotInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateRelationalDatabaseSnapshotInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateRelationalDatabaseSnapshotInput"}
+ if s.RelationalDatabaseName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseName"))
+ }
+ if s.RelationalDatabaseSnapshotName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseSnapshotName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetRelationalDatabaseName sets the RelationalDatabaseName field's value.
+func (s *CreateRelationalDatabaseSnapshotInput) SetRelationalDatabaseName(v string) *CreateRelationalDatabaseSnapshotInput {
+ s.RelationalDatabaseName = &v
+ return s
+}
+
+// SetRelationalDatabaseSnapshotName sets the RelationalDatabaseSnapshotName field's value.
+func (s *CreateRelationalDatabaseSnapshotInput) SetRelationalDatabaseSnapshotName(v string) *CreateRelationalDatabaseSnapshotInput {
+ s.RelationalDatabaseSnapshotName = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *CreateRelationalDatabaseSnapshotInput) SetTags(v []*Tag) *CreateRelationalDatabaseSnapshotInput {
+ s.Tags = v
+ return s
+}
+
+type CreateRelationalDatabaseSnapshotOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateRelationalDatabaseSnapshotOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateRelationalDatabaseSnapshotOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *CreateRelationalDatabaseSnapshotOutput) SetOperations(v []*Operation) *CreateRelationalDatabaseSnapshotOutput {
+ s.Operations = v
+ return s
+}
+
+type DeleteAlarmInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the alarm to delete.
+ //
+ // AlarmName is a required field
+ AlarmName *string `locationName:"alarmName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteAlarmInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteAlarmInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteAlarmInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteAlarmInput"}
+ if s.AlarmName == nil {
+ invalidParams.Add(request.NewErrParamRequired("AlarmName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAlarmName sets the AlarmName field's value.
+func (s *DeleteAlarmInput) SetAlarmName(v string) *DeleteAlarmInput {
+ s.AlarmName = &v
+ return s
+}
+
+type DeleteAlarmOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s DeleteAlarmOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteAlarmOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *DeleteAlarmOutput) SetOperations(v []*Operation) *DeleteAlarmOutput {
+ s.Operations = v
+ return s
+}
+
+type DeleteAutoSnapshotInput struct {
+ _ struct{} `type:"structure"`
+
+ // The date of the automatic snapshot to delete in YYYY-MM-DD format. Use the
+ // get auto snapshots operation to get the available automatic snapshots for
+ // a resource.
+ //
+ // Date is a required field
+ Date *string `locationName:"date" type:"string" required:"true"`
+
+ // The name of the source instance or disk from which to delete the automatic
+ // snapshot.
+ //
+ // ResourceName is a required field
+ ResourceName *string `locationName:"resourceName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteAutoSnapshotInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteAutoSnapshotInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteAutoSnapshotInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteAutoSnapshotInput"}
+ if s.Date == nil {
+ invalidParams.Add(request.NewErrParamRequired("Date"))
+ }
+ if s.ResourceName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ResourceName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDate sets the Date field's value.
+func (s *DeleteAutoSnapshotInput) SetDate(v string) *DeleteAutoSnapshotInput {
+ s.Date = &v
+ return s
+}
+
+// SetResourceName sets the ResourceName field's value.
+func (s *DeleteAutoSnapshotInput) SetResourceName(v string) *DeleteAutoSnapshotInput {
+ s.ResourceName = &v
+ return s
+}
+
+type DeleteAutoSnapshotOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s DeleteAutoSnapshotOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteAutoSnapshotOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *DeleteAutoSnapshotOutput) SetOperations(v []*Operation) *DeleteAutoSnapshotOutput {
+ s.Operations = v
+ return s
+}
+
+type DeleteBucketAccessKeyInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the access key to delete.
+ //
+ // Use the GetBucketAccessKeys action to get a list of access key IDs that you
+ // can specify.
+ //
+ // AccessKeyId is a required field
+ AccessKeyId *string `locationName:"accessKeyId" type:"string" required:"true"`
+
+ // The name of the bucket that the access key belongs to.
+ //
+ // BucketName is a required field
+ BucketName *string `locationName:"bucketName" min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketAccessKeyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketAccessKeyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketAccessKeyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketAccessKeyInput"}
+ if s.AccessKeyId == nil {
+ invalidParams.Add(request.NewErrParamRequired("AccessKeyId"))
+ }
+ if s.BucketName == nil {
+ invalidParams.Add(request.NewErrParamRequired("BucketName"))
+ }
+ if s.BucketName != nil && len(*s.BucketName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("BucketName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAccessKeyId sets the AccessKeyId field's value.
+func (s *DeleteBucketAccessKeyInput) SetAccessKeyId(v string) *DeleteBucketAccessKeyInput {
+ s.AccessKeyId = &v
+ return s
+}
+
+// SetBucketName sets the BucketName field's value.
+func (s *DeleteBucketAccessKeyInput) SetBucketName(v string) *DeleteBucketAccessKeyInput {
+ s.BucketName = &v
+ return s
+}
+
+type DeleteBucketAccessKeyOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s DeleteBucketAccessKeyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketAccessKeyOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *DeleteBucketAccessKeyOutput) SetOperations(v []*Operation) *DeleteBucketAccessKeyOutput {
+ s.Operations = v
+ return s
+}
+
+type DeleteBucketInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the bucket to delete.
+ //
+ // Use the GetBuckets action to get a list of bucket names that you can specify.
+ //
+ // BucketName is a required field
+ BucketName *string `locationName:"bucketName" min:"3" type:"string" required:"true"`
+
+ // A Boolean value that indicates whether to force delete the bucket.
+ //
+ // You must force delete the bucket if it has one of the following conditions:
+ //
+ // * The bucket is the origin of a distribution.
+ //
+ // * The bucket has instances that were granted access to it using the SetResourceAccessForBucket
+ // action.
+ //
+ // * The bucket has objects.
+ //
+ // * The bucket has access keys.
+ //
+ // Force deleting a bucket might impact other resources that rely on the bucket,
+ // such as instances, distributions, or software that use the issued access
+ // keys.
+ ForceDelete *bool `locationName:"forceDelete" type:"boolean"`
+}
+
+// String returns the string representation
+func (s DeleteBucketInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"}
+ if s.BucketName == nil {
+ invalidParams.Add(request.NewErrParamRequired("BucketName"))
+ }
+ if s.BucketName != nil && len(*s.BucketName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("BucketName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucketName sets the BucketName field's value.
+func (s *DeleteBucketInput) SetBucketName(v string) *DeleteBucketInput {
+ s.BucketName = &v
+ return s
+}
+
+// SetForceDelete sets the ForceDelete field's value.
+func (s *DeleteBucketInput) SetForceDelete(v bool) *DeleteBucketInput {
+ s.ForceDelete = &v
+ return s
+}
+
+type DeleteBucketOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s DeleteBucketOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *DeleteBucketOutput) SetOperations(v []*Operation) *DeleteBucketOutput {
+ s.Operations = v
+ return s
+}
+
+type DeleteCertificateInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the certificate to delete.
+ //
+ // Use the GetCertificates action to get a list of certificate names that you
+ // can specify.
+ //
+ // CertificateName is a required field
+ CertificateName *string `locationName:"certificateName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteCertificateInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteCertificateInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteCertificateInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteCertificateInput"}
+ if s.CertificateName == nil {
+ invalidParams.Add(request.NewErrParamRequired("CertificateName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetCertificateName sets the CertificateName field's value.
+func (s *DeleteCertificateInput) SetCertificateName(v string) *DeleteCertificateInput {
+ s.CertificateName = &v
+ return s
+}
+
+type DeleteCertificateOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s DeleteCertificateOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteCertificateOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *DeleteCertificateOutput) SetOperations(v []*Operation) *DeleteCertificateOutput {
+ s.Operations = v
+ return s
+}
+
+type DeleteContactMethodInput struct {
+ _ struct{} `type:"structure"`
+
+ // The protocol that will be deleted, such as Email or SMS (text messaging).
+ //
+ // To delete an Email and an SMS contact method if you added both, you must
+ // run separate DeleteContactMethod actions to delete each protocol.
+ //
+ // Protocol is a required field
+ Protocol *string `locationName:"protocol" type:"string" required:"true" enum:"ContactProtocol"`
+}
+
+// String returns the string representation
+func (s DeleteContactMethodInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteContactMethodInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteContactMethodInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteContactMethodInput"}
+ if s.Protocol == nil {
+ invalidParams.Add(request.NewErrParamRequired("Protocol"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetProtocol sets the Protocol field's value.
+func (s *DeleteContactMethodInput) SetProtocol(v string) *DeleteContactMethodInput {
+ s.Protocol = &v
+ return s
+}
+
+type DeleteContactMethodOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s DeleteContactMethodOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteContactMethodOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *DeleteContactMethodOutput) SetOperations(v []*Operation) *DeleteContactMethodOutput {
+ s.Operations = v
+ return s
+}
+
+type DeleteContainerImageInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the container image to delete from the container service.
+ //
+ // Use the GetContainerImages action to get the name of the container images
+ // that are registered to a container service.
+ //
+ // Container images sourced from your Lightsail container service, that are
+ // registered and stored on your service, start with a colon (:). For example,
+ // :container-service-1.mystaticwebsite.1. Container images sourced from a public
+ // registry like Docker Hub don't start with a colon. For example, nginx:latest
+ // or nginx.
+ //
+ // Image is a required field
+ Image *string `locationName:"image" type:"string" required:"true"`
+
+ // The name of the container service for which to delete a registered container
+ // image.
+ //
+ // ServiceName is a required field
+ ServiceName *string `locationName:"serviceName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteContainerImageInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteContainerImageInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteContainerImageInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteContainerImageInput"}
+ if s.Image == nil {
+ invalidParams.Add(request.NewErrParamRequired("Image"))
+ }
+ if s.ServiceName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ServiceName"))
+ }
+ if s.ServiceName != nil && len(*s.ServiceName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ServiceName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetImage sets the Image field's value.
+func (s *DeleteContainerImageInput) SetImage(v string) *DeleteContainerImageInput {
+ s.Image = &v
+ return s
+}
+
+// SetServiceName sets the ServiceName field's value.
+func (s *DeleteContainerImageInput) SetServiceName(v string) *DeleteContainerImageInput {
+ s.ServiceName = &v
+ return s
+}
+
+type DeleteContainerImageOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteContainerImageOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteContainerImageOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteContainerServiceInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the container service to delete.
+ //
+ // ServiceName is a required field
+ ServiceName *string `locationName:"serviceName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteContainerServiceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteContainerServiceInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteContainerServiceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteContainerServiceInput"}
+ if s.ServiceName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ServiceName"))
+ }
+ if s.ServiceName != nil && len(*s.ServiceName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ServiceName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetServiceName sets the ServiceName field's value.
+func (s *DeleteContainerServiceInput) SetServiceName(v string) *DeleteContainerServiceInput {
+ s.ServiceName = &v
+ return s
+}
+
+type DeleteContainerServiceOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteContainerServiceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteContainerServiceOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteDiskInput struct {
+ _ struct{} `type:"structure"`
+
+ // The unique name of the disk you want to delete (e.g., my-disk).
+ //
+ // DiskName is a required field
+ DiskName *string `locationName:"diskName" type:"string" required:"true"`
+
+ // A Boolean value to indicate whether to delete the enabled add-ons for the
+ // disk.
+ ForceDeleteAddOns *bool `locationName:"forceDeleteAddOns" type:"boolean"`
+}
+
+// String returns the string representation
+func (s DeleteDiskInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDiskInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteDiskInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteDiskInput"}
+ if s.DiskName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DiskName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDiskName sets the DiskName field's value.
+func (s *DeleteDiskInput) SetDiskName(v string) *DeleteDiskInput {
+ s.DiskName = &v
+ return s
+}
+
+// SetForceDeleteAddOns sets the ForceDeleteAddOns field's value.
+func (s *DeleteDiskInput) SetForceDeleteAddOns(v bool) *DeleteDiskInput {
+ s.ForceDeleteAddOns = &v
+ return s
+}
+
+type DeleteDiskOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s DeleteDiskOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDiskOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *DeleteDiskOutput) SetOperations(v []*Operation) *DeleteDiskOutput {
+ s.Operations = v
+ return s
+}
+
+type DeleteDiskSnapshotInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the disk snapshot you want to delete (e.g., my-disk-snapshot).
+ //
+ // DiskSnapshotName is a required field
+ DiskSnapshotName *string `locationName:"diskSnapshotName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteDiskSnapshotInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDiskSnapshotInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteDiskSnapshotInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteDiskSnapshotInput"}
+ if s.DiskSnapshotName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DiskSnapshotName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDiskSnapshotName sets the DiskSnapshotName field's value.
+func (s *DeleteDiskSnapshotInput) SetDiskSnapshotName(v string) *DeleteDiskSnapshotInput {
+ s.DiskSnapshotName = &v
+ return s
+}
+
+type DeleteDiskSnapshotOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s DeleteDiskSnapshotOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDiskSnapshotOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *DeleteDiskSnapshotOutput) SetOperations(v []*Operation) *DeleteDiskSnapshotOutput {
+ s.Operations = v
+ return s
+}
+
+type DeleteDistributionInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the distribution to delete.
+ //
+ // Use the GetDistributions action to get a list of distribution names that
+ // you can specify.
+ DistributionName *string `locationName:"distributionName" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteDistributionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDistributionInput) GoString() string {
+ return s.String()
+}
+
+// SetDistributionName sets the DistributionName field's value.
+func (s *DeleteDistributionInput) SetDistributionName(v string) *DeleteDistributionInput {
+ s.DistributionName = &v
+ return s
+}
+
+type DeleteDistributionOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An object that describes the result of the action, such as the status of
+ // the request, the timestamp of the request, and the resources affected by
+ // the request.
+ Operation *Operation `locationName:"operation" type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteDistributionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDistributionOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperation sets the Operation field's value.
+func (s *DeleteDistributionOutput) SetOperation(v *Operation) *DeleteDistributionOutput {
+ s.Operation = v
+ return s
+}
+
+type DeleteDomainEntryInput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of key-value pairs containing information about your domain entries.
+ //
+ // DomainEntry is a required field
+ DomainEntry *DomainEntry `locationName:"domainEntry" type:"structure" required:"true"`
+
+ // The name of the domain entry to delete.
+ //
+ // DomainName is a required field
+ DomainName *string `locationName:"domainName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteDomainEntryInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDomainEntryInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteDomainEntryInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteDomainEntryInput"}
+ if s.DomainEntry == nil {
+ invalidParams.Add(request.NewErrParamRequired("DomainEntry"))
+ }
+ if s.DomainName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DomainName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDomainEntry sets the DomainEntry field's value.
+func (s *DeleteDomainEntryInput) SetDomainEntry(v *DomainEntry) *DeleteDomainEntryInput {
+ s.DomainEntry = v
+ return s
+}
+
+// SetDomainName sets the DomainName field's value.
+func (s *DeleteDomainEntryInput) SetDomainName(v string) *DeleteDomainEntryInput {
+ s.DomainName = &v
+ return s
+}
+
+type DeleteDomainEntryOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operation *Operation `locationName:"operation" type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteDomainEntryOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDomainEntryOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperation sets the Operation field's value.
+func (s *DeleteDomainEntryOutput) SetOperation(v *Operation) *DeleteDomainEntryOutput {
+ s.Operation = v
+ return s
+}
+
+type DeleteDomainInput struct {
+ _ struct{} `type:"structure"`
+
+ // The specific domain name to delete.
+ //
+ // DomainName is a required field
+ DomainName *string `locationName:"domainName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteDomainInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDomainInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteDomainInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteDomainInput"}
+ if s.DomainName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DomainName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDomainName sets the DomainName field's value.
+func (s *DeleteDomainInput) SetDomainName(v string) *DeleteDomainInput {
+ s.DomainName = &v
+ return s
+}
+
+type DeleteDomainOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operation *Operation `locationName:"operation" type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteDomainOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDomainOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperation sets the Operation field's value.
+func (s *DeleteDomainOutput) SetOperation(v *Operation) *DeleteDomainOutput {
+ s.Operation = v
+ return s
+}
+
+type DeleteInstanceInput struct {
+ _ struct{} `type:"structure"`
+
+ // A Boolean value to indicate whether to delete the enabled add-ons for the
+ // disk.
+ ForceDeleteAddOns *bool `locationName:"forceDeleteAddOns" type:"boolean"`
+
+ // The name of the instance to delete.
+ //
+ // InstanceName is a required field
+ InstanceName *string `locationName:"instanceName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteInstanceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteInstanceInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteInstanceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteInstanceInput"}
+ if s.InstanceName == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetForceDeleteAddOns sets the ForceDeleteAddOns field's value.
+func (s *DeleteInstanceInput) SetForceDeleteAddOns(v bool) *DeleteInstanceInput {
+ s.ForceDeleteAddOns = &v
+ return s
+}
+
+// SetInstanceName sets the InstanceName field's value.
+func (s *DeleteInstanceInput) SetInstanceName(v string) *DeleteInstanceInput {
+ s.InstanceName = &v
+ return s
+}
+
+type DeleteInstanceOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s DeleteInstanceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteInstanceOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *DeleteInstanceOutput) SetOperations(v []*Operation) *DeleteInstanceOutput {
+ s.Operations = v
+ return s
+}
+
+type DeleteInstanceSnapshotInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the snapshot to delete.
+ //
+ // InstanceSnapshotName is a required field
+ InstanceSnapshotName *string `locationName:"instanceSnapshotName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteInstanceSnapshotInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteInstanceSnapshotInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteInstanceSnapshotInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteInstanceSnapshotInput"}
+ if s.InstanceSnapshotName == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceSnapshotName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetInstanceSnapshotName sets the InstanceSnapshotName field's value.
+func (s *DeleteInstanceSnapshotInput) SetInstanceSnapshotName(v string) *DeleteInstanceSnapshotInput {
+ s.InstanceSnapshotName = &v
+ return s
+}
+
+type DeleteInstanceSnapshotOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s DeleteInstanceSnapshotOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteInstanceSnapshotOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *DeleteInstanceSnapshotOutput) SetOperations(v []*Operation) *DeleteInstanceSnapshotOutput {
+ s.Operations = v
+ return s
+}
+
+type DeleteKeyPairInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the key pair to delete.
+ //
+ // KeyPairName is a required field
+ KeyPairName *string `locationName:"keyPairName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteKeyPairInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteKeyPairInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteKeyPairInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteKeyPairInput"}
+ if s.KeyPairName == nil {
+ invalidParams.Add(request.NewErrParamRequired("KeyPairName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetKeyPairName sets the KeyPairName field's value.
+func (s *DeleteKeyPairInput) SetKeyPairName(v string) *DeleteKeyPairInput {
+ s.KeyPairName = &v
+ return s
+}
+
+type DeleteKeyPairOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operation *Operation `locationName:"operation" type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteKeyPairOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteKeyPairOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperation sets the Operation field's value.
+func (s *DeleteKeyPairOutput) SetOperation(v *Operation) *DeleteKeyPairOutput {
+ s.Operation = v
+ return s
+}
+
+type DeleteKnownHostKeysInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the instance for which you want to reset the host key or certificate.
+ //
+ // InstanceName is a required field
+ InstanceName *string `locationName:"instanceName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteKnownHostKeysInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteKnownHostKeysInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteKnownHostKeysInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteKnownHostKeysInput"}
+ if s.InstanceName == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetInstanceName sets the InstanceName field's value.
+func (s *DeleteKnownHostKeysInput) SetInstanceName(v string) *DeleteKnownHostKeysInput {
+ s.InstanceName = &v
+ return s
+}
+
+type DeleteKnownHostKeysOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s DeleteKnownHostKeysOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteKnownHostKeysOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *DeleteKnownHostKeysOutput) SetOperations(v []*Operation) *DeleteKnownHostKeysOutput {
+ s.Operations = v
+ return s
+}
+
+type DeleteLoadBalancerInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the load balancer you want to delete.
+ //
+ // LoadBalancerName is a required field
+ LoadBalancerName *string `locationName:"loadBalancerName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteLoadBalancerInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteLoadBalancerInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteLoadBalancerInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteLoadBalancerInput"}
+ if s.LoadBalancerName == nil {
+ invalidParams.Add(request.NewErrParamRequired("LoadBalancerName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetLoadBalancerName sets the LoadBalancerName field's value.
+func (s *DeleteLoadBalancerInput) SetLoadBalancerName(v string) *DeleteLoadBalancerInput {
+ s.LoadBalancerName = &v
+ return s
+}
+
+type DeleteLoadBalancerOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s DeleteLoadBalancerOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteLoadBalancerOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *DeleteLoadBalancerOutput) SetOperations(v []*Operation) *DeleteLoadBalancerOutput {
+ s.Operations = v
+ return s
+}
+
+type DeleteLoadBalancerTlsCertificateInput struct {
+ _ struct{} `type:"structure"`
+
+ // The SSL/TLS certificate name.
+ //
+ // CertificateName is a required field
+ CertificateName *string `locationName:"certificateName" type:"string" required:"true"`
+
+ // When true, forces the deletion of an SSL/TLS certificate.
+ //
+ // There can be two certificates associated with a Lightsail load balancer:
+ // the primary and the backup. The force parameter is required when the primary
+ // SSL/TLS certificate is in use by an instance attached to the load balancer.
+ Force *bool `locationName:"force" type:"boolean"`
+
+ // The load balancer name.
+ //
+ // LoadBalancerName is a required field
+ LoadBalancerName *string `locationName:"loadBalancerName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteLoadBalancerTlsCertificateInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteLoadBalancerTlsCertificateInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteLoadBalancerTlsCertificateInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteLoadBalancerTlsCertificateInput"}
+ if s.CertificateName == nil {
+ invalidParams.Add(request.NewErrParamRequired("CertificateName"))
+ }
+ if s.LoadBalancerName == nil {
+ invalidParams.Add(request.NewErrParamRequired("LoadBalancerName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetCertificateName sets the CertificateName field's value.
+func (s *DeleteLoadBalancerTlsCertificateInput) SetCertificateName(v string) *DeleteLoadBalancerTlsCertificateInput {
+ s.CertificateName = &v
+ return s
+}
+
+// SetForce sets the Force field's value.
+func (s *DeleteLoadBalancerTlsCertificateInput) SetForce(v bool) *DeleteLoadBalancerTlsCertificateInput {
+ s.Force = &v
+ return s
+}
+
+// SetLoadBalancerName sets the LoadBalancerName field's value.
+func (s *DeleteLoadBalancerTlsCertificateInput) SetLoadBalancerName(v string) *DeleteLoadBalancerTlsCertificateInput {
+ s.LoadBalancerName = &v
+ return s
+}
+
+type DeleteLoadBalancerTlsCertificateOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s DeleteLoadBalancerTlsCertificateOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteLoadBalancerTlsCertificateOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *DeleteLoadBalancerTlsCertificateOutput) SetOperations(v []*Operation) *DeleteLoadBalancerTlsCertificateOutput {
+ s.Operations = v
+ return s
+}
+
+type DeleteRelationalDatabaseInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the database snapshot created if skip final snapshot is false,
+ // which is the default value for that parameter.
+ //
+ // Specifying this parameter and also specifying the skip final snapshot parameter
+ // to true results in an error.
+ //
+ // Constraints:
+ //
+ // * Must contain from 2 to 255 alphanumeric characters, or hyphens.
+ //
+ // * The first and last character must be a letter or number.
+ FinalRelationalDatabaseSnapshotName *string `locationName:"finalRelationalDatabaseSnapshotName" type:"string"`
+
+ // The name of the database that you are deleting.
+ //
+ // RelationalDatabaseName is a required field
+ RelationalDatabaseName *string `locationName:"relationalDatabaseName" type:"string" required:"true"`
+
+ // Determines whether a final database snapshot is created before your database
+ // is deleted. If true is specified, no database snapshot is created. If false
+ // is specified, a database snapshot is created before your database is deleted.
+ //
+ // You must specify the final relational database snapshot name parameter if
+ // the skip final snapshot parameter is false.
+ //
+ // Default: false
+ SkipFinalSnapshot *bool `locationName:"skipFinalSnapshot" type:"boolean"`
+}
+
+// String returns the string representation
+func (s DeleteRelationalDatabaseInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteRelationalDatabaseInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteRelationalDatabaseInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteRelationalDatabaseInput"}
+ if s.RelationalDatabaseName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetFinalRelationalDatabaseSnapshotName sets the FinalRelationalDatabaseSnapshotName field's value.
+func (s *DeleteRelationalDatabaseInput) SetFinalRelationalDatabaseSnapshotName(v string) *DeleteRelationalDatabaseInput {
+ s.FinalRelationalDatabaseSnapshotName = &v
+ return s
+}
+
+// SetRelationalDatabaseName sets the RelationalDatabaseName field's value.
+func (s *DeleteRelationalDatabaseInput) SetRelationalDatabaseName(v string) *DeleteRelationalDatabaseInput {
+ s.RelationalDatabaseName = &v
+ return s
+}
+
+// SetSkipFinalSnapshot sets the SkipFinalSnapshot field's value.
+func (s *DeleteRelationalDatabaseInput) SetSkipFinalSnapshot(v bool) *DeleteRelationalDatabaseInput {
+ s.SkipFinalSnapshot = &v
+ return s
+}
+
+type DeleteRelationalDatabaseOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s DeleteRelationalDatabaseOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteRelationalDatabaseOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *DeleteRelationalDatabaseOutput) SetOperations(v []*Operation) *DeleteRelationalDatabaseOutput {
+ s.Operations = v
+ return s
+}
+
+type DeleteRelationalDatabaseSnapshotInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the database snapshot that you are deleting.
+ //
+ // RelationalDatabaseSnapshotName is a required field
+ RelationalDatabaseSnapshotName *string `locationName:"relationalDatabaseSnapshotName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteRelationalDatabaseSnapshotInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteRelationalDatabaseSnapshotInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteRelationalDatabaseSnapshotInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteRelationalDatabaseSnapshotInput"}
+ if s.RelationalDatabaseSnapshotName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseSnapshotName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetRelationalDatabaseSnapshotName sets the RelationalDatabaseSnapshotName field's value.
+func (s *DeleteRelationalDatabaseSnapshotInput) SetRelationalDatabaseSnapshotName(v string) *DeleteRelationalDatabaseSnapshotInput {
+ s.RelationalDatabaseSnapshotName = &v
+ return s
+}
+
+type DeleteRelationalDatabaseSnapshotOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s DeleteRelationalDatabaseSnapshotOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteRelationalDatabaseSnapshotOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *DeleteRelationalDatabaseSnapshotOutput) SetOperations(v []*Operation) *DeleteRelationalDatabaseSnapshotOutput {
+ s.Operations = v
+ return s
+}
+
+// Describes the destination of a record.
+type DestinationInfo struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the resource created at the destination.
+ Id *string `locationName:"id" type:"string"`
+
+ // The destination service of the record.
+ Service *string `locationName:"service" type:"string"`
+}
+
+// String returns the string representation
+func (s DestinationInfo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DestinationInfo) GoString() string {
+ return s.String()
+}
+
+// SetId sets the Id field's value.
+func (s *DestinationInfo) SetId(v string) *DestinationInfo {
+ s.Id = &v
+ return s
+}
+
+// SetService sets the Service field's value.
+func (s *DestinationInfo) SetService(v string) *DestinationInfo {
+ s.Service = &v
+ return s
+}
+
+type DetachCertificateFromDistributionInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the distribution from which to detach the certificate.
+ //
+ // Use the GetDistributions action to get a list of distribution names that
+ // you can specify.
+ //
+ // DistributionName is a required field
+ DistributionName *string `locationName:"distributionName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DetachCertificateFromDistributionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DetachCertificateFromDistributionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DetachCertificateFromDistributionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DetachCertificateFromDistributionInput"}
+ if s.DistributionName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DistributionName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDistributionName sets the DistributionName field's value.
+func (s *DetachCertificateFromDistributionInput) SetDistributionName(v string) *DetachCertificateFromDistributionInput {
+ s.DistributionName = &v
+ return s
+}
+
+type DetachCertificateFromDistributionOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An object that describes the result of the action, such as the status of
+ // the request, the timestamp of the request, and the resources affected by
+ // the request.
+ Operation *Operation `locationName:"operation" type:"structure"`
+}
+
+// String returns the string representation
+func (s DetachCertificateFromDistributionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DetachCertificateFromDistributionOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperation sets the Operation field's value.
+func (s *DetachCertificateFromDistributionOutput) SetOperation(v *Operation) *DetachCertificateFromDistributionOutput {
+ s.Operation = v
+ return s
+}
+
+type DetachDiskInput struct {
+ _ struct{} `type:"structure"`
+
+ // The unique name of the disk you want to detach from your instance (e.g.,
+ // my-disk).
+ //
+ // DiskName is a required field
+ DiskName *string `locationName:"diskName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DetachDiskInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DetachDiskInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DetachDiskInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DetachDiskInput"}
+ if s.DiskName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DiskName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDiskName sets the DiskName field's value.
+func (s *DetachDiskInput) SetDiskName(v string) *DetachDiskInput {
+ s.DiskName = &v
+ return s
+}
+
+type DetachDiskOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s DetachDiskOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DetachDiskOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *DetachDiskOutput) SetOperations(v []*Operation) *DetachDiskOutput {
+ s.Operations = v
+ return s
+}
+
+type DetachInstancesFromLoadBalancerInput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of strings containing the names of the instances you want to detach
+ // from the load balancer.
+ //
+ // InstanceNames is a required field
+ InstanceNames []*string `locationName:"instanceNames" type:"list" required:"true"`
+
+ // The name of the Lightsail load balancer.
+ //
+ // LoadBalancerName is a required field
+ LoadBalancerName *string `locationName:"loadBalancerName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DetachInstancesFromLoadBalancerInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DetachInstancesFromLoadBalancerInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DetachInstancesFromLoadBalancerInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DetachInstancesFromLoadBalancerInput"}
+ if s.InstanceNames == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceNames"))
+ }
+ if s.LoadBalancerName == nil {
+ invalidParams.Add(request.NewErrParamRequired("LoadBalancerName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetInstanceNames sets the InstanceNames field's value.
+func (s *DetachInstancesFromLoadBalancerInput) SetInstanceNames(v []*string) *DetachInstancesFromLoadBalancerInput {
+ s.InstanceNames = v
+ return s
+}
+
+// SetLoadBalancerName sets the LoadBalancerName field's value.
+func (s *DetachInstancesFromLoadBalancerInput) SetLoadBalancerName(v string) *DetachInstancesFromLoadBalancerInput {
+ s.LoadBalancerName = &v
+ return s
+}
+
+type DetachInstancesFromLoadBalancerOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s DetachInstancesFromLoadBalancerOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DetachInstancesFromLoadBalancerOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *DetachInstancesFromLoadBalancerOutput) SetOperations(v []*Operation) *DetachInstancesFromLoadBalancerOutput {
+ s.Operations = v
+ return s
+}
+
+type DetachStaticIpInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the static IP to detach from the instance.
+ //
+ // StaticIpName is a required field
+ StaticIpName *string `locationName:"staticIpName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DetachStaticIpInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DetachStaticIpInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DetachStaticIpInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DetachStaticIpInput"}
+ if s.StaticIpName == nil {
+ invalidParams.Add(request.NewErrParamRequired("StaticIpName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetStaticIpName sets the StaticIpName field's value.
+func (s *DetachStaticIpInput) SetStaticIpName(v string) *DetachStaticIpInput {
+ s.StaticIpName = &v
+ return s
+}
+
+type DetachStaticIpOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s DetachStaticIpOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DetachStaticIpOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *DetachStaticIpOutput) SetOperations(v []*Operation) *DetachStaticIpOutput {
+ s.Operations = v
+ return s
+}
+
+type DisableAddOnInput struct {
+ _ struct{} `type:"structure"`
+
+ // The add-on type to disable.
+ //
+ // AddOnType is a required field
+ AddOnType *string `locationName:"addOnType" type:"string" required:"true" enum:"AddOnType"`
+
+ // The name of the source resource for which to disable the add-on.
+ //
+ // ResourceName is a required field
+ ResourceName *string `locationName:"resourceName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DisableAddOnInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisableAddOnInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DisableAddOnInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DisableAddOnInput"}
+ if s.AddOnType == nil {
+ invalidParams.Add(request.NewErrParamRequired("AddOnType"))
+ }
+ if s.ResourceName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ResourceName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAddOnType sets the AddOnType field's value.
+func (s *DisableAddOnInput) SetAddOnType(v string) *DisableAddOnInput {
+ s.AddOnType = &v
+ return s
+}
+
+// SetResourceName sets the ResourceName field's value.
+func (s *DisableAddOnInput) SetResourceName(v string) *DisableAddOnInput {
+ s.ResourceName = &v
+ return s
+}
+
+type DisableAddOnOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s DisableAddOnOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisableAddOnOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *DisableAddOnOutput) SetOperations(v []*Operation) *DisableAddOnOutput {
+ s.Operations = v
+ return s
+}
+
+// Describes a block storage disk.
+type Disk struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects representing the add-ons enabled on the disk.
+ AddOns []*AddOn `locationName:"addOns" type:"list"`
+
+ // The Amazon Resource Name (ARN) of the disk.
+ Arn *string `locationName:"arn" type:"string"`
+
+ // The resources to which the disk is attached.
+ AttachedTo *string `locationName:"attachedTo" type:"string"`
+
+ // (Deprecated) The attachment state of the disk.
+ //
+ // In releases prior to November 14, 2017, this parameter returned attached
+ // for system disks in the API response. It is now deprecated, but still included
+ // in the response. Use isAttached instead.
+ //
+ // Deprecated: AttachmentState has been deprecated
+ AttachmentState *string `locationName:"attachmentState" deprecated:"true" type:"string"`
+
+ // The date when the disk was created.
+ CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`
+
+ // (Deprecated) The number of GB in use by the disk.
+ //
+ // In releases prior to November 14, 2017, this parameter was not included in
+ // the API response. It is now deprecated.
+ //
+ // Deprecated: GbInUse has been deprecated
+ GbInUse *int64 `locationName:"gbInUse" deprecated:"true" type:"integer"`
+
+ // The input/output operations per second (IOPS) of the disk.
+ Iops *int64 `locationName:"iops" type:"integer"`
+
+ // A Boolean value indicating whether the disk is attached.
+ IsAttached *bool `locationName:"isAttached" type:"boolean"`
+
+ // A Boolean value indicating whether this disk is a system disk (has an operating
+ // system loaded on it).
+ IsSystemDisk *bool `locationName:"isSystemDisk" type:"boolean"`
+
+ // The AWS Region and Availability Zone where the disk is located.
+ Location *ResourceLocation `locationName:"location" type:"structure"`
+
+ // The unique name of the disk.
+ Name *string `locationName:"name" type:"string"`
+
+ // The disk path.
+ Path *string `locationName:"path" type:"string"`
+
+ // The Lightsail resource type (e.g., Disk).
+ ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"`
+
+ // The size of the disk in GB.
+ SizeInGb *int64 `locationName:"sizeInGb" type:"integer"`
+
+ // Describes the status of the disk.
+ State *string `locationName:"state" type:"string" enum:"DiskState"`
+
+ // The support code. Include this code in your email to support when you have
+ // questions about an instance or another resource in Lightsail. This code enables
+ // our support team to look up your Lightsail information more easily.
+ SupportCode *string `locationName:"supportCode" type:"string"`
+
+ // The tag keys and optional values for the resource. For more information about
+ // tags in Lightsail, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-tags).
+ Tags []*Tag `locationName:"tags" type:"list"`
+}
+
+// String returns the string representation
+func (s Disk) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Disk) GoString() string {
+ return s.String()
+}
+
+// SetAddOns sets the AddOns field's value.
+func (s *Disk) SetAddOns(v []*AddOn) *Disk {
+ s.AddOns = v
+ return s
+}
+
+// SetArn sets the Arn field's value.
+func (s *Disk) SetArn(v string) *Disk {
+ s.Arn = &v
+ return s
+}
+
+// SetAttachedTo sets the AttachedTo field's value.
+func (s *Disk) SetAttachedTo(v string) *Disk {
+ s.AttachedTo = &v
+ return s
+}
+
+// SetAttachmentState sets the AttachmentState field's value.
+func (s *Disk) SetAttachmentState(v string) *Disk {
+ s.AttachmentState = &v
+ return s
+}
+
+// SetCreatedAt sets the CreatedAt field's value.
+func (s *Disk) SetCreatedAt(v time.Time) *Disk {
+ s.CreatedAt = &v
+ return s
+}
+
+// SetGbInUse sets the GbInUse field's value.
+func (s *Disk) SetGbInUse(v int64) *Disk {
+ s.GbInUse = &v
+ return s
+}
+
+// SetIops sets the Iops field's value.
+func (s *Disk) SetIops(v int64) *Disk {
+ s.Iops = &v
+ return s
+}
+
+// SetIsAttached sets the IsAttached field's value.
+func (s *Disk) SetIsAttached(v bool) *Disk {
+ s.IsAttached = &v
+ return s
+}
+
+// SetIsSystemDisk sets the IsSystemDisk field's value.
+func (s *Disk) SetIsSystemDisk(v bool) *Disk {
+ s.IsSystemDisk = &v
+ return s
+}
+
+// SetLocation sets the Location field's value.
+func (s *Disk) SetLocation(v *ResourceLocation) *Disk {
+ s.Location = v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *Disk) SetName(v string) *Disk {
+ s.Name = &v
+ return s
+}
+
+// SetPath sets the Path field's value.
+func (s *Disk) SetPath(v string) *Disk {
+ s.Path = &v
+ return s
+}
+
+// SetResourceType sets the ResourceType field's value.
+func (s *Disk) SetResourceType(v string) *Disk {
+ s.ResourceType = &v
+ return s
+}
+
+// SetSizeInGb sets the SizeInGb field's value.
+func (s *Disk) SetSizeInGb(v int64) *Disk {
+ s.SizeInGb = &v
+ return s
+}
+
+// SetState sets the State field's value.
+func (s *Disk) SetState(v string) *Disk {
+ s.State = &v
+ return s
+}
+
+// SetSupportCode sets the SupportCode field's value.
+func (s *Disk) SetSupportCode(v string) *Disk {
+ s.SupportCode = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *Disk) SetTags(v []*Tag) *Disk {
+ s.Tags = v
+ return s
+}
+
+// Describes a disk.
+type DiskInfo struct {
+ _ struct{} `type:"structure"`
+
+ // A Boolean value indicating whether this disk is a system disk (has an operating
+ // system loaded on it).
+ IsSystemDisk *bool `locationName:"isSystemDisk" type:"boolean"`
+
+ // The disk name.
+ Name *string `locationName:"name" type:"string"`
+
+ // The disk path.
+ Path *string `locationName:"path" type:"string"`
+
+ // The size of the disk in GB (e.g., 32).
+ SizeInGb *int64 `locationName:"sizeInGb" type:"integer"`
+}
+
+// String returns the string representation
+func (s DiskInfo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DiskInfo) GoString() string {
+ return s.String()
+}
+
+// SetIsSystemDisk sets the IsSystemDisk field's value.
+func (s *DiskInfo) SetIsSystemDisk(v bool) *DiskInfo {
+ s.IsSystemDisk = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *DiskInfo) SetName(v string) *DiskInfo {
+ s.Name = &v
+ return s
+}
+
+// SetPath sets the Path field's value.
+func (s *DiskInfo) SetPath(v string) *DiskInfo {
+ s.Path = &v
+ return s
+}
+
+// SetSizeInGb sets the SizeInGb field's value.
+func (s *DiskInfo) SetSizeInGb(v int64) *DiskInfo {
+ s.SizeInGb = &v
+ return s
+}
+
+// Describes a block storage disk mapping.
+type DiskMap struct {
+ _ struct{} `type:"structure"`
+
+ // The new disk name (e.g., my-new-disk).
+ NewDiskName *string `locationName:"newDiskName" type:"string"`
+
+ // The original disk path exposed to the instance (for example, /dev/sdh).
+ OriginalDiskPath *string `locationName:"originalDiskPath" type:"string"`
+}
+
+// String returns the string representation
+func (s DiskMap) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DiskMap) GoString() string {
+ return s.String()
+}
+
+// SetNewDiskName sets the NewDiskName field's value.
+func (s *DiskMap) SetNewDiskName(v string) *DiskMap {
+ s.NewDiskName = &v
+ return s
+}
+
+// SetOriginalDiskPath sets the OriginalDiskPath field's value.
+func (s *DiskMap) SetOriginalDiskPath(v string) *DiskMap {
+ s.OriginalDiskPath = &v
+ return s
+}
+
+// Describes a block storage disk snapshot.
+type DiskSnapshot struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the disk snapshot.
+ Arn *string `locationName:"arn" type:"string"`
+
+ // The date when the disk snapshot was created.
+ CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`
+
+ // The Amazon Resource Name (ARN) of the source disk from which the disk snapshot
+ // was created.
+ FromDiskArn *string `locationName:"fromDiskArn" type:"string"`
+
+ // The unique name of the source disk from which the disk snapshot was created.
+ FromDiskName *string `locationName:"fromDiskName" type:"string"`
+
+ // The Amazon Resource Name (ARN) of the source instance from which the disk
+ // (system volume) snapshot was created.
+ FromInstanceArn *string `locationName:"fromInstanceArn" type:"string"`
+
+ // The unique name of the source instance from which the disk (system volume)
+ // snapshot was created.
+ FromInstanceName *string `locationName:"fromInstanceName" type:"string"`
+
+ // A Boolean value indicating whether the snapshot was created from an automatic
+ // snapshot.
+ IsFromAutoSnapshot *bool `locationName:"isFromAutoSnapshot" type:"boolean"`
+
+ // The AWS Region and Availability Zone where the disk snapshot was created.
+ Location *ResourceLocation `locationName:"location" type:"structure"`
+
+ // The name of the disk snapshot (e.g., my-disk-snapshot).
+ Name *string `locationName:"name" type:"string"`
+
+ // The progress of the snapshot.
+ Progress *string `locationName:"progress" type:"string"`
+
+ // The Lightsail resource type (e.g., DiskSnapshot).
+ ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"`
+
+ // The size of the disk in GB.
+ SizeInGb *int64 `locationName:"sizeInGb" type:"integer"`
+
+ // The status of the disk snapshot operation.
+ State *string `locationName:"state" type:"string" enum:"DiskSnapshotState"`
+
+ // The support code. Include this code in your email to support when you have
+ // questions about an instance or another resource in Lightsail. This code enables
+ // our support team to look up your Lightsail information more easily.
+ SupportCode *string `locationName:"supportCode" type:"string"`
+
+ // The tag keys and optional values for the resource. For more information about
+ // tags in Lightsail, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-tags).
+ Tags []*Tag `locationName:"tags" type:"list"`
+}
+
+// String returns the string representation
+func (s DiskSnapshot) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DiskSnapshot) GoString() string {
+ return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *DiskSnapshot) SetArn(v string) *DiskSnapshot {
+ s.Arn = &v
+ return s
+}
+
+// SetCreatedAt sets the CreatedAt field's value.
+func (s *DiskSnapshot) SetCreatedAt(v time.Time) *DiskSnapshot {
+ s.CreatedAt = &v
+ return s
+}
+
+// SetFromDiskArn sets the FromDiskArn field's value.
+func (s *DiskSnapshot) SetFromDiskArn(v string) *DiskSnapshot {
+ s.FromDiskArn = &v
+ return s
+}
+
+// SetFromDiskName sets the FromDiskName field's value.
+func (s *DiskSnapshot) SetFromDiskName(v string) *DiskSnapshot {
+ s.FromDiskName = &v
+ return s
+}
+
+// SetFromInstanceArn sets the FromInstanceArn field's value.
+func (s *DiskSnapshot) SetFromInstanceArn(v string) *DiskSnapshot {
+ s.FromInstanceArn = &v
+ return s
+}
+
+// SetFromInstanceName sets the FromInstanceName field's value.
+func (s *DiskSnapshot) SetFromInstanceName(v string) *DiskSnapshot {
+ s.FromInstanceName = &v
+ return s
+}
+
+// SetIsFromAutoSnapshot sets the IsFromAutoSnapshot field's value.
+func (s *DiskSnapshot) SetIsFromAutoSnapshot(v bool) *DiskSnapshot {
+ s.IsFromAutoSnapshot = &v
+ return s
+}
+
+// SetLocation sets the Location field's value.
+func (s *DiskSnapshot) SetLocation(v *ResourceLocation) *DiskSnapshot {
+ s.Location = v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *DiskSnapshot) SetName(v string) *DiskSnapshot {
+ s.Name = &v
+ return s
+}
+
+// SetProgress sets the Progress field's value.
+func (s *DiskSnapshot) SetProgress(v string) *DiskSnapshot {
+ s.Progress = &v
+ return s
+}
+
+// SetResourceType sets the ResourceType field's value.
+func (s *DiskSnapshot) SetResourceType(v string) *DiskSnapshot {
+ s.ResourceType = &v
+ return s
+}
+
+// SetSizeInGb sets the SizeInGb field's value.
+func (s *DiskSnapshot) SetSizeInGb(v int64) *DiskSnapshot {
+ s.SizeInGb = &v
+ return s
+}
+
+// SetState sets the State field's value.
+func (s *DiskSnapshot) SetState(v string) *DiskSnapshot {
+ s.State = &v
+ return s
+}
+
+// SetSupportCode sets the SupportCode field's value.
+func (s *DiskSnapshot) SetSupportCode(v string) *DiskSnapshot {
+ s.SupportCode = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *DiskSnapshot) SetTags(v []*Tag) *DiskSnapshot {
+ s.Tags = v
+ return s
+}
+
+// Describes a disk snapshot.
+type DiskSnapshotInfo struct {
+ _ struct{} `type:"structure"`
+
+ // The size of the disk in GB (e.g., 32).
+ SizeInGb *int64 `locationName:"sizeInGb" type:"integer"`
+}
+
+// String returns the string representation
+func (s DiskSnapshotInfo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DiskSnapshotInfo) GoString() string {
+ return s.String()
+}
+
+// SetSizeInGb sets the SizeInGb field's value.
+func (s *DiskSnapshotInfo) SetSizeInGb(v int64) *DiskSnapshotInfo {
+ s.SizeInGb = &v
+ return s
+}
+
+// Describes the specifications of a distribution bundle.
+type DistributionBundle struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the bundle.
+ BundleId *string `locationName:"bundleId" type:"string"`
+
+ // Indicates whether the bundle is active, and can be specified for a new or
+ // existing distribution.
+ IsActive *bool `locationName:"isActive" type:"boolean"`
+
+ // The name of the distribution bundle.
+ Name *string `locationName:"name" type:"string"`
+
+ // The monthly price, in US dollars, of the bundle.
+ Price *float64 `locationName:"price" type:"float"`
+
+ // The monthly network transfer quota of the bundle.
+ TransferPerMonthInGb *int64 `locationName:"transferPerMonthInGb" type:"integer"`
+}
+
+// String returns the string representation
+func (s DistributionBundle) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DistributionBundle) GoString() string {
+ return s.String()
+}
+
+// SetBundleId sets the BundleId field's value.
+func (s *DistributionBundle) SetBundleId(v string) *DistributionBundle {
+ s.BundleId = &v
+ return s
+}
+
+// SetIsActive sets the IsActive field's value.
+func (s *DistributionBundle) SetIsActive(v bool) *DistributionBundle {
+ s.IsActive = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *DistributionBundle) SetName(v string) *DistributionBundle {
+ s.Name = &v
+ return s
+}
+
+// SetPrice sets the Price field's value.
+func (s *DistributionBundle) SetPrice(v float64) *DistributionBundle {
+ s.Price = &v
+ return s
+}
+
+// SetTransferPerMonthInGb sets the TransferPerMonthInGb field's value.
+func (s *DistributionBundle) SetTransferPerMonthInGb(v int64) *DistributionBundle {
+ s.TransferPerMonthInGb = &v
+ return s
+}
+
+// Describes a domain where you are storing recordsets.
+type Domain struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the domain recordset (e.g., arn:aws:lightsail:global:123456789101:Domain/824cede0-abc7-4f84-8dbc-12345EXAMPLE).
+ Arn *string `locationName:"arn" type:"string"`
+
+ // The date when the domain recordset was created.
+ CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`
+
+ // An array of key-value pairs containing information about the domain entries.
+ DomainEntries []*DomainEntry `locationName:"domainEntries" type:"list"`
+
+ // The AWS Region and Availability Zones where the domain recordset was created.
+ Location *ResourceLocation `locationName:"location" type:"structure"`
+
+ // The name of the domain.
+ Name *string `locationName:"name" type:"string"`
+
+ // The resource type.
+ ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"`
+
+ // The support code. Include this code in your email to support when you have
+ // questions about an instance or another resource in Lightsail. This code enables
+ // our support team to look up your Lightsail information more easily.
+ SupportCode *string `locationName:"supportCode" type:"string"`
+
+ // The tag keys and optional values for the resource. For more information about
+ // tags in Lightsail, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-tags).
+ Tags []*Tag `locationName:"tags" type:"list"`
+}
+
+// String returns the string representation
+func (s Domain) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Domain) GoString() string {
+ return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *Domain) SetArn(v string) *Domain {
+ s.Arn = &v
+ return s
+}
+
+// SetCreatedAt sets the CreatedAt field's value.
+func (s *Domain) SetCreatedAt(v time.Time) *Domain {
+ s.CreatedAt = &v
+ return s
+}
+
+// SetDomainEntries sets the DomainEntries field's value.
+func (s *Domain) SetDomainEntries(v []*DomainEntry) *Domain {
+ s.DomainEntries = v
+ return s
+}
+
+// SetLocation sets the Location field's value.
+func (s *Domain) SetLocation(v *ResourceLocation) *Domain {
+ s.Location = v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *Domain) SetName(v string) *Domain {
+ s.Name = &v
+ return s
+}
+
+// SetResourceType sets the ResourceType field's value.
+func (s *Domain) SetResourceType(v string) *Domain {
+ s.ResourceType = &v
+ return s
+}
+
+// SetSupportCode sets the SupportCode field's value.
+func (s *Domain) SetSupportCode(v string) *Domain {
+ s.SupportCode = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *Domain) SetTags(v []*Tag) *Domain {
+ s.Tags = v
+ return s
+}
+
+// Describes a domain recordset entry.
+type DomainEntry struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the domain recordset entry.
+ Id *string `locationName:"id" type:"string"`
+
+ // When true, specifies whether the domain entry is an alias used by the Lightsail
+ // load balancer. You can include an alias (A type) record in your request,
+ // which points to a load balancer DNS name and routes traffic to your load
+ // balancer.
+ IsAlias *bool `locationName:"isAlias" type:"boolean"`
+
+ // The name of the domain.
+ Name *string `locationName:"name" type:"string"`
+
+ // (Deprecated) The options for the domain entry.
+ //
+ // In releases prior to November 29, 2017, this parameter was not included in
+ // the API response. It is now deprecated.
+ //
+ // Deprecated: Options has been deprecated
+ Options map[string]*string `locationName:"options" deprecated:"true" type:"map"`
+
+ // The target IP address (e.g., 192.0.2.0), or AWS name server (e.g., ns-111.awsdns-22.com.).
+ //
+ // For Lightsail load balancers, the value looks like ab1234c56789c6b86aba6fb203d443bc-123456789.us-east-2.elb.amazonaws.com.
+ // For Lightsail distributions, the value looks like exampled1182ne.cloudfront.net.
+ // For Lightsail container services, the value looks like container-service-1.example23scljs.us-west-2.cs.amazonlightsail.com.
+ // Be sure to also set isAlias to true when setting up an A record for a Lightsail
+ // load balancer, distribution, or container service.
+ Target *string `locationName:"target" type:"string"`
+
+ // The type of domain entry, such as address for IPv4 (A), address for IPv6
+ // (AAAA), canonical name (CNAME), mail exchanger (MX), name server (NS), start
+ // of authority (SOA), service locator (SRV), or text (TXT).
+ //
+ // The following domain entry types can be used:
+ //
+ // * A
+ //
+ // * AAAA
+ //
+ // * CNAME
+ //
+ // * MX
+ //
+ // * NS
+ //
+ // * SOA
+ //
+ // * SRV
+ //
+ // * TXT
+ Type *string `locationName:"type" type:"string"`
+}
+
+// String returns the string representation
+func (s DomainEntry) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DomainEntry) GoString() string {
+ return s.String()
+}
+
+// SetId sets the Id field's value.
+func (s *DomainEntry) SetId(v string) *DomainEntry {
+ s.Id = &v
+ return s
+}
+
+// SetIsAlias sets the IsAlias field's value.
+func (s *DomainEntry) SetIsAlias(v bool) *DomainEntry {
+ s.IsAlias = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *DomainEntry) SetName(v string) *DomainEntry {
+ s.Name = &v
+ return s
+}
+
+// SetOptions sets the Options field's value.
+func (s *DomainEntry) SetOptions(v map[string]*string) *DomainEntry {
+ s.Options = v
+ return s
+}
+
+// SetTarget sets the Target field's value.
+func (s *DomainEntry) SetTarget(v string) *DomainEntry {
+ s.Target = &v
+ return s
+}
+
+// SetType sets the Type field's value.
+func (s *DomainEntry) SetType(v string) *DomainEntry {
+ s.Type = &v
+ return s
+}
+
+// Describes the domain validation records of an Amazon Lightsail SSL/TLS certificate.
+type DomainValidationRecord struct {
+ _ struct{} `type:"structure"`
+
+ // The domain name of the certificate validation record. For example, example.com
+ // or www.example.com.
+ DomainName *string `locationName:"domainName" type:"string"`
+
+ // An object that describes the DNS records to add to your domain's DNS to validate
+ // it for the certificate.
+ ResourceRecord *ResourceRecord `locationName:"resourceRecord" type:"structure"`
+}
+
+// String returns the string representation
+func (s DomainValidationRecord) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DomainValidationRecord) GoString() string {
+ return s.String()
+}
+
+// SetDomainName sets the DomainName field's value.
+func (s *DomainValidationRecord) SetDomainName(v string) *DomainValidationRecord {
+ s.DomainName = &v
+ return s
+}
+
+// SetResourceRecord sets the ResourceRecord field's value.
+func (s *DomainValidationRecord) SetResourceRecord(v *ResourceRecord) *DomainValidationRecord {
+ s.ResourceRecord = v
+ return s
+}
+
+type DownloadDefaultKeyPairInput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DownloadDefaultKeyPairInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DownloadDefaultKeyPairInput) GoString() string {
+ return s.String()
+}
+
+type DownloadDefaultKeyPairOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A base64-encoded RSA private key.
+ PrivateKeyBase64 *string `locationName:"privateKeyBase64" type:"string"`
+
+ // A base64-encoded public key of the ssh-rsa type.
+ PublicKeyBase64 *string `locationName:"publicKeyBase64" type:"string"`
+}
+
+// String returns the string representation
+func (s DownloadDefaultKeyPairOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DownloadDefaultKeyPairOutput) GoString() string {
+ return s.String()
+}
+
+// SetPrivateKeyBase64 sets the PrivateKeyBase64 field's value.
+func (s *DownloadDefaultKeyPairOutput) SetPrivateKeyBase64(v string) *DownloadDefaultKeyPairOutput {
+ s.PrivateKeyBase64 = &v
+ return s
+}
+
+// SetPublicKeyBase64 sets the PublicKeyBase64 field's value.
+func (s *DownloadDefaultKeyPairOutput) SetPublicKeyBase64(v string) *DownloadDefaultKeyPairOutput {
+ s.PublicKeyBase64 = &v
+ return s
+}
+
+type EnableAddOnInput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of strings representing the add-on to enable or modify.
+ //
+ // AddOnRequest is a required field
+ AddOnRequest *AddOnRequest `locationName:"addOnRequest" type:"structure" required:"true"`
+
+ // The name of the source resource for which to enable or modify the add-on.
+ //
+ // ResourceName is a required field
+ ResourceName *string `locationName:"resourceName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s EnableAddOnInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableAddOnInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *EnableAddOnInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "EnableAddOnInput"}
+ if s.AddOnRequest == nil {
+ invalidParams.Add(request.NewErrParamRequired("AddOnRequest"))
+ }
+ if s.ResourceName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ResourceName"))
+ }
+ if s.AddOnRequest != nil {
+ if err := s.AddOnRequest.Validate(); err != nil {
+ invalidParams.AddNested("AddOnRequest", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAddOnRequest sets the AddOnRequest field's value.
+func (s *EnableAddOnInput) SetAddOnRequest(v *AddOnRequest) *EnableAddOnInput {
+ s.AddOnRequest = v
+ return s
+}
+
+// SetResourceName sets the ResourceName field's value.
+func (s *EnableAddOnInput) SetResourceName(v string) *EnableAddOnInput {
+ s.ResourceName = &v
+ return s
+}
+
+type EnableAddOnOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s EnableAddOnOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableAddOnOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *EnableAddOnOutput) SetOperations(v []*Operation) *EnableAddOnOutput {
+ s.Operations = v
+ return s
+}
+
+// Describes the settings of a public endpoint for an Amazon Lightsail container
+// service.
+type EndpointRequest struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the container for the endpoint.
+ //
+ // ContainerName is a required field
+ ContainerName *string `locationName:"containerName" type:"string" required:"true"`
+
+ // The port of the container to which traffic is forwarded to.
+ //
+ // ContainerPort is a required field
+ ContainerPort *int64 `locationName:"containerPort" type:"integer" required:"true"`
+
+ // An object that describes the health check configuration of the container.
+ HealthCheck *ContainerServiceHealthCheckConfig `locationName:"healthCheck" type:"structure"`
+}
+
+// String returns the string representation
+func (s EndpointRequest) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EndpointRequest) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *EndpointRequest) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "EndpointRequest"}
+ if s.ContainerName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ContainerName"))
+ }
+ if s.ContainerPort == nil {
+ invalidParams.Add(request.NewErrParamRequired("ContainerPort"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetContainerName sets the ContainerName field's value.
+func (s *EndpointRequest) SetContainerName(v string) *EndpointRequest {
+ s.ContainerName = &v
+ return s
+}
+
+// SetContainerPort sets the ContainerPort field's value.
+func (s *EndpointRequest) SetContainerPort(v int64) *EndpointRequest {
+ s.ContainerPort = &v
+ return s
+}
+
+// SetHealthCheck sets the HealthCheck field's value.
+func (s *EndpointRequest) SetHealthCheck(v *ContainerServiceHealthCheckConfig) *EndpointRequest {
+ s.HealthCheck = v
+ return s
+}
+
+type ExportSnapshotInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the instance or disk snapshot to be exported to Amazon EC2.
+ //
+ // SourceSnapshotName is a required field
+ SourceSnapshotName *string `locationName:"sourceSnapshotName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ExportSnapshotInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ExportSnapshotInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ExportSnapshotInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ExportSnapshotInput"}
+ if s.SourceSnapshotName == nil {
+ invalidParams.Add(request.NewErrParamRequired("SourceSnapshotName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetSourceSnapshotName sets the SourceSnapshotName field's value.
+func (s *ExportSnapshotInput) SetSourceSnapshotName(v string) *ExportSnapshotInput {
+ s.SourceSnapshotName = &v
+ return s
+}
+
+type ExportSnapshotOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s ExportSnapshotOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ExportSnapshotOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *ExportSnapshotOutput) SetOperations(v []*Operation) *ExportSnapshotOutput {
+ s.Operations = v
+ return s
+}
+
+// Describes an export snapshot record.
+type ExportSnapshotRecord struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the export snapshot record.
+ Arn *string `locationName:"arn" type:"string"`
+
+ // The date when the export snapshot record was created.
+ CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`
+
+ // A list of objects describing the destination of the export snapshot record.
+ DestinationInfo *DestinationInfo `locationName:"destinationInfo" type:"structure"`
+
+ // The AWS Region and Availability Zone where the export snapshot record is
+ // located.
+ Location *ResourceLocation `locationName:"location" type:"structure"`
+
+ // The export snapshot record name.
+ Name *string `locationName:"name" type:"string"`
+
+ // The Lightsail resource type (e.g., ExportSnapshotRecord).
+ ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"`
+
+ // A list of objects describing the source of the export snapshot record.
+ SourceInfo *ExportSnapshotRecordSourceInfo `locationName:"sourceInfo" type:"structure"`
+
+ // The state of the export snapshot record.
+ State *string `locationName:"state" type:"string" enum:"RecordState"`
+}
+
+// String returns the string representation
+func (s ExportSnapshotRecord) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ExportSnapshotRecord) GoString() string {
+ return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *ExportSnapshotRecord) SetArn(v string) *ExportSnapshotRecord {
+ s.Arn = &v
+ return s
+}
+
+// SetCreatedAt sets the CreatedAt field's value.
+func (s *ExportSnapshotRecord) SetCreatedAt(v time.Time) *ExportSnapshotRecord {
+ s.CreatedAt = &v
+ return s
+}
+
+// SetDestinationInfo sets the DestinationInfo field's value.
+func (s *ExportSnapshotRecord) SetDestinationInfo(v *DestinationInfo) *ExportSnapshotRecord {
+ s.DestinationInfo = v
+ return s
+}
+
+// SetLocation sets the Location field's value.
+func (s *ExportSnapshotRecord) SetLocation(v *ResourceLocation) *ExportSnapshotRecord {
+ s.Location = v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *ExportSnapshotRecord) SetName(v string) *ExportSnapshotRecord {
+ s.Name = &v
+ return s
+}
+
+// SetResourceType sets the ResourceType field's value.
+func (s *ExportSnapshotRecord) SetResourceType(v string) *ExportSnapshotRecord {
+ s.ResourceType = &v
+ return s
+}
+
+// SetSourceInfo sets the SourceInfo field's value.
+func (s *ExportSnapshotRecord) SetSourceInfo(v *ExportSnapshotRecordSourceInfo) *ExportSnapshotRecord {
+ s.SourceInfo = v
+ return s
+}
+
+// SetState sets the State field's value.
+func (s *ExportSnapshotRecord) SetState(v string) *ExportSnapshotRecord {
+ s.State = &v
+ return s
+}
+
+// Describes the source of an export snapshot record.
+type ExportSnapshotRecordSourceInfo struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the source instance or disk snapshot.
+ Arn *string `locationName:"arn" type:"string"`
+
+ // The date when the source instance or disk snapshot was created.
+ CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`
+
+ // A list of objects describing a disk snapshot.
+ DiskSnapshotInfo *DiskSnapshotInfo `locationName:"diskSnapshotInfo" type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the snapshot's source instance or disk.
+ FromResourceArn *string `locationName:"fromResourceArn" type:"string"`
+
+ // The name of the snapshot's source instance or disk.
+ FromResourceName *string `locationName:"fromResourceName" type:"string"`
+
+ // A list of objects describing an instance snapshot.
+ InstanceSnapshotInfo *InstanceSnapshotInfo `locationName:"instanceSnapshotInfo" type:"structure"`
+
+ // The name of the source instance or disk snapshot.
+ Name *string `locationName:"name" type:"string"`
+
+ // The Lightsail resource type (e.g., InstanceSnapshot or DiskSnapshot).
+ ResourceType *string `locationName:"resourceType" type:"string" enum:"ExportSnapshotRecordSourceType"`
+}
+
+// String returns the string representation
+func (s ExportSnapshotRecordSourceInfo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ExportSnapshotRecordSourceInfo) GoString() string {
+ return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *ExportSnapshotRecordSourceInfo) SetArn(v string) *ExportSnapshotRecordSourceInfo {
+ s.Arn = &v
+ return s
+}
+
+// SetCreatedAt sets the CreatedAt field's value.
+func (s *ExportSnapshotRecordSourceInfo) SetCreatedAt(v time.Time) *ExportSnapshotRecordSourceInfo {
+ s.CreatedAt = &v
+ return s
+}
+
+// SetDiskSnapshotInfo sets the DiskSnapshotInfo field's value.
+func (s *ExportSnapshotRecordSourceInfo) SetDiskSnapshotInfo(v *DiskSnapshotInfo) *ExportSnapshotRecordSourceInfo {
+ s.DiskSnapshotInfo = v
+ return s
+}
+
+// SetFromResourceArn sets the FromResourceArn field's value.
+func (s *ExportSnapshotRecordSourceInfo) SetFromResourceArn(v string) *ExportSnapshotRecordSourceInfo {
+ s.FromResourceArn = &v
+ return s
+}
+
+// SetFromResourceName sets the FromResourceName field's value.
+func (s *ExportSnapshotRecordSourceInfo) SetFromResourceName(v string) *ExportSnapshotRecordSourceInfo {
+ s.FromResourceName = &v
+ return s
+}
+
+// SetInstanceSnapshotInfo sets the InstanceSnapshotInfo field's value.
+func (s *ExportSnapshotRecordSourceInfo) SetInstanceSnapshotInfo(v *InstanceSnapshotInfo) *ExportSnapshotRecordSourceInfo {
+ s.InstanceSnapshotInfo = v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *ExportSnapshotRecordSourceInfo) SetName(v string) *ExportSnapshotRecordSourceInfo {
+ s.Name = &v
+ return s
+}
+
+// SetResourceType sets the ResourceType field's value.
+func (s *ExportSnapshotRecordSourceInfo) SetResourceType(v string) *ExportSnapshotRecordSourceInfo {
+ s.ResourceType = &v
+ return s
+}
+
+type GetActiveNamesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The token to advance to the next page of results from your request.
+ //
+ // To get a page token, perform an initial GetActiveNames request. If your results
+ // are paginated, the response will return a next page token that you can specify
+ // as the page token in a subsequent request.
+ PageToken *string `locationName:"pageToken" type:"string"`
+}
+
+// String returns the string representation
+func (s GetActiveNamesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetActiveNamesInput) GoString() string {
+ return s.String()
+}
+
+// SetPageToken sets the PageToken field's value.
+func (s *GetActiveNamesInput) SetPageToken(v string) *GetActiveNamesInput {
+ s.PageToken = &v
+ return s
+}
+
+type GetActiveNamesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The list of active names returned by the get active names request.
+ ActiveNames []*string `locationName:"activeNames" type:"list"`
+
+ // The token to advance to the next page of results from your request.
+ //
+ // A next page token is not returned if there are no more results to display.
+ //
+ // To get the next page of results, perform another GetActiveNames request and
+ // specify the next page token using the pageToken parameter.
+ NextPageToken *string `locationName:"nextPageToken" type:"string"`
+}
+
+// String returns the string representation
+func (s GetActiveNamesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetActiveNamesOutput) GoString() string {
+ return s.String()
+}
+
+// SetActiveNames sets the ActiveNames field's value.
+func (s *GetActiveNamesOutput) SetActiveNames(v []*string) *GetActiveNamesOutput {
+ s.ActiveNames = v
+ return s
+}
+
+// SetNextPageToken sets the NextPageToken field's value.
+func (s *GetActiveNamesOutput) SetNextPageToken(v string) *GetActiveNamesOutput {
+ s.NextPageToken = &v
+ return s
+}
+
+type GetAlarmsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the alarm.
+ //
+ // Specify an alarm name to return information about a specific alarm.
+ AlarmName *string `locationName:"alarmName" type:"string"`
+
+ // The name of the Lightsail resource being monitored by the alarm.
+ //
+ // Specify a monitored resource name to return information about all alarms
+ // for a specific resource.
+ MonitoredResourceName *string `locationName:"monitoredResourceName" type:"string"`
+
+ // The token to advance to the next page of results from your request.
+ //
+ // To get a page token, perform an initial GetAlarms request. If your results
+ // are paginated, the response will return a next page token that you can specify
+ // as the page token in a subsequent request.
+ PageToken *string `locationName:"pageToken" type:"string"`
+}
+
+// String returns the string representation
+func (s GetAlarmsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetAlarmsInput) GoString() string {
+ return s.String()
+}
+
+// SetAlarmName sets the AlarmName field's value.
+func (s *GetAlarmsInput) SetAlarmName(v string) *GetAlarmsInput {
+ s.AlarmName = &v
+ return s
+}
+
+// SetMonitoredResourceName sets the MonitoredResourceName field's value.
+func (s *GetAlarmsInput) SetMonitoredResourceName(v string) *GetAlarmsInput {
+ s.MonitoredResourceName = &v
+ return s
+}
+
+// SetPageToken sets the PageToken field's value.
+func (s *GetAlarmsInput) SetPageToken(v string) *GetAlarmsInput {
+ s.PageToken = &v
+ return s
+}
+
+type GetAlarmsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the alarms.
+ Alarms []*Alarm `locationName:"alarms" type:"list"`
+
+ // The token to advance to the next page of results from your request.
+ //
+ // A next page token is not returned if there are no more results to display.
+ //
+ // To get the next page of results, perform another GetAlarms request and specify
+ // the next page token using the pageToken parameter.
+ NextPageToken *string `locationName:"nextPageToken" type:"string"`
+}
+
+// String returns the string representation
+func (s GetAlarmsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetAlarmsOutput) GoString() string {
+ return s.String()
+}
+
+// SetAlarms sets the Alarms field's value.
+func (s *GetAlarmsOutput) SetAlarms(v []*Alarm) *GetAlarmsOutput {
+ s.Alarms = v
+ return s
+}
+
+// SetNextPageToken sets the NextPageToken field's value.
+func (s *GetAlarmsOutput) SetNextPageToken(v string) *GetAlarmsOutput {
+ s.NextPageToken = &v
+ return s
+}
+
+type GetAutoSnapshotsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the source instance or disk from which to get automatic snapshot
+ // information.
+ //
+ // ResourceName is a required field
+ ResourceName *string `locationName:"resourceName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetAutoSnapshotsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetAutoSnapshotsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetAutoSnapshotsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetAutoSnapshotsInput"}
+ if s.ResourceName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ResourceName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetResourceName sets the ResourceName field's value.
+func (s *GetAutoSnapshotsInput) SetResourceName(v string) *GetAutoSnapshotsInput {
+ s.ResourceName = &v
+ return s
+}
+
+type GetAutoSnapshotsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the automatic snapshots that are available
+ // for the specified source instance or disk.
+ AutoSnapshots []*AutoSnapshotDetails `locationName:"autoSnapshots" type:"list"`
+
+ // The name of the source instance or disk for the automatic snapshots.
+ ResourceName *string `locationName:"resourceName" type:"string"`
+
+ // The resource type (e.g., Instance or Disk).
+ ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"`
+}
+
+// String returns the string representation
+func (s GetAutoSnapshotsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetAutoSnapshotsOutput) GoString() string {
+ return s.String()
+}
+
+// SetAutoSnapshots sets the AutoSnapshots field's value.
+func (s *GetAutoSnapshotsOutput) SetAutoSnapshots(v []*AutoSnapshotDetails) *GetAutoSnapshotsOutput {
+ s.AutoSnapshots = v
+ return s
+}
+
+// SetResourceName sets the ResourceName field's value.
+func (s *GetAutoSnapshotsOutput) SetResourceName(v string) *GetAutoSnapshotsOutput {
+ s.ResourceName = &v
+ return s
+}
+
+// SetResourceType sets the ResourceType field's value.
+func (s *GetAutoSnapshotsOutput) SetResourceType(v string) *GetAutoSnapshotsOutput {
+ s.ResourceType = &v
+ return s
+}
+
+type GetBlueprintsInput struct {
+ _ struct{} `type:"structure"`
+
+ // A Boolean value indicating whether to include inactive results in your request.
+ IncludeInactive *bool `locationName:"includeInactive" type:"boolean"`
+
+ // The token to advance to the next page of results from your request.
+ //
+ // To get a page token, perform an initial GetBlueprints request. If your results
+ // are paginated, the response will return a next page token that you can specify
+ // as the page token in a subsequent request.
+ PageToken *string `locationName:"pageToken" type:"string"`
+}
+
+// String returns the string representation
+func (s GetBlueprintsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBlueprintsInput) GoString() string {
+ return s.String()
+}
+
+// SetIncludeInactive sets the IncludeInactive field's value.
+func (s *GetBlueprintsInput) SetIncludeInactive(v bool) *GetBlueprintsInput {
+ s.IncludeInactive = &v
+ return s
+}
+
+// SetPageToken sets the PageToken field's value.
+func (s *GetBlueprintsInput) SetPageToken(v string) *GetBlueprintsInput {
+ s.PageToken = &v
+ return s
+}
+
+type GetBlueprintsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of key-value pairs that contains information about the available
+ // blueprints.
+ Blueprints []*Blueprint `locationName:"blueprints" type:"list"`
+
+ // The token to advance to the next page of results from your request.
+ //
+ // A next page token is not returned if there are no more results to display.
+ //
+ // To get the next page of results, perform another GetBlueprints request and
+ // specify the next page token using the pageToken parameter.
+ NextPageToken *string `locationName:"nextPageToken" type:"string"`
+}
+
+// String returns the string representation
+func (s GetBlueprintsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBlueprintsOutput) GoString() string {
+ return s.String()
+}
+
+// SetBlueprints sets the Blueprints field's value.
+func (s *GetBlueprintsOutput) SetBlueprints(v []*Blueprint) *GetBlueprintsOutput {
+ s.Blueprints = v
+ return s
+}
+
+// SetNextPageToken sets the NextPageToken field's value.
+func (s *GetBlueprintsOutput) SetNextPageToken(v string) *GetBlueprintsOutput {
+ s.NextPageToken = &v
+ return s
+}
+
+type GetBucketAccessKeysInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the bucket for which to return access keys.
+ //
+ // BucketName is a required field
+ BucketName *string `locationName:"bucketName" min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketAccessKeysInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketAccessKeysInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketAccessKeysInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketAccessKeysInput"}
+ if s.BucketName == nil {
+ invalidParams.Add(request.NewErrParamRequired("BucketName"))
+ }
+ if s.BucketName != nil && len(*s.BucketName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("BucketName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucketName sets the BucketName field's value.
+func (s *GetBucketAccessKeysInput) SetBucketName(v string) *GetBucketAccessKeysInput {
+ s.BucketName = &v
+ return s
+}
+
+type GetBucketAccessKeysOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An object that describes the access keys for the specified bucket.
+ AccessKeys []*AccessKey `locationName:"accessKeys" type:"list"`
+}
+
+// String returns the string representation
+func (s GetBucketAccessKeysOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketAccessKeysOutput) GoString() string {
+ return s.String()
+}
+
+// SetAccessKeys sets the AccessKeys field's value.
+func (s *GetBucketAccessKeysOutput) SetAccessKeys(v []*AccessKey) *GetBucketAccessKeysOutput {
+ s.AccessKeys = v
+ return s
+}
+
+type GetBucketBundlesInput struct {
+ _ struct{} `type:"structure"`
+
+ // A Boolean value that indicates whether to include inactive (unavailable)
+ // bundles in the response.
+ IncludeInactive *bool `locationName:"includeInactive" type:"boolean"`
+}
+
+// String returns the string representation
+func (s GetBucketBundlesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketBundlesInput) GoString() string {
+ return s.String()
+}
+
+// SetIncludeInactive sets the IncludeInactive field's value.
+func (s *GetBucketBundlesInput) SetIncludeInactive(v bool) *GetBucketBundlesInput {
+ s.IncludeInactive = &v
+ return s
+}
+
+type GetBucketBundlesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An object that describes bucket bundles.
+ Bundles []*BucketBundle `locationName:"bundles" type:"list"`
+}
+
+// String returns the string representation
+func (s GetBucketBundlesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketBundlesOutput) GoString() string {
+ return s.String()
+}
+
+// SetBundles sets the Bundles field's value.
+func (s *GetBucketBundlesOutput) SetBundles(v []*BucketBundle) *GetBucketBundlesOutput {
+ s.Bundles = v
+ return s
+}
+
+type GetBucketMetricDataInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the bucket for which to get metric data.
+ //
+ // BucketName is a required field
+ BucketName *string `locationName:"bucketName" min:"3" type:"string" required:"true"`
+
+ // The timestamp indicating the latest data to be returned.
+ //
+ // EndTime is a required field
+ EndTime *time.Time `locationName:"endTime" type:"timestamp" required:"true"`
+
+ // The metric for which you want to return information.
+ //
+ // Valid bucket metric names are listed below, along with the most useful statistics
+ // to include in your request, and the published unit value.
+ //
+ // These bucket metrics are reported once per day.
+ //
+ // * BucketSizeBytes - The amount of data in bytes stored in a bucket. This
+ // value is calculated by summing the size of all objects in the bucket (including
+ // object versions), including the size of all parts for all incomplete multipart
+ // uploads to the bucket. Statistics: The most useful statistic is Maximum.
+ // Unit: The published unit is Bytes.
+ //
+ // * NumberOfObjects - The total number of objects stored in a bucket. This
+ // value is calculated by counting all objects in the bucket (including object
+ // versions) and the total number of parts for all incomplete multipart uploads
+ // to the bucket. Statistics: The most useful statistic is Average. Unit:
+ // The published unit is Count.
+ //
+ // MetricName is a required field
+ MetricName *string `locationName:"metricName" type:"string" required:"true" enum:"BucketMetricName"`
+
+ // The granularity, in seconds, of the returned data points.
+ //
+ // Bucket storage metrics are reported once per day. Therefore, you should specify
+ // a period of 86400 seconds, which is the number of seconds in a day.
+ //
+ // Period is a required field
+ Period *int64 `locationName:"period" min:"60" type:"integer" required:"true"`
+
+ // The timestamp indicating the earliest data to be returned.
+ //
+ // StartTime is a required field
+ StartTime *time.Time `locationName:"startTime" type:"timestamp" required:"true"`
+
+ // The statistic for the metric.
+ //
+ // The following statistics are available:
+ //
+ // * Minimum - The lowest value observed during the specified period. Use
+ // this value to determine low volumes of activity for your application.
+ //
+ // * Maximum - The highest value observed during the specified period. Use
+ // this value to determine high volumes of activity for your application.
+ //
+ // * Sum - The sum of all values submitted for the matching metric. You can
+ // use this statistic to determine the total volume of a metric.
+ //
+ // * Average - The value of Sum / SampleCount during the specified period.
+ // By comparing this statistic with the Minimum and Maximum values, you can
+ // determine the full scope of a metric and how close the average use is
+ // to the Minimum and Maximum values. This comparison helps you to know when
+ // to increase or decrease your resources.
+ //
+ // * SampleCount - The count, or number, of data points used for the statistical
+ // calculation.
+ //
+ // Statistics is a required field
+ Statistics []*string `locationName:"statistics" type:"list" required:"true"`
+
+ // The unit for the metric data request.
+ //
+ // Valid units depend on the metric data being requested. For the valid units
+ // with each available metric, see the metricName parameter.
+ //
+ // Unit is a required field
+ Unit *string `locationName:"unit" type:"string" required:"true" enum:"MetricUnit"`
+}
+
+// String returns the string representation
+func (s GetBucketMetricDataInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketMetricDataInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketMetricDataInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketMetricDataInput"}
+ if s.BucketName == nil {
+ invalidParams.Add(request.NewErrParamRequired("BucketName"))
+ }
+ if s.BucketName != nil && len(*s.BucketName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("BucketName", 3))
+ }
+ if s.EndTime == nil {
+ invalidParams.Add(request.NewErrParamRequired("EndTime"))
+ }
+ if s.MetricName == nil {
+ invalidParams.Add(request.NewErrParamRequired("MetricName"))
+ }
+ if s.Period == nil {
+ invalidParams.Add(request.NewErrParamRequired("Period"))
+ }
+ if s.Period != nil && *s.Period < 60 {
+ invalidParams.Add(request.NewErrParamMinValue("Period", 60))
+ }
+ if s.StartTime == nil {
+ invalidParams.Add(request.NewErrParamRequired("StartTime"))
+ }
+ if s.Statistics == nil {
+ invalidParams.Add(request.NewErrParamRequired("Statistics"))
+ }
+ if s.Unit == nil {
+ invalidParams.Add(request.NewErrParamRequired("Unit"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucketName sets the BucketName field's value.
+func (s *GetBucketMetricDataInput) SetBucketName(v string) *GetBucketMetricDataInput {
+ s.BucketName = &v
+ return s
+}
+
+// SetEndTime sets the EndTime field's value.
+func (s *GetBucketMetricDataInput) SetEndTime(v time.Time) *GetBucketMetricDataInput {
+ s.EndTime = &v
+ return s
+}
+
+// SetMetricName sets the MetricName field's value.
+func (s *GetBucketMetricDataInput) SetMetricName(v string) *GetBucketMetricDataInput {
+ s.MetricName = &v
+ return s
+}
+
+// SetPeriod sets the Period field's value.
+func (s *GetBucketMetricDataInput) SetPeriod(v int64) *GetBucketMetricDataInput {
+ s.Period = &v
+ return s
+}
+
+// SetStartTime sets the StartTime field's value.
+func (s *GetBucketMetricDataInput) SetStartTime(v time.Time) *GetBucketMetricDataInput {
+ s.StartTime = &v
+ return s
+}
+
+// SetStatistics sets the Statistics field's value.
+func (s *GetBucketMetricDataInput) SetStatistics(v []*string) *GetBucketMetricDataInput {
+ s.Statistics = v
+ return s
+}
+
+// SetUnit sets the Unit field's value.
+func (s *GetBucketMetricDataInput) SetUnit(v string) *GetBucketMetricDataInput {
+ s.Unit = &v
+ return s
+}
+
+type GetBucketMetricDataOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the metric data returned.
+ MetricData []*MetricDatapoint `locationName:"metricData" type:"list"`
+
+ // The name of the metric returned.
+ MetricName *string `locationName:"metricName" type:"string" enum:"BucketMetricName"`
+}
+
+// String returns the string representation
+func (s GetBucketMetricDataOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketMetricDataOutput) GoString() string {
+ return s.String()
+}
+
+// SetMetricData sets the MetricData field's value.
+func (s *GetBucketMetricDataOutput) SetMetricData(v []*MetricDatapoint) *GetBucketMetricDataOutput {
+ s.MetricData = v
+ return s
+}
+
+// SetMetricName sets the MetricName field's value.
+func (s *GetBucketMetricDataOutput) SetMetricName(v string) *GetBucketMetricDataOutput {
+ s.MetricName = &v
+ return s
+}
+
+type GetBucketsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the bucket for which to return information.
+ //
+ // When omitted, the response includes all of your buckets in the AWS Region
+ // where the request is made.
+ BucketName *string `locationName:"bucketName" min:"3" type:"string"`
+
+ // A Boolean value that indicates whether to include Lightsail instances that
+ // were given access to the bucket using the SetResourceAccessForBucket action.
+ IncludeConnectedResources *bool `locationName:"includeConnectedResources" type:"boolean"`
+
+ // The token to advance to the next page of results from your request.
+ //
+ // To get a page token, perform an initial GetBuckets request. If your results
+ // are paginated, the response will return a next page token that you can specify
+ // as the page token in a subsequent request.
+ PageToken *string `locationName:"pageToken" type:"string"`
+}
+
+// String returns the string representation
+func (s GetBucketsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketsInput"}
+ if s.BucketName != nil && len(*s.BucketName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("BucketName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucketName sets the BucketName field's value.
+func (s *GetBucketsInput) SetBucketName(v string) *GetBucketsInput {
+ s.BucketName = &v
+ return s
+}
+
+// SetIncludeConnectedResources sets the IncludeConnectedResources field's value.
+func (s *GetBucketsInput) SetIncludeConnectedResources(v bool) *GetBucketsInput {
+ s.IncludeConnectedResources = &v
+ return s
+}
+
+// SetPageToken sets the PageToken field's value.
+func (s *GetBucketsInput) SetPageToken(v string) *GetBucketsInput {
+ s.PageToken = &v
+ return s
+}
+
+type GetBucketsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe buckets.
+ Buckets []*Bucket `locationName:"buckets" type:"list"`
+
+ // The token to advance to the next page of results from your request.
+ //
+ // A next page token is not returned if there are no more results to display.
+ //
+ // To get the next page of results, perform another GetBuckets request and specify
+ // the next page token using the pageToken parameter.
+ NextPageToken *string `locationName:"nextPageToken" type:"string"`
+}
+
+// String returns the string representation
+func (s GetBucketsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketsOutput) GoString() string {
+ return s.String()
+}
+
+// SetBuckets sets the Buckets field's value.
+func (s *GetBucketsOutput) SetBuckets(v []*Bucket) *GetBucketsOutput {
+ s.Buckets = v
+ return s
+}
+
+// SetNextPageToken sets the NextPageToken field's value.
+func (s *GetBucketsOutput) SetNextPageToken(v string) *GetBucketsOutput {
+ s.NextPageToken = &v
+ return s
+}
+
+type GetBundlesInput struct {
+ _ struct{} `type:"structure"`
+
+ // A Boolean value that indicates whether to include inactive bundle results
+ // in your request.
+ IncludeInactive *bool `locationName:"includeInactive" type:"boolean"`
+
+ // The token to advance to the next page of results from your request.
+ //
+ // To get a page token, perform an initial GetBundles request. If your results
+ // are paginated, the response will return a next page token that you can specify
+ // as the page token in a subsequent request.
+ PageToken *string `locationName:"pageToken" type:"string"`
+}
+
+// String returns the string representation
+func (s GetBundlesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBundlesInput) GoString() string {
+ return s.String()
+}
+
+// SetIncludeInactive sets the IncludeInactive field's value.
+func (s *GetBundlesInput) SetIncludeInactive(v bool) *GetBundlesInput {
+ s.IncludeInactive = &v
+ return s
+}
+
+// SetPageToken sets the PageToken field's value.
+func (s *GetBundlesInput) SetPageToken(v string) *GetBundlesInput {
+ s.PageToken = &v
+ return s
+}
+
+type GetBundlesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of key-value pairs that contains information about the available
+ // bundles.
+ Bundles []*Bundle `locationName:"bundles" type:"list"`
+
+ // The token to advance to the next page of results from your request.
+ //
+ // A next page token is not returned if there are no more results to display.
+ //
+ // To get the next page of results, perform another GetBundles request and specify
+ // the next page token using the pageToken parameter.
+ NextPageToken *string `locationName:"nextPageToken" type:"string"`
+}
+
+// String returns the string representation
+func (s GetBundlesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBundlesOutput) GoString() string {
+ return s.String()
+}
+
+// SetBundles sets the Bundles field's value.
+func (s *GetBundlesOutput) SetBundles(v []*Bundle) *GetBundlesOutput {
+ s.Bundles = v
+ return s
+}
+
+// SetNextPageToken sets the NextPageToken field's value.
+func (s *GetBundlesOutput) SetNextPageToken(v string) *GetBundlesOutput {
+ s.NextPageToken = &v
+ return s
+}
+
+type GetCertificatesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name for the certificate for which to return information.
+ //
+ // When omitted, the response includes all of your certificates in the AWS Region
+ // where the request is made.
+ CertificateName *string `locationName:"certificateName" type:"string"`
+
+ // The status of the certificates for which to return information.
+ //
+ // For example, specify ISSUED to return only certificates with an ISSUED status.
+ //
+ // When omitted, the response includes all of your certificates in the AWS Region
+ // where the request is made, regardless of their current status.
+ CertificateStatuses []*string `locationName:"certificateStatuses" type:"list"`
+
+ // Indicates whether to include detailed information about the certificates
+ // in the response.
+ //
+ // When omitted, the response includes only the certificate names, Amazon Resource
+ // Names (ARNs), domain names, and tags.
+ IncludeCertificateDetails *bool `locationName:"includeCertificateDetails" type:"boolean"`
+}
+
+// String returns the string representation
+func (s GetCertificatesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetCertificatesInput) GoString() string {
+ return s.String()
+}
+
+// SetCertificateName sets the CertificateName field's value.
+func (s *GetCertificatesInput) SetCertificateName(v string) *GetCertificatesInput {
+ s.CertificateName = &v
+ return s
+}
+
+// SetCertificateStatuses sets the CertificateStatuses field's value.
+func (s *GetCertificatesInput) SetCertificateStatuses(v []*string) *GetCertificatesInput {
+ s.CertificateStatuses = v
+ return s
+}
+
+// SetIncludeCertificateDetails sets the IncludeCertificateDetails field's value.
+func (s *GetCertificatesInput) SetIncludeCertificateDetails(v bool) *GetCertificatesInput {
+ s.IncludeCertificateDetails = &v
+ return s
+}
+
+type GetCertificatesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An object that describes certificates.
+ Certificates []*CertificateSummary `locationName:"certificates" type:"list"`
+}
+
+// String returns the string representation
+func (s GetCertificatesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetCertificatesOutput) GoString() string {
+ return s.String()
+}
+
+// SetCertificates sets the Certificates field's value.
+func (s *GetCertificatesOutput) SetCertificates(v []*CertificateSummary) *GetCertificatesOutput {
+ s.Certificates = v
+ return s
+}
+
+type GetCloudFormationStackRecordsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The token to advance to the next page of results from your request.
+	//
+	// To get a page token, perform an initial GetCloudFormationStackRecords request.
+	// If your results are paginated, the response will return a next page token
+	// that you can specify as the page token in a subsequent request.
+	PageToken *string `locationName:"pageToken" type:"string"`
+}
+
+// String returns the string representation
+func (s GetCloudFormationStackRecordsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetCloudFormationStackRecordsInput) GoString() string {
+	return s.String()
+}
+
+// SetPageToken sets the PageToken field's value.
+func (s *GetCloudFormationStackRecordsInput) SetPageToken(v string) *GetCloudFormationStackRecordsInput {
+	s.PageToken = &v
+	return s
+}
+
+type GetCloudFormationStackRecordsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of objects describing the CloudFormation stack records.
+ CloudFormationStackRecords []*CloudFormationStackRecord `locationName:"cloudFormationStackRecords" type:"list"`
+
+ // The token to advance to the next page of results from your request.
+ //
+ // A next page token is not returned if there are no more results to display.
+ //
+ // To get the next page of results, perform another GetCloudFormationStackRecords
+ // request and specify the next page token using the pageToken parameter.
+ NextPageToken *string `locationName:"nextPageToken" type:"string"`
+}
+
+// String returns the string representation
+func (s GetCloudFormationStackRecordsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetCloudFormationStackRecordsOutput) GoString() string {
+ return s.String()
+}
+
+// SetCloudFormationStackRecords sets the CloudFormationStackRecords field's value.
+func (s *GetCloudFormationStackRecordsOutput) SetCloudFormationStackRecords(v []*CloudFormationStackRecord) *GetCloudFormationStackRecordsOutput {
+ s.CloudFormationStackRecords = v
+ return s
+}
+
+// SetNextPageToken sets the NextPageToken field's value.
+func (s *GetCloudFormationStackRecordsOutput) SetNextPageToken(v string) *GetCloudFormationStackRecordsOutput {
+ s.NextPageToken = &v
+ return s
+}
+
+type GetContactMethodsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The protocols used to send notifications, such as Email, or SMS (text messaging).
+ //
+ // Specify a protocol in your request to return information about a specific
+ // contact method protocol.
+ Protocols []*string `locationName:"protocols" type:"list"`
+}
+
+// String returns the string representation
+func (s GetContactMethodsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetContactMethodsInput) GoString() string {
+ return s.String()
+}
+
+// SetProtocols sets the Protocols field's value.
+func (s *GetContactMethodsInput) SetProtocols(v []*string) *GetContactMethodsInput {
+ s.Protocols = v
+ return s
+}
+
+type GetContactMethodsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the contact methods.
+ ContactMethods []*ContactMethod `locationName:"contactMethods" type:"list"`
+}
+
+// String returns the string representation
+func (s GetContactMethodsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetContactMethodsOutput) GoString() string {
+ return s.String()
+}
+
+// SetContactMethods sets the ContactMethods field's value.
+func (s *GetContactMethodsOutput) SetContactMethods(v []*ContactMethod) *GetContactMethodsOutput {
+ s.ContactMethods = v
+ return s
+}
+
+type GetContainerAPIMetadataInput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetContainerAPIMetadataInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetContainerAPIMetadataInput) GoString() string {
+ return s.String()
+}
+
+type GetContainerAPIMetadataOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Metadata about Lightsail containers, such as the current version of the Lightsail
+ // Control (lightsailctl) plugin.
+ Metadata []map[string]*string `locationName:"metadata" type:"list"`
+}
+
+// String returns the string representation
+func (s GetContainerAPIMetadataOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetContainerAPIMetadataOutput) GoString() string {
+ return s.String()
+}
+
+// SetMetadata sets the Metadata field's value.
+func (s *GetContainerAPIMetadataOutput) SetMetadata(v []map[string]*string) *GetContainerAPIMetadataOutput {
+ s.Metadata = v
+ return s
+}
+
+type GetContainerImagesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the container service for which to return registered container
+ // images.
+ //
+ // ServiceName is a required field
+ ServiceName *string `locationName:"serviceName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetContainerImagesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetContainerImagesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetContainerImagesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetContainerImagesInput"}
+ if s.ServiceName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ServiceName"))
+ }
+ if s.ServiceName != nil && len(*s.ServiceName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ServiceName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetServiceName sets the ServiceName field's value.
+func (s *GetContainerImagesInput) SetServiceName(v string) *GetContainerImagesInput {
+ s.ServiceName = &v
+ return s
+}
+
+type GetContainerImagesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe container images that are registered to
+ // the container service.
+ ContainerImages []*ContainerImage `locationName:"containerImages" type:"list"`
+}
+
+// String returns the string representation
+func (s GetContainerImagesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetContainerImagesOutput) GoString() string {
+ return s.String()
+}
+
+// SetContainerImages sets the ContainerImages field's value.
+func (s *GetContainerImagesOutput) SetContainerImages(v []*ContainerImage) *GetContainerImagesOutput {
+ s.ContainerImages = v
+ return s
+}
+
+type GetContainerLogInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the container that is either running or previously ran on the
+ // container service for which to return a log.
+ //
+ // ContainerName is a required field
+ ContainerName *string `locationName:"containerName" type:"string" required:"true"`
+
+ // The end of the time interval for which to get log data.
+ //
+ // Constraints:
+ //
+ // * Specified in Coordinated Universal Time (UTC).
+ //
+ // * Specified in the Unix time format. For example, if you wish to use an
+ // end time of October 1, 2018, at 9 PM UTC, specify 1538427600 as the end
+ // time.
+ //
+ // You can convert a human-friendly time to Unix time format using a converter
+ // like Epoch converter (https://www.epochconverter.com/).
+ EndTime *time.Time `locationName:"endTime" type:"timestamp"`
+
+ // The pattern to use to filter the returned log events to a specific term.
+ //
+ // The following are a few examples of filter patterns that you can specify:
+ //
+ // * To return all log events, specify a filter pattern of "".
+ //
+ // * To exclude log events that contain the ERROR term, and return all other
+ // log events, specify a filter pattern of "-ERROR".
+ //
+ // * To return log events that contain the ERROR term, specify a filter pattern
+ // of "ERROR".
+ //
+ // * To return log events that contain both the ERROR and Exception terms,
+ // specify a filter pattern of "ERROR Exception".
+ //
+ // * To return log events that contain the ERROR or the Exception term, specify
+ // a filter pattern of "?ERROR ?Exception".
+ FilterPattern *string `locationName:"filterPattern" type:"string"`
+
+ // The token to advance to the next page of results from your request.
+ //
+ // To get a page token, perform an initial GetContainerLog request. If your
+ // results are paginated, the response will return a next page token that you
+ // can specify as the page token in a subsequent request.
+ PageToken *string `locationName:"pageToken" type:"string"`
+
+ // The name of the container service for which to get a container log.
+ //
+ // ServiceName is a required field
+ ServiceName *string `locationName:"serviceName" min:"1" type:"string" required:"true"`
+
+ // The start of the time interval for which to get log data.
+ //
+ // Constraints:
+ //
+ // * Specified in Coordinated Universal Time (UTC).
+ //
+ // * Specified in the Unix time format. For example, if you wish to use a
+ // start time of October 1, 2018, at 8 PM UTC, specify 1538424000 as the
+ // start time.
+ //
+ // You can convert a human-friendly time to Unix time format using a converter
+ // like Epoch converter (https://www.epochconverter.com/).
+ StartTime *time.Time `locationName:"startTime" type:"timestamp"`
+}
+
+// String returns the string representation
+func (s GetContainerLogInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetContainerLogInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetContainerLogInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetContainerLogInput"}
+ if s.ContainerName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ContainerName"))
+ }
+ if s.ServiceName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ServiceName"))
+ }
+ if s.ServiceName != nil && len(*s.ServiceName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ServiceName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetContainerName sets the ContainerName field's value.
+func (s *GetContainerLogInput) SetContainerName(v string) *GetContainerLogInput {
+ s.ContainerName = &v
+ return s
+}
+
+// SetEndTime sets the EndTime field's value.
+func (s *GetContainerLogInput) SetEndTime(v time.Time) *GetContainerLogInput {
+ s.EndTime = &v
+ return s
+}
+
+// SetFilterPattern sets the FilterPattern field's value.
+func (s *GetContainerLogInput) SetFilterPattern(v string) *GetContainerLogInput {
+ s.FilterPattern = &v
+ return s
+}
+
+// SetPageToken sets the PageToken field's value.
+func (s *GetContainerLogInput) SetPageToken(v string) *GetContainerLogInput {
+ s.PageToken = &v
+ return s
+}
+
+// SetServiceName sets the ServiceName field's value.
+func (s *GetContainerLogInput) SetServiceName(v string) *GetContainerLogInput {
+ s.ServiceName = &v
+ return s
+}
+
+// SetStartTime sets the StartTime field's value.
+func (s *GetContainerLogInput) SetStartTime(v time.Time) *GetContainerLogInput {
+ s.StartTime = &v
+ return s
+}
+
+type GetContainerLogOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the log events of a container.
+ LogEvents []*ContainerServiceLogEvent `locationName:"logEvents" type:"list"`
+
+ // The token to advance to the next page of results from your request.
+ //
+ // A next page token is not returned if there are no more results to display.
+ //
+ // To get the next page of results, perform another GetContainerLog request
+ // and specify the next page token using the pageToken parameter.
+ NextPageToken *string `locationName:"nextPageToken" type:"string"`
+}
+
+// String returns the string representation
+func (s GetContainerLogOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetContainerLogOutput) GoString() string {
+ return s.String()
+}
+
+// SetLogEvents sets the LogEvents field's value.
+func (s *GetContainerLogOutput) SetLogEvents(v []*ContainerServiceLogEvent) *GetContainerLogOutput {
+ s.LogEvents = v
+ return s
+}
+
+// SetNextPageToken sets the NextPageToken field's value.
+func (s *GetContainerLogOutput) SetNextPageToken(v string) *GetContainerLogOutput {
+ s.NextPageToken = &v
+ return s
+}
+
+type GetContainerServiceDeploymentsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the container service for which to return deployments.
+ //
+ // ServiceName is a required field
+ ServiceName *string `locationName:"serviceName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetContainerServiceDeploymentsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetContainerServiceDeploymentsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetContainerServiceDeploymentsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetContainerServiceDeploymentsInput"}
+ if s.ServiceName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ServiceName"))
+ }
+ if s.ServiceName != nil && len(*s.ServiceName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ServiceName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetServiceName sets the ServiceName field's value.
+func (s *GetContainerServiceDeploymentsInput) SetServiceName(v string) *GetContainerServiceDeploymentsInput {
+ s.ServiceName = &v
+ return s
+}
+
+type GetContainerServiceDeploymentsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe deployments for a container service.
+ Deployments []*ContainerServiceDeployment `locationName:"deployments" type:"list"`
+}
+
+// String returns the string representation
+func (s GetContainerServiceDeploymentsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetContainerServiceDeploymentsOutput) GoString() string {
+ return s.String()
+}
+
+// SetDeployments sets the Deployments field's value.
+func (s *GetContainerServiceDeploymentsOutput) SetDeployments(v []*ContainerServiceDeployment) *GetContainerServiceDeploymentsOutput {
+ s.Deployments = v
+ return s
+}
+
+type GetContainerServiceMetricDataInput struct {
+ _ struct{} `type:"structure"`
+
+ // The end time of the time period.
+ //
+ // EndTime is a required field
+ EndTime *time.Time `locationName:"endTime" type:"timestamp" required:"true"`
+
+ // The metric for which you want to return information.
+ //
+ // Valid container service metric names are listed below, along with the most
+ // useful statistics to include in your request, and the published unit value.
+ //
+ // * CPUUtilization - The average percentage of compute units that are currently
+ // in use across all nodes of the container service. This metric identifies
+ // the processing power required to run containers on each node of the container
+ // service. Statistics: The most useful statistics are Maximum and Average.
+ // Unit: The published unit is Percent.
+ //
+ // * MemoryUtilization - The average percentage of available memory that
+ // is currently in use across all nodes of the container service. This metric
+ // identifies the memory required to run containers on each node of the container
+ // service. Statistics: The most useful statistics are Maximum and Average.
+ // Unit: The published unit is Percent.
+ //
+ // MetricName is a required field
+ MetricName *string `locationName:"metricName" type:"string" required:"true" enum:"ContainerServiceMetricName"`
+
+ // The granularity, in seconds, of the returned data points.
+ //
+ // All container service metric data is available in 5-minute (300 seconds)
+ // granularity.
+ //
+ // Period is a required field
+ Period *int64 `locationName:"period" min:"60" type:"integer" required:"true"`
+
+ // The name of the container service for which to get metric data.
+ //
+ // ServiceName is a required field
+ ServiceName *string `locationName:"serviceName" min:"1" type:"string" required:"true"`
+
+ // The start time of the time period.
+ //
+ // StartTime is a required field
+ StartTime *time.Time `locationName:"startTime" type:"timestamp" required:"true"`
+
+ // The statistic for the metric.
+ //
+ // The following statistics are available:
+ //
+ // * Minimum - The lowest value observed during the specified period. Use
+ // this value to determine low volumes of activity for your application.
+ //
+ // * Maximum - The highest value observed during the specified period. Use
+ // this value to determine high volumes of activity for your application.
+ //
+ // * Sum - All values submitted for the matching metric added together. You
+ // can use this statistic to determine the total volume of a metric.
+ //
+ // * Average - The value of Sum / SampleCount during the specified period.
+ // By comparing this statistic with the Minimum and Maximum values, you can
+ // determine the full scope of a metric and how close the average use is
+ // to the Minimum and Maximum values. This comparison helps you to know when
+ // to increase or decrease your resources.
+ //
+ // * SampleCount - The count, or number, of data points used for the statistical
+ // calculation.
+ //
+ // Statistics is a required field
+ Statistics []*string `locationName:"statistics" type:"list" required:"true"`
+}
+
+// String renders the value using awsutil.Prettify.
+func (s GetContainerServiceMetricDataInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer by delegating to String.
+func (s GetContainerServiceMetricDataInput) GoString() string {
+	return s.String()
+}
+
+// Validate reports whether all required fields are present and all
+// constrained fields satisfy their documented minimums.
+func (s *GetContainerServiceMetricDataInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetContainerServiceMetricDataInput"}
+	if s.EndTime == nil {
+		invalidParams.Add(request.NewErrParamRequired("EndTime"))
+	}
+	if s.MetricName == nil {
+		invalidParams.Add(request.NewErrParamRequired("MetricName"))
+	}
+	if s.Period == nil {
+		invalidParams.Add(request.NewErrParamRequired("Period"))
+	}
+	if s.Period != nil && *s.Period < 60 {
+		invalidParams.Add(request.NewErrParamMinValue("Period", 60))
+	}
+	if s.ServiceName == nil {
+		invalidParams.Add(request.NewErrParamRequired("ServiceName"))
+	}
+	if s.ServiceName != nil && len(*s.ServiceName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ServiceName", 1))
+	}
+	if s.StartTime == nil {
+		invalidParams.Add(request.NewErrParamRequired("StartTime"))
+	}
+	if s.Statistics == nil {
+		invalidParams.Add(request.NewErrParamRequired("Statistics"))
+	}
+
+	if invalidParams.Len() == 0 {
+		return nil
+	}
+	return invalidParams
+}
+
+// SetEndTime assigns v to the EndTime field and returns s for chaining.
+func (s *GetContainerServiceMetricDataInput) SetEndTime(v time.Time) *GetContainerServiceMetricDataInput {
+	t := v
+	s.EndTime = &t
+	return s
+}
+
+// SetMetricName assigns v to the MetricName field and returns s for chaining.
+func (s *GetContainerServiceMetricDataInput) SetMetricName(v string) *GetContainerServiceMetricDataInput {
+	name := v
+	s.MetricName = &name
+	return s
+}
+
+// SetPeriod assigns v to the Period field and returns s for chaining.
+func (s *GetContainerServiceMetricDataInput) SetPeriod(v int64) *GetContainerServiceMetricDataInput {
+	p := v
+	s.Period = &p
+	return s
+}
+
+// SetServiceName assigns v to the ServiceName field and returns s for chaining.
+func (s *GetContainerServiceMetricDataInput) SetServiceName(v string) *GetContainerServiceMetricDataInput {
+	name := v
+	s.ServiceName = &name
+	return s
+}
+
+// SetStartTime assigns v to the StartTime field and returns s for chaining.
+func (s *GetContainerServiceMetricDataInput) SetStartTime(v time.Time) *GetContainerServiceMetricDataInput {
+	t := v
+	s.StartTime = &t
+	return s
+}
+
+// SetStatistics assigns v to the Statistics field and returns s for chaining.
+func (s *GetContainerServiceMetricDataInput) SetStatistics(v []*string) *GetContainerServiceMetricDataInput {
+	s.Statistics = v
+	return s
+}
+
+// GetContainerServiceMetricDataOutput carries the result of a
+// GetContainerServiceMetricData request.
+type GetContainerServiceMetricDataOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The metric data points returned by the request.
+	MetricData []*MetricDatapoint `locationName:"metricData" type:"list"`
+
+	// The name of the metric the returned data points belong to.
+	MetricName *string `locationName:"metricName" type:"string" enum:"ContainerServiceMetricName"`
+}
+
+// String renders the value using awsutil.Prettify.
+func (s GetContainerServiceMetricDataOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer by delegating to String.
+func (s GetContainerServiceMetricDataOutput) GoString() string {
+	return s.String()
+}
+
+// SetMetricData assigns v to the MetricData field and returns s for chaining.
+func (s *GetContainerServiceMetricDataOutput) SetMetricData(v []*MetricDatapoint) *GetContainerServiceMetricDataOutput {
+	s.MetricData = v
+	return s
+}
+
+// SetMetricName assigns v to the MetricName field and returns s for chaining.
+func (s *GetContainerServiceMetricDataOutput) SetMetricName(v string) *GetContainerServiceMetricDataOutput {
+	name := v
+	s.MetricName = &name
+	return s
+}
+
+// GetContainerServicePowersInput is the (empty) request for
+// GetContainerServicePowers.
+type GetContainerServicePowersInput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String renders the value using awsutil.Prettify.
+func (s GetContainerServicePowersInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer by delegating to String.
+func (s GetContainerServicePowersInput) GoString() string {
+	return s.String()
+}
+
+// GetContainerServicePowersOutput carries the result of a
+// GetContainerServicePowers request.
+type GetContainerServicePowersOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The powers that can be specified for a container service.
+	Powers []*ContainerServicePower `locationName:"powers" type:"list"`
+}
+
+// String renders the value using awsutil.Prettify.
+func (s GetContainerServicePowersOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer by delegating to String.
+func (s GetContainerServicePowersOutput) GoString() string {
+	return s.String()
+}
+
+// SetPowers assigns v to the Powers field and returns s for chaining.
+func (s *GetContainerServicePowersOutput) SetPowers(v []*ContainerServicePower) *GetContainerServicePowersOutput {
+	s.Powers = v
+	return s
+}
+
+// GetContainerServicesInput is the request for GetContainerServices.
+type GetContainerServicesInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the container service to return information about. When
+	// omitted, the response includes every container service in the AWS Region
+	// where the request is made.
+	ServiceName *string `locationName:"serviceName" min:"1" type:"string"`
+}
+
+// String renders the value using awsutil.Prettify.
+func (s GetContainerServicesInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer by delegating to String.
+func (s GetContainerServicesInput) GoString() string {
+	return s.String()
+}
+
+// Validate reports whether the optional ServiceName, when set, satisfies its
+// minimum length.
+func (s *GetContainerServicesInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetContainerServicesInput"}
+	if s.ServiceName != nil && len(*s.ServiceName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ServiceName", 1))
+	}
+
+	if invalidParams.Len() == 0 {
+		return nil
+	}
+	return invalidParams
+}
+
+// SetServiceName assigns v to the ServiceName field and returns s for chaining.
+func (s *GetContainerServicesInput) SetServiceName(v string) *GetContainerServicesInput {
+	name := v
+	s.ServiceName = &name
+	return s
+}
+
+// GetContainerServicesOutput carries the result of a GetContainerServices
+// request.
+type GetContainerServicesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// One or more container services, described as objects.
+	ContainerServices []*ContainerService `locationName:"containerServices" type:"list"`
+}
+
+// String renders the value using awsutil.Prettify.
+func (s GetContainerServicesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer by delegating to String.
+func (s GetContainerServicesOutput) GoString() string {
+	return s.String()
+}
+
+// SetContainerServices assigns v to the ContainerServices field and returns s
+// for chaining.
+func (s *GetContainerServicesOutput) SetContainerServices(v []*ContainerService) *GetContainerServicesOutput {
+	s.ContainerServices = v
+	return s
+}
+
+// GetDiskInput is the request for GetDisk.
+type GetDiskInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the disk (e.g., my-disk).
+	//
+	// DiskName is a required field
+	DiskName *string `locationName:"diskName" type:"string" required:"true"`
+}
+
+// String renders the value using awsutil.Prettify.
+func (s GetDiskInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer by delegating to String.
+func (s GetDiskInput) GoString() string {
+	return s.String()
+}
+
+// Validate reports whether the required DiskName field is present.
+func (s *GetDiskInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetDiskInput"}
+	if s.DiskName == nil {
+		invalidParams.Add(request.NewErrParamRequired("DiskName"))
+	}
+
+	if invalidParams.Len() == 0 {
+		return nil
+	}
+	return invalidParams
+}
+
+// SetDiskName assigns v to the DiskName field and returns s for chaining.
+func (s *GetDiskInput) SetDiskName(v string) *GetDiskInput {
+	name := v
+	s.DiskName = &name
+	return s
+}
+
+// GetDiskOutput carries the result of a GetDisk request.
+type GetDiskOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Information about the requested disk.
+	Disk *Disk `locationName:"disk" type:"structure"`
+}
+
+// String renders the value using awsutil.Prettify.
+func (s GetDiskOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer by delegating to String.
+func (s GetDiskOutput) GoString() string {
+	return s.String()
+}
+
+// SetDisk assigns v to the Disk field and returns s for chaining.
+func (s *GetDiskOutput) SetDisk(v *Disk) *GetDiskOutput {
+	s.Disk = v
+	return s
+}
+
+// GetDiskSnapshotInput is the request for GetDiskSnapshot.
+type GetDiskSnapshotInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the disk snapshot (e.g., my-disk-snapshot).
+	//
+	// DiskSnapshotName is a required field
+	DiskSnapshotName *string `locationName:"diskSnapshotName" type:"string" required:"true"`
+}
+
+// String renders the value using awsutil.Prettify.
+func (s GetDiskSnapshotInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer by delegating to String.
+func (s GetDiskSnapshotInput) GoString() string {
+	return s.String()
+}
+
+// Validate reports whether the required DiskSnapshotName field is present.
+func (s *GetDiskSnapshotInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetDiskSnapshotInput"}
+	if s.DiskSnapshotName == nil {
+		invalidParams.Add(request.NewErrParamRequired("DiskSnapshotName"))
+	}
+
+	if invalidParams.Len() == 0 {
+		return nil
+	}
+	return invalidParams
+}
+
+// SetDiskSnapshotName assigns v to the DiskSnapshotName field and returns s
+// for chaining.
+func (s *GetDiskSnapshotInput) SetDiskSnapshotName(v string) *GetDiskSnapshotInput {
+	name := v
+	s.DiskSnapshotName = &name
+	return s
+}
+
+// GetDiskSnapshotOutput carries the result of a GetDiskSnapshot request.
+type GetDiskSnapshotOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Information about the requested disk snapshot.
+	DiskSnapshot *DiskSnapshot `locationName:"diskSnapshot" type:"structure"`
+}
+
+// String renders the value using awsutil.Prettify.
+func (s GetDiskSnapshotOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer by delegating to String.
+func (s GetDiskSnapshotOutput) GoString() string {
+	return s.String()
+}
+
+// SetDiskSnapshot assigns v to the DiskSnapshot field and returns s for chaining.
+func (s *GetDiskSnapshotOutput) SetDiskSnapshot(v *DiskSnapshot) *GetDiskSnapshotOutput {
+	s.DiskSnapshot = v
+	return s
+}
+
+// GetDiskSnapshotsInput is the request for GetDiskSnapshots.
+type GetDiskSnapshotsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The token to advance to the next page of results. Obtain it from the
+	// nextPageToken of a prior GetDiskSnapshots response when results are
+	// paginated.
+	PageToken *string `locationName:"pageToken" type:"string"`
+}
+
+// String renders the value using awsutil.Prettify.
+func (s GetDiskSnapshotsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer by delegating to String.
+func (s GetDiskSnapshotsInput) GoString() string {
+	return s.String()
+}
+
+// SetPageToken assigns v to the PageToken field and returns s for chaining.
+func (s *GetDiskSnapshotsInput) SetPageToken(v string) *GetDiskSnapshotsInput {
+	token := v
+	s.PageToken = &token
+	return s
+}
+
+// GetDiskSnapshotsOutput carries the result of a GetDiskSnapshots request.
+type GetDiskSnapshotsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// All block storage disk snapshots, described as objects.
+	DiskSnapshots []*DiskSnapshot `locationName:"diskSnapshots" type:"list"`
+
+	// The token for the next page of results; absent when there are no more
+	// results. Pass it as the pageToken of another GetDiskSnapshots request.
+	NextPageToken *string `locationName:"nextPageToken" type:"string"`
+}
+
+// String renders the value using awsutil.Prettify.
+func (s GetDiskSnapshotsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer by delegating to String.
+func (s GetDiskSnapshotsOutput) GoString() string {
+	return s.String()
+}
+
+// SetDiskSnapshots assigns v to the DiskSnapshots field and returns s for
+// chaining.
+func (s *GetDiskSnapshotsOutput) SetDiskSnapshots(v []*DiskSnapshot) *GetDiskSnapshotsOutput {
+	s.DiskSnapshots = v
+	return s
+}
+
+// SetNextPageToken assigns v to the NextPageToken field and returns s for
+// chaining.
+func (s *GetDiskSnapshotsOutput) SetNextPageToken(v string) *GetDiskSnapshotsOutput {
+	token := v
+	s.NextPageToken = &token
+	return s
+}
+
+// GetDisksInput is the request for GetDisks.
+type GetDisksInput struct {
+	_ struct{} `type:"structure"`
+
+	// The token to advance to the next page of results. Obtain it from the
+	// nextPageToken of a prior GetDisks response when results are paginated.
+	PageToken *string `locationName:"pageToken" type:"string"`
+}
+
+// String renders the value using awsutil.Prettify.
+func (s GetDisksInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer by delegating to String.
+func (s GetDisksInput) GoString() string {
+	return s.String()
+}
+
+// SetPageToken assigns v to the PageToken field and returns s for chaining.
+func (s *GetDisksInput) SetPageToken(v string) *GetDisksInput {
+	token := v
+	s.PageToken = &token
+	return s
+}
+
+// GetDisksOutput carries the result of a GetDisks request.
+type GetDisksOutput struct {
+	_ struct{} `type:"structure"`
+
+	// All block storage disks, described as objects.
+	Disks []*Disk `locationName:"disks" type:"list"`
+
+	// The token for the next page of results; absent when there are no more
+	// results. Pass it as the pageToken of another GetDisks request.
+	NextPageToken *string `locationName:"nextPageToken" type:"string"`
+}
+
+// String renders the value using awsutil.Prettify.
+func (s GetDisksOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer by delegating to String.
+func (s GetDisksOutput) GoString() string {
+	return s.String()
+}
+
+// SetDisks assigns v to the Disks field and returns s for chaining.
+func (s *GetDisksOutput) SetDisks(v []*Disk) *GetDisksOutput {
+	s.Disks = v
+	return s
+}
+
+// SetNextPageToken assigns v to the NextPageToken field and returns s for
+// chaining.
+func (s *GetDisksOutput) SetNextPageToken(v string) *GetDisksOutput {
+	token := v
+	s.NextPageToken = &token
+	return s
+}
+
+// GetDistributionBundlesInput is the (empty) request for
+// GetDistributionBundles.
+type GetDistributionBundlesInput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String renders the value using awsutil.Prettify.
+func (s GetDistributionBundlesInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer by delegating to String.
+func (s GetDistributionBundlesInput) GoString() string {
+	return s.String()
+}
+
+// GetDistributionBundlesOutput carries the result of a GetDistributionBundles
+// request.
+type GetDistributionBundlesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The available distribution bundles, described as objects.
+	Bundles []*DistributionBundle `locationName:"bundles" type:"list"`
+}
+
+// String renders the value using awsutil.Prettify.
+func (s GetDistributionBundlesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer by delegating to String.
+func (s GetDistributionBundlesOutput) GoString() string {
+	return s.String()
+}
+
+// SetBundles assigns v to the Bundles field and returns s for chaining.
+func (s *GetDistributionBundlesOutput) SetBundles(v []*DistributionBundle) *GetDistributionBundlesOutput {
+	s.Bundles = v
+	return s
+}
+
+// GetDistributionLatestCacheResetInput is the request for
+// GetDistributionLatestCacheReset.
+type GetDistributionLatestCacheResetInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the distribution whose last cache reset timestamp should be
+	// returned. Use the GetDistributions action to list the distribution names
+	// you can specify. When omitted, the response covers the latest cache reset
+	// of all your distributions.
+	DistributionName *string `locationName:"distributionName" type:"string"`
+}
+
+// String renders the value using awsutil.Prettify.
+func (s GetDistributionLatestCacheResetInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer by delegating to String.
+func (s GetDistributionLatestCacheResetInput) GoString() string {
+	return s.String()
+}
+
+// SetDistributionName assigns v to the DistributionName field and returns s
+// for chaining.
+func (s *GetDistributionLatestCacheResetInput) SetDistributionName(v string) *GetDistributionLatestCacheResetInput {
+	name := v
+	s.DistributionName = &name
+	return s
+}
+
+// GetDistributionLatestCacheResetOutput carries the result of a
+// GetDistributionLatestCacheReset request.
+type GetDistributionLatestCacheResetOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The timestamp of the last cache reset (e.g., 1479734909.17) in Unix time
+	// format.
+	CreateTime *time.Time `locationName:"createTime" type:"timestamp"`
+
+	// The status of the last cache reset.
+	Status *string `locationName:"status" type:"string"`
+}
+
+// String renders the value using awsutil.Prettify.
+func (s GetDistributionLatestCacheResetOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer by delegating to String.
+func (s GetDistributionLatestCacheResetOutput) GoString() string {
+	return s.String()
+}
+
+// SetCreateTime assigns v to the CreateTime field and returns s for chaining.
+func (s *GetDistributionLatestCacheResetOutput) SetCreateTime(v time.Time) *GetDistributionLatestCacheResetOutput {
+	t := v
+	s.CreateTime = &t
+	return s
+}
+
+// SetStatus assigns v to the Status field and returns s for chaining.
+func (s *GetDistributionLatestCacheResetOutput) SetStatus(v string) *GetDistributionLatestCacheResetOutput {
+	status := v
+	s.Status = &status
+	return s
+}
+
+// GetDistributionMetricDataInput is the request for GetDistributionMetricData.
+type GetDistributionMetricDataInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the distribution for which to get metric data. Use the
+	// GetDistributions action to list the distribution names you can specify.
+	//
+	// DistributionName is a required field
+	DistributionName *string `locationName:"distributionName" type:"string" required:"true"`
+
+	// The end of the time interval for which to get metric data, specified in
+	// Coordinated Universal Time (UTC) and in Unix time format (e.g.,
+	// 1538427600 for October 1, 2018, at 9 PM UTC). A converter such as Epoch
+	// converter (https://www.epochconverter.com/) translates human-friendly
+	// times to Unix time format.
+	//
+	// EndTime is a required field
+	EndTime *time.Time `locationName:"endTime" type:"timestamp" required:"true"`
+
+	// The metric for which you want to return information. Valid distribution
+	// metric names are listed below, along with the most useful statistic and
+	// the published unit:
+	//
+	//    * Requests - The total number of viewer requests received by your
+	//    Lightsail distribution, for all HTTP methods, and for both HTTP and
+	//    HTTPS requests. Statistics: The most useful statistic is Sum. Unit:
+	//    The published unit is None.
+	//
+	//    * BytesDownloaded - The number of bytes downloaded by viewers for GET,
+	//    HEAD, and OPTIONS requests. Statistics: The most useful statistic is
+	//    Sum. Unit: The published unit is None.
+	//
+	//    * BytesUploaded - The number of bytes uploaded to your origin by your
+	//    Lightsail distribution, using POST and PUT requests. Statistics: The
+	//    most useful statistic is Sum. Unit: The published unit is None.
+	//
+	//    * TotalErrorRate - The percentage of all viewer requests for which the
+	//    response's HTTP status code was 4xx or 5xx. Statistics: The most
+	//    useful statistic is Average. Unit: The published unit is Percent.
+	//
+	//    * 4xxErrorRate - The percentage of all viewer requests for which the
+	//    response's HTTP status code was 4xx. In these cases, the client or
+	//    client viewer may have made an error; for example, a status code of
+	//    404 (Not Found) means the client requested an object that could not be
+	//    found. Statistics: The most useful statistic is Average. Unit: The
+	//    published unit is Percent.
+	//
+	//    * 5xxErrorRate - The percentage of all viewer requests for which the
+	//    response's HTTP status code was 5xx. In these cases, the origin server
+	//    did not satisfy the request; for example, a status code of 503
+	//    (Service Unavailable) means the origin server is currently
+	//    unavailable. Statistics: The most useful statistic is Average. Unit:
+	//    The published unit is Percent.
+	//
+	// MetricName is a required field
+	MetricName *string `locationName:"metricName" type:"string" required:"true" enum:"DistributionMetricName"`
+
+	// The granularity, in seconds, for the metric data points that will be
+	// returned.
+	//
+	// Period is a required field
+	Period *int64 `locationName:"period" min:"60" type:"integer" required:"true"`
+
+	// The start of the time interval for which to get metric data, specified in
+	// Coordinated Universal Time (UTC) and in Unix time format (e.g.,
+	// 1538424000 for October 1, 2018, at 8 PM UTC). A converter such as Epoch
+	// converter (https://www.epochconverter.com/) translates human-friendly
+	// times to Unix time format.
+	//
+	// StartTime is a required field
+	StartTime *time.Time `locationName:"startTime" type:"timestamp" required:"true"`
+
+	// The statistic for the metric. The following statistics are available:
+	//
+	//    * Minimum - The lowest value observed during the specified period;
+	//    indicates low volumes of activity for your application.
+	//
+	//    * Maximum - The highest value observed during the specified period;
+	//    indicates high volumes of activity for your application.
+	//
+	//    * Sum - All values submitted for the matching metric added together;
+	//    indicates the total volume of a metric.
+	//
+	//    * Average - The value of Sum / SampleCount during the specified
+	//    period. Comparing it with Minimum and Maximum shows the full scope of
+	//    a metric and how close average use is to those extremes, which helps
+	//    you decide when to increase or decrease your resources.
+	//
+	//    * SampleCount - The count, or number, of data points used for the
+	//    statistical calculation.
+	//
+	// Statistics is a required field
+	Statistics []*string `locationName:"statistics" type:"list" required:"true"`
+
+	// The unit for the metric data request. Valid units depend on the metric
+	// data being requested; for the valid units with each available metric, see
+	// the metricName parameter.
+	//
+	// Unit is a required field
+	Unit *string `locationName:"unit" type:"string" required:"true" enum:"MetricUnit"`
+}
+
+// String renders the value using awsutil.Prettify.
+func (s GetDistributionMetricDataInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer by delegating to String.
+func (s GetDistributionMetricDataInput) GoString() string {
+	return s.String()
+}
+
+// Validate reports whether all required fields are present and all
+// constrained fields satisfy their documented minimums.
+func (s *GetDistributionMetricDataInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetDistributionMetricDataInput"}
+	if s.DistributionName == nil {
+		invalidParams.Add(request.NewErrParamRequired("DistributionName"))
+	}
+	if s.EndTime == nil {
+		invalidParams.Add(request.NewErrParamRequired("EndTime"))
+	}
+	if s.MetricName == nil {
+		invalidParams.Add(request.NewErrParamRequired("MetricName"))
+	}
+	if s.Period == nil {
+		invalidParams.Add(request.NewErrParamRequired("Period"))
+	}
+	if s.Period != nil && *s.Period < 60 {
+		invalidParams.Add(request.NewErrParamMinValue("Period", 60))
+	}
+	if s.StartTime == nil {
+		invalidParams.Add(request.NewErrParamRequired("StartTime"))
+	}
+	if s.Statistics == nil {
+		invalidParams.Add(request.NewErrParamRequired("Statistics"))
+	}
+	if s.Unit == nil {
+		invalidParams.Add(request.NewErrParamRequired("Unit"))
+	}
+
+	if invalidParams.Len() == 0 {
+		return nil
+	}
+	return invalidParams
+}
+
+// SetDistributionName assigns v to the DistributionName field and returns s
+// for chaining.
+func (s *GetDistributionMetricDataInput) SetDistributionName(v string) *GetDistributionMetricDataInput {
+	name := v
+	s.DistributionName = &name
+	return s
+}
+
+// SetEndTime assigns v to the EndTime field and returns s for chaining.
+func (s *GetDistributionMetricDataInput) SetEndTime(v time.Time) *GetDistributionMetricDataInput {
+	t := v
+	s.EndTime = &t
+	return s
+}
+
+// SetMetricName assigns v to the MetricName field and returns s for chaining.
+func (s *GetDistributionMetricDataInput) SetMetricName(v string) *GetDistributionMetricDataInput {
+	name := v
+	s.MetricName = &name
+	return s
+}
+
+// SetPeriod assigns v to the Period field and returns s for chaining.
+func (s *GetDistributionMetricDataInput) SetPeriod(v int64) *GetDistributionMetricDataInput {
+	p := v
+	s.Period = &p
+	return s
+}
+
+// SetStartTime assigns v to the StartTime field and returns s for chaining.
+func (s *GetDistributionMetricDataInput) SetStartTime(v time.Time) *GetDistributionMetricDataInput {
+	t := v
+	s.StartTime = &t
+	return s
+}
+
+// SetStatistics assigns v to the Statistics field and returns s for chaining.
+func (s *GetDistributionMetricDataInput) SetStatistics(v []*string) *GetDistributionMetricDataInput {
+	s.Statistics = v
+	return s
+}
+
+// SetUnit assigns v to the Unit field and returns s for chaining.
+func (s *GetDistributionMetricDataInput) SetUnit(v string) *GetDistributionMetricDataInput {
+	unit := v
+	s.Unit = &unit
+	return s
+}
+
+// GetDistributionMetricDataOutput carries the result of a
+// GetDistributionMetricData request.
+type GetDistributionMetricDataOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The metric data points returned by the request.
+	MetricData []*MetricDatapoint `locationName:"metricData" type:"list"`
+
+	// The name of the metric the returned data points belong to.
+	MetricName *string `locationName:"metricName" type:"string" enum:"DistributionMetricName"`
+}
+
+// String renders the value using awsutil.Prettify.
+func (s GetDistributionMetricDataOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer by delegating to String.
+func (s GetDistributionMetricDataOutput) GoString() string {
+	return s.String()
+}
+
+// SetMetricData assigns v to the MetricData field and returns s for chaining.
+func (s *GetDistributionMetricDataOutput) SetMetricData(v []*MetricDatapoint) *GetDistributionMetricDataOutput {
+	s.MetricData = v
+	return s
+}
+
+// SetMetricName assigns v to the MetricName field and returns s for chaining.
+func (s *GetDistributionMetricDataOutput) SetMetricName(v string) *GetDistributionMetricDataOutput {
+	name := v
+	s.MetricName = &name
+	return s
+}
+
+// GetDistributionsInput is the request for GetDistributions.
+type GetDistributionsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the distribution to return information about. When omitted,
+	// the response includes all of your distributions in the AWS Region where
+	// the request is made.
+	DistributionName *string `locationName:"distributionName" type:"string"`
+
+	// The token to advance to the next page of results. Obtain it from the
+	// nextPageToken of a prior GetDistributions response when results are
+	// paginated.
+	PageToken *string `locationName:"pageToken" type:"string"`
+}
+
+// String renders the value using awsutil.Prettify.
+func (s GetDistributionsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer by delegating to String.
+func (s GetDistributionsInput) GoString() string {
+	return s.String()
+}
+
+// SetDistributionName assigns v to the DistributionName field and returns s
+// for chaining.
+func (s *GetDistributionsInput) SetDistributionName(v string) *GetDistributionsInput {
+	name := v
+	s.DistributionName = &name
+	return s
+}
+
+// SetPageToken assigns v to the PageToken field and returns s for chaining.
+func (s *GetDistributionsInput) SetPageToken(v string) *GetDistributionsInput {
+	token := v
+	s.PageToken = &token
+	return s
+}
+
+// GetDistributionsOutput carries the result of a GetDistributions request.
+type GetDistributionsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Your distributions, described as objects.
+	Distributions []*LightsailDistribution `locationName:"distributions" type:"list"`
+
+	// The token for the next page of results; absent when there are no more
+	// results. Pass it as the pageToken of another GetDistributions request.
+	NextPageToken *string `locationName:"nextPageToken" type:"string"`
+}
+
+// String renders the value using awsutil.Prettify.
+func (s GetDistributionsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer by delegating to String.
+func (s GetDistributionsOutput) GoString() string {
+	return s.String()
+}
+
+// SetDistributions assigns v to the Distributions field and returns s for
+// chaining.
+func (s *GetDistributionsOutput) SetDistributions(v []*LightsailDistribution) *GetDistributionsOutput {
+	s.Distributions = v
+	return s
+}
+
+// SetNextPageToken assigns v to the NextPageToken field and returns s for
+// chaining.
+func (s *GetDistributionsOutput) SetNextPageToken(v string) *GetDistributionsOutput {
+	token := v
+	s.NextPageToken = &token
+	return s
+}
+
+// GetDomainInput is the request for GetDomain.
+type GetDomainInput struct {
+	_ struct{} `type:"structure"`
+
+	// The domain name for which you want to return information.
+	//
+	// DomainName is a required field
+	DomainName *string `locationName:"domainName" type:"string" required:"true"`
+}
+
+// String renders the value using awsutil.Prettify.
+func (s GetDomainInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer by delegating to String.
+func (s GetDomainInput) GoString() string {
+	return s.String()
+}
+
+// Validate reports whether the required DomainName field is present.
+func (s *GetDomainInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetDomainInput"}
+	if s.DomainName == nil {
+		invalidParams.Add(request.NewErrParamRequired("DomainName"))
+	}
+
+	if invalidParams.Len() == 0 {
+		return nil
+	}
+	return invalidParams
+}
+
+// SetDomainName assigns v to the DomainName field and returns s for chaining.
+func (s *GetDomainInput) SetDomainName(v string) *GetDomainInput {
+	name := v
+	s.DomainName = &name
+	return s
+}
+
+// GetDomainOutput carries the result of a GetDomain request.
+type GetDomainOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Key-value pair information about the requested domain.
+	Domain *Domain `locationName:"domain" type:"structure"`
+}
+
+// String renders the value using awsutil.Prettify.
+func (s GetDomainOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer by delegating to String.
+func (s GetDomainOutput) GoString() string {
+	return s.String()
+}
+
+// SetDomain assigns v to the Domain field and returns s for chaining.
+func (s *GetDomainOutput) SetDomain(v *Domain) *GetDomainOutput {
+	s.Domain = v
+	return s
+}
+
+// GetDomainsInput is the request for GetDomains.
+type GetDomainsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The token to advance to the next page of results. Obtain it from the
+	// nextPageToken of a prior GetDomains response when results are paginated.
+	PageToken *string `locationName:"pageToken" type:"string"`
+}
+
+// String renders the value using awsutil.Prettify.
+func (s GetDomainsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer by delegating to String.
+func (s GetDomainsInput) GoString() string {
+	return s.String()
+}
+
+// SetPageToken assigns v to the PageToken field and returns s for chaining.
+func (s *GetDomainsInput) SetPageToken(v string) *GetDomainsInput {
+	token := v
+	s.PageToken = &token
+	return s
+}
+
+// GetDomainsOutput carries the result of a GetDomains request.
+type GetDomainsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Key-value pair information about each of the domain entries in the
+	// user's account.
+	Domains []*Domain `locationName:"domains" type:"list"`
+
+	// The token for the next page of results; absent when there are no more
+	// results. Pass it as the pageToken of another GetDomains request.
+	NextPageToken *string `locationName:"nextPageToken" type:"string"`
+}
+
+// String renders the value using awsutil.Prettify.
+func (s GetDomainsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer by delegating to String.
+func (s GetDomainsOutput) GoString() string {
+	return s.String()
+}
+
+// SetDomains assigns v to the Domains field and returns s for chaining.
+func (s *GetDomainsOutput) SetDomains(v []*Domain) *GetDomainsOutput {
+	s.Domains = v
+	return s
+}
+
+// SetNextPageToken assigns v to the NextPageToken field and returns s for
+// chaining.
+func (s *GetDomainsOutput) SetNextPageToken(v string) *GetDomainsOutput {
+	token := v
+	s.NextPageToken = &token
+	return s
+}
+
// GetExportSnapshotRecordsInput is the input for the GetExportSnapshotRecords
// operation.
type GetExportSnapshotRecordsInput struct {
	_ struct{} `type:"structure"`

	// The token to advance to the next page of results from your request.
	//
	// To get a page token, perform an initial GetExportSnapshotRecords request.
	// If your results are paginated, the response will return a next page token
	// that you can specify as the page token in a subsequent request.
	PageToken *string `locationName:"pageToken" type:"string"`
}

// String returns the string representation
func (s GetExportSnapshotRecordsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetExportSnapshotRecordsInput) GoString() string {
	return s.String()
}

// SetPageToken sets the PageToken field's value.
func (s *GetExportSnapshotRecordsInput) SetPageToken(v string) *GetExportSnapshotRecordsInput {
	s.PageToken = &v
	return s
}
+
// GetExportSnapshotRecordsOutput is the output of the GetExportSnapshotRecords
// operation.
type GetExportSnapshotRecordsOutput struct {
	_ struct{} `type:"structure"`

	// A list of objects describing the export snapshot records.
	ExportSnapshotRecords []*ExportSnapshotRecord `locationName:"exportSnapshotRecords" type:"list"`

	// The token to advance to the next page of results from your request.
	//
	// A next page token is not returned if there are no more results to display.
	//
	// To get the next page of results, perform another GetExportSnapshotRecords
	// request and specify the next page token using the pageToken parameter.
	NextPageToken *string `locationName:"nextPageToken" type:"string"`
}

// String returns the string representation
func (s GetExportSnapshotRecordsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetExportSnapshotRecordsOutput) GoString() string {
	return s.String()
}

// SetExportSnapshotRecords sets the ExportSnapshotRecords field's value.
func (s *GetExportSnapshotRecordsOutput) SetExportSnapshotRecords(v []*ExportSnapshotRecord) *GetExportSnapshotRecordsOutput {
	s.ExportSnapshotRecords = v
	return s
}

// SetNextPageToken sets the NextPageToken field's value.
func (s *GetExportSnapshotRecordsOutput) SetNextPageToken(v string) *GetExportSnapshotRecordsOutput {
	s.NextPageToken = &v
	return s
}
+
// GetInstanceAccessDetailsInput is the input for the GetInstanceAccessDetails
// operation.
type GetInstanceAccessDetailsInput struct {
	_ struct{} `type:"structure"`

	// The name of the instance to access.
	//
	// InstanceName is a required field
	InstanceName *string `locationName:"instanceName" type:"string" required:"true"`

	// The protocol to use to connect to your instance. Defaults to ssh.
	Protocol *string `locationName:"protocol" type:"string" enum:"InstanceAccessProtocol"`
}

// String returns the string representation
func (s GetInstanceAccessDetailsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetInstanceAccessDetailsInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetInstanceAccessDetailsInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetInstanceAccessDetailsInput"}
	if s.InstanceName == nil {
		invalidParams.Add(request.NewErrParamRequired("InstanceName"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetInstanceName sets the InstanceName field's value.
func (s *GetInstanceAccessDetailsInput) SetInstanceName(v string) *GetInstanceAccessDetailsInput {
	s.InstanceName = &v
	return s
}

// SetProtocol sets the Protocol field's value.
func (s *GetInstanceAccessDetailsInput) SetProtocol(v string) *GetInstanceAccessDetailsInput {
	s.Protocol = &v
	return s
}
+
// GetInstanceAccessDetailsOutput is the output of the GetInstanceAccessDetails
// operation.
type GetInstanceAccessDetailsOutput struct {
	_ struct{} `type:"structure"`

	// An array of key-value pairs containing information about a get instance access
	// request.
	AccessDetails *InstanceAccessDetails `locationName:"accessDetails" type:"structure"`
}

// String returns the string representation
func (s GetInstanceAccessDetailsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetInstanceAccessDetailsOutput) GoString() string {
	return s.String()
}

// SetAccessDetails sets the AccessDetails field's value.
func (s *GetInstanceAccessDetailsOutput) SetAccessDetails(v *InstanceAccessDetails) *GetInstanceAccessDetailsOutput {
	s.AccessDetails = v
	return s
}
+
// GetInstanceInput is the input for the GetInstance operation.
type GetInstanceInput struct {
	_ struct{} `type:"structure"`

	// The name of the instance.
	//
	// InstanceName is a required field
	InstanceName *string `locationName:"instanceName" type:"string" required:"true"`
}

// String returns the string representation
func (s GetInstanceInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetInstanceInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetInstanceInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetInstanceInput"}
	if s.InstanceName == nil {
		invalidParams.Add(request.NewErrParamRequired("InstanceName"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetInstanceName sets the InstanceName field's value.
func (s *GetInstanceInput) SetInstanceName(v string) *GetInstanceInput {
	s.InstanceName = &v
	return s
}
+
// GetInstanceMetricDataInput is the input for the GetInstanceMetricData
// operation.
type GetInstanceMetricDataInput struct {
	_ struct{} `type:"structure"`

	// The end time of the time period.
	//
	// EndTime is a required field
	EndTime *time.Time `locationName:"endTime" type:"timestamp" required:"true"`

	// The name of the instance for which you want to get metrics data.
	//
	// InstanceName is a required field
	InstanceName *string `locationName:"instanceName" type:"string" required:"true"`

	// The metric for which you want to return information.
	//
	// Valid instance metric names are listed below, along with the most useful
	// statistics to include in your request, and the published unit value.
	//
	//    * BurstCapacityPercentage - The percentage of CPU performance available
	//    for your instance to burst above its baseline. Your instance continuously
	//    accrues and consumes burst capacity. Burst capacity stops accruing when
	//    your instance's BurstCapacityPercentage reaches 100%. For more information,
	//    see Viewing instance burst capacity in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-viewing-instance-burst-capacity).
	//    Statistics: The most useful statistics are Maximum and Average. Unit:
	//    The published unit is Percent.
	//
	//    * BurstCapacityTime - The available amount of time for your instance to
	//    burst at 100% CPU utilization. Your instance continuously accrues and
	//    consumes burst capacity. Burst capacity time stops accruing when your
	//    instance's BurstCapacityPercentage metric reaches 100%. Burst capacity
	//    time is consumed at the full rate only when your instance operates at
	//    100% CPU utilization. For example, if your instance operates at 50% CPU
	//    utilization in the burstable zone for a 5-minute period, then it consumes
	//    CPU burst capacity minutes at a 50% rate in that period. Your instance
	//    consumed 2 minutes and 30 seconds of CPU burst capacity minutes in the
	//    5-minute period. For more information, see Viewing instance burst capacity
	//    in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-viewing-instance-burst-capacity).
	//    Statistics: The most useful statistics are Maximum and Average. Unit:
	//    The published unit is Seconds.
	//
	//    * CPUUtilization - The percentage of allocated compute units that are
	//    currently in use on the instance. This metric identifies the processing
	//    power to run the applications on the instance. Tools in your operating
	//    system can show a lower percentage than Lightsail when the instance is
	//    not allocated a full processor core. Statistics: The most useful statistics
	//    are Maximum and Average. Unit: The published unit is Percent.
	//
	//    * NetworkIn - The number of bytes received on all network interfaces by
	//    the instance. This metric identifies the volume of incoming network traffic
	//    to the instance. The number reported is the number of bytes received during
	//    the period. Because this metric is reported in 5-minute intervals, divide
	//    the reported number by 300 to find Bytes/second. Statistics: The most
	//    useful statistic is Sum. Unit: The published unit is Bytes.
	//
	//    * NetworkOut - The number of bytes sent out on all network interfaces
	//    by the instance. This metric identifies the volume of outgoing network
	//    traffic from the instance. The number reported is the number of bytes
	//    sent during the period. Because this metric is reported in 5-minute intervals,
	//    divide the reported number by 300 to find Bytes/second. Statistics: The
	//    most useful statistic is Sum. Unit: The published unit is Bytes.
	//
	//    * StatusCheckFailed - Reports whether the instance passed or failed both
	//    the instance status check and the system status check. This metric can
	//    be either 0 (passed) or 1 (failed). This metric data is available in 1-minute
	//    (60 seconds) granularity. Statistics: The most useful statistic is Sum.
	//    Unit: The published unit is Count.
	//
	//    * StatusCheckFailed_Instance - Reports whether the instance passed or
	//    failed the instance status check. This metric can be either 0 (passed)
	//    or 1 (failed). This metric data is available in 1-minute (60 seconds)
	//    granularity. Statistics: The most useful statistic is Sum. Unit: The published
	//    unit is Count.
	//
	//    * StatusCheckFailed_System - Reports whether the instance passed or failed
	//    the system status check. This metric can be either 0 (passed) or 1 (failed).
	//    This metric data is available in 1-minute (60 seconds) granularity. Statistics:
	//    The most useful statistic is Sum. Unit: The published unit is Count.
	//
	// MetricName is a required field
	MetricName *string `locationName:"metricName" type:"string" required:"true" enum:"InstanceMetricName"`

	// The granularity, in seconds, of the returned data points.
	//
	// The StatusCheckFailed, StatusCheckFailed_Instance, and StatusCheckFailed_System
	// instance metric data is available in 1-minute (60 seconds) granularity. All
	// other instance metric data is available in 5-minute (300 seconds) granularity.
	//
	// Period is a required field
	Period *int64 `locationName:"period" min:"60" type:"integer" required:"true"`

	// The start time of the time period.
	//
	// StartTime is a required field
	StartTime *time.Time `locationName:"startTime" type:"timestamp" required:"true"`

	// The statistic for the metric.
	//
	// The following statistics are available:
	//
	//    * Minimum - The lowest value observed during the specified period. Use
	//    this value to determine low volumes of activity for your application.
	//
	//    * Maximum - The highest value observed during the specified period. Use
	//    this value to determine high volumes of activity for your application.
	//
	//    * Sum - All values submitted for the matching metric added together. You
	//    can use this statistic to determine the total volume of a metric.
	//
	//    * Average - The value of Sum / SampleCount during the specified period.
	//    By comparing this statistic with the Minimum and Maximum values, you can
	//    determine the full scope of a metric and how close the average use is
	//    to the Minimum and Maximum values. This comparison helps you to know when
	//    to increase or decrease your resources.
	//
	//    * SampleCount - The count, or number, of data points used for the statistical
	//    calculation.
	//
	// Statistics is a required field
	Statistics []*string `locationName:"statistics" type:"list" required:"true"`

	// The unit for the metric data request. Valid units depend on the metric data
	// being requested. For the valid units to specify with each available metric,
	// see the metricName parameter.
	//
	// Unit is a required field
	Unit *string `locationName:"unit" type:"string" required:"true" enum:"MetricUnit"`
}

// String returns the string representation
func (s GetInstanceMetricDataInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetInstanceMetricDataInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetInstanceMetricDataInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetInstanceMetricDataInput"}
	if s.EndTime == nil {
		invalidParams.Add(request.NewErrParamRequired("EndTime"))
	}
	if s.InstanceName == nil {
		invalidParams.Add(request.NewErrParamRequired("InstanceName"))
	}
	if s.MetricName == nil {
		invalidParams.Add(request.NewErrParamRequired("MetricName"))
	}
	if s.Period == nil {
		invalidParams.Add(request.NewErrParamRequired("Period"))
	}
	if s.Period != nil && *s.Period < 60 {
		invalidParams.Add(request.NewErrParamMinValue("Period", 60))
	}
	if s.StartTime == nil {
		invalidParams.Add(request.NewErrParamRequired("StartTime"))
	}
	if s.Statistics == nil {
		invalidParams.Add(request.NewErrParamRequired("Statistics"))
	}
	if s.Unit == nil {
		invalidParams.Add(request.NewErrParamRequired("Unit"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetEndTime sets the EndTime field's value.
func (s *GetInstanceMetricDataInput) SetEndTime(v time.Time) *GetInstanceMetricDataInput {
	s.EndTime = &v
	return s
}

// SetInstanceName sets the InstanceName field's value.
func (s *GetInstanceMetricDataInput) SetInstanceName(v string) *GetInstanceMetricDataInput {
	s.InstanceName = &v
	return s
}

// SetMetricName sets the MetricName field's value.
func (s *GetInstanceMetricDataInput) SetMetricName(v string) *GetInstanceMetricDataInput {
	s.MetricName = &v
	return s
}

// SetPeriod sets the Period field's value.
func (s *GetInstanceMetricDataInput) SetPeriod(v int64) *GetInstanceMetricDataInput {
	s.Period = &v
	return s
}

// SetStartTime sets the StartTime field's value.
func (s *GetInstanceMetricDataInput) SetStartTime(v time.Time) *GetInstanceMetricDataInput {
	s.StartTime = &v
	return s
}

// SetStatistics sets the Statistics field's value.
func (s *GetInstanceMetricDataInput) SetStatistics(v []*string) *GetInstanceMetricDataInput {
	s.Statistics = v
	return s
}

// SetUnit sets the Unit field's value.
func (s *GetInstanceMetricDataInput) SetUnit(v string) *GetInstanceMetricDataInput {
	s.Unit = &v
	return s
}
+
// GetInstanceMetricDataOutput is the output of the GetInstanceMetricData
// operation.
type GetInstanceMetricDataOutput struct {
	_ struct{} `type:"structure"`

	// An array of objects that describe the metric data returned.
	MetricData []*MetricDatapoint `locationName:"metricData" type:"list"`

	// The name of the metric returned.
	MetricName *string `locationName:"metricName" type:"string" enum:"InstanceMetricName"`
}

// String returns the string representation
func (s GetInstanceMetricDataOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetInstanceMetricDataOutput) GoString() string {
	return s.String()
}

// SetMetricData sets the MetricData field's value.
func (s *GetInstanceMetricDataOutput) SetMetricData(v []*MetricDatapoint) *GetInstanceMetricDataOutput {
	s.MetricData = v
	return s
}

// SetMetricName sets the MetricName field's value.
func (s *GetInstanceMetricDataOutput) SetMetricName(v string) *GetInstanceMetricDataOutput {
	s.MetricName = &v
	return s
}
+
// GetInstanceOutput is the output of the GetInstance operation.
type GetInstanceOutput struct {
	_ struct{} `type:"structure"`

	// An array of key-value pairs containing information about the specified instance.
	Instance *Instance `locationName:"instance" type:"structure"`
}

// String returns the string representation
func (s GetInstanceOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetInstanceOutput) GoString() string {
	return s.String()
}

// SetInstance sets the Instance field's value.
func (s *GetInstanceOutput) SetInstance(v *Instance) *GetInstanceOutput {
	s.Instance = v
	return s
}
+
// GetInstancePortStatesInput is the input for the GetInstancePortStates
// operation.
type GetInstancePortStatesInput struct {
	_ struct{} `type:"structure"`

	// The name of the instance for which to return firewall port states.
	//
	// InstanceName is a required field
	InstanceName *string `locationName:"instanceName" type:"string" required:"true"`
}

// String returns the string representation
func (s GetInstancePortStatesInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetInstancePortStatesInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetInstancePortStatesInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetInstancePortStatesInput"}
	if s.InstanceName == nil {
		invalidParams.Add(request.NewErrParamRequired("InstanceName"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetInstanceName sets the InstanceName field's value.
func (s *GetInstancePortStatesInput) SetInstanceName(v string) *GetInstancePortStatesInput {
	s.InstanceName = &v
	return s
}
+
// GetInstancePortStatesOutput is the output of the GetInstancePortStates
// operation.
type GetInstancePortStatesOutput struct {
	_ struct{} `type:"structure"`

	// An array of objects that describe the firewall port states for the specified
	// instance.
	PortStates []*InstancePortState `locationName:"portStates" type:"list"`
}

// String returns the string representation
func (s GetInstancePortStatesOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetInstancePortStatesOutput) GoString() string {
	return s.String()
}

// SetPortStates sets the PortStates field's value.
func (s *GetInstancePortStatesOutput) SetPortStates(v []*InstancePortState) *GetInstancePortStatesOutput {
	s.PortStates = v
	return s
}
+
// GetInstanceSnapshotInput is the input for the GetInstanceSnapshot operation.
type GetInstanceSnapshotInput struct {
	_ struct{} `type:"structure"`

	// The name of the snapshot for which you are requesting information.
	//
	// InstanceSnapshotName is a required field
	InstanceSnapshotName *string `locationName:"instanceSnapshotName" type:"string" required:"true"`
}

// String returns the string representation
func (s GetInstanceSnapshotInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetInstanceSnapshotInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetInstanceSnapshotInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetInstanceSnapshotInput"}
	if s.InstanceSnapshotName == nil {
		invalidParams.Add(request.NewErrParamRequired("InstanceSnapshotName"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetInstanceSnapshotName sets the InstanceSnapshotName field's value.
func (s *GetInstanceSnapshotInput) SetInstanceSnapshotName(v string) *GetInstanceSnapshotInput {
	s.InstanceSnapshotName = &v
	return s
}
+
// GetInstanceSnapshotOutput is the output of the GetInstanceSnapshot operation.
type GetInstanceSnapshotOutput struct {
	_ struct{} `type:"structure"`

	// An array of key-value pairs containing information about the results of your
	// get instance snapshot request.
	InstanceSnapshot *InstanceSnapshot `locationName:"instanceSnapshot" type:"structure"`
}

// String returns the string representation
func (s GetInstanceSnapshotOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetInstanceSnapshotOutput) GoString() string {
	return s.String()
}

// SetInstanceSnapshot sets the InstanceSnapshot field's value.
func (s *GetInstanceSnapshotOutput) SetInstanceSnapshot(v *InstanceSnapshot) *GetInstanceSnapshotOutput {
	s.InstanceSnapshot = v
	return s
}
+
// GetInstanceSnapshotsInput is the input for the GetInstanceSnapshots
// operation.
type GetInstanceSnapshotsInput struct {
	_ struct{} `type:"structure"`

	// The token to advance to the next page of results from your request.
	//
	// To get a page token, perform an initial GetInstanceSnapshots request. If
	// your results are paginated, the response will return a next page token that
	// you can specify as the page token in a subsequent request.
	PageToken *string `locationName:"pageToken" type:"string"`
}

// String returns the string representation
func (s GetInstanceSnapshotsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetInstanceSnapshotsInput) GoString() string {
	return s.String()
}

// SetPageToken sets the PageToken field's value.
func (s *GetInstanceSnapshotsInput) SetPageToken(v string) *GetInstanceSnapshotsInput {
	s.PageToken = &v
	return s
}
+
// GetInstanceSnapshotsOutput is the output of the GetInstanceSnapshots
// operation.
type GetInstanceSnapshotsOutput struct {
	_ struct{} `type:"structure"`

	// An array of key-value pairs containing information about the results of your
	// get instance snapshots request.
	InstanceSnapshots []*InstanceSnapshot `locationName:"instanceSnapshots" type:"list"`

	// The token to advance to the next page of results from your request.
	//
	// A next page token is not returned if there are no more results to display.
	//
	// To get the next page of results, perform another GetInstanceSnapshots request
	// and specify the next page token using the pageToken parameter.
	NextPageToken *string `locationName:"nextPageToken" type:"string"`
}

// String returns the string representation
func (s GetInstanceSnapshotsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetInstanceSnapshotsOutput) GoString() string {
	return s.String()
}

// SetInstanceSnapshots sets the InstanceSnapshots field's value.
func (s *GetInstanceSnapshotsOutput) SetInstanceSnapshots(v []*InstanceSnapshot) *GetInstanceSnapshotsOutput {
	s.InstanceSnapshots = v
	return s
}

// SetNextPageToken sets the NextPageToken field's value.
func (s *GetInstanceSnapshotsOutput) SetNextPageToken(v string) *GetInstanceSnapshotsOutput {
	s.NextPageToken = &v
	return s
}
+
// GetInstanceStateInput is the input for the GetInstanceState operation.
type GetInstanceStateInput struct {
	_ struct{} `type:"structure"`

	// The name of the instance to get state information about.
	//
	// InstanceName is a required field
	InstanceName *string `locationName:"instanceName" type:"string" required:"true"`
}

// String returns the string representation
func (s GetInstanceStateInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetInstanceStateInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetInstanceStateInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetInstanceStateInput"}
	if s.InstanceName == nil {
		invalidParams.Add(request.NewErrParamRequired("InstanceName"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetInstanceName sets the InstanceName field's value.
func (s *GetInstanceStateInput) SetInstanceName(v string) *GetInstanceStateInput {
	s.InstanceName = &v
	return s
}
+
// GetInstanceStateOutput is the output of the GetInstanceState operation.
type GetInstanceStateOutput struct {
	_ struct{} `type:"structure"`

	// The state of the instance.
	State *InstanceState `locationName:"state" type:"structure"`
}

// String returns the string representation
func (s GetInstanceStateOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetInstanceStateOutput) GoString() string {
	return s.String()
}

// SetState sets the State field's value.
func (s *GetInstanceStateOutput) SetState(v *InstanceState) *GetInstanceStateOutput {
	s.State = v
	return s
}
+
// GetInstancesInput is the input for the GetInstances operation.
type GetInstancesInput struct {
	_ struct{} `type:"structure"`

	// The token to advance to the next page of results from your request.
	//
	// To get a page token, perform an initial GetInstances request. If your results
	// are paginated, the response will return a next page token that you can specify
	// as the page token in a subsequent request.
	PageToken *string `locationName:"pageToken" type:"string"`
}

// String returns the string representation
func (s GetInstancesInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetInstancesInput) GoString() string {
	return s.String()
}

// SetPageToken sets the PageToken field's value.
func (s *GetInstancesInput) SetPageToken(v string) *GetInstancesInput {
	s.PageToken = &v
	return s
}
+
// GetInstancesOutput is the output of the GetInstances operation.
type GetInstancesOutput struct {
	_ struct{} `type:"structure"`

	// An array of key-value pairs containing information about your instances.
	Instances []*Instance `locationName:"instances" type:"list"`

	// The token to advance to the next page of results from your request.
	//
	// A next page token is not returned if there are no more results to display.
	//
	// To get the next page of results, perform another GetInstances request and
	// specify the next page token using the pageToken parameter.
	NextPageToken *string `locationName:"nextPageToken" type:"string"`
}

// String returns the string representation
func (s GetInstancesOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetInstancesOutput) GoString() string {
	return s.String()
}

// SetInstances sets the Instances field's value.
func (s *GetInstancesOutput) SetInstances(v []*Instance) *GetInstancesOutput {
	s.Instances = v
	return s
}

// SetNextPageToken sets the NextPageToken field's value.
func (s *GetInstancesOutput) SetNextPageToken(v string) *GetInstancesOutput {
	s.NextPageToken = &v
	return s
}
+
// GetKeyPairInput is the input for the GetKeyPair operation.
type GetKeyPairInput struct {
	_ struct{} `type:"structure"`

	// The name of the key pair for which you are requesting information.
	//
	// KeyPairName is a required field
	KeyPairName *string `locationName:"keyPairName" type:"string" required:"true"`
}

// String returns the string representation
func (s GetKeyPairInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetKeyPairInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetKeyPairInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetKeyPairInput"}
	if s.KeyPairName == nil {
		invalidParams.Add(request.NewErrParamRequired("KeyPairName"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetKeyPairName sets the KeyPairName field's value.
func (s *GetKeyPairInput) SetKeyPairName(v string) *GetKeyPairInput {
	s.KeyPairName = &v
	return s
}
+
// GetKeyPairOutput is the output of the GetKeyPair operation.
type GetKeyPairOutput struct {
	_ struct{} `type:"structure"`

	// An array of key-value pairs containing information about the key pair.
	KeyPair *KeyPair `locationName:"keyPair" type:"structure"`
}

// String returns the string representation
func (s GetKeyPairOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetKeyPairOutput) GoString() string {
	return s.String()
}

// SetKeyPair sets the KeyPair field's value.
func (s *GetKeyPairOutput) SetKeyPair(v *KeyPair) *GetKeyPairOutput {
	s.KeyPair = v
	return s
}
+
// GetKeyPairsInput is the input for the GetKeyPairs operation.
type GetKeyPairsInput struct {
	_ struct{} `type:"structure"`

	// The token to advance to the next page of results from your request.
	//
	// To get a page token, perform an initial GetKeyPairs request. If your results
	// are paginated, the response will return a next page token that you can specify
	// as the page token in a subsequent request.
	PageToken *string `locationName:"pageToken" type:"string"`
}

// String returns the string representation
func (s GetKeyPairsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetKeyPairsInput) GoString() string {
	return s.String()
}

// SetPageToken sets the PageToken field's value.
func (s *GetKeyPairsInput) SetPageToken(v string) *GetKeyPairsInput {
	s.PageToken = &v
	return s
}
+
// GetKeyPairsOutput is the output of the GetKeyPairs operation.
type GetKeyPairsOutput struct {
	_ struct{} `type:"structure"`

	// An array of key-value pairs containing information about the key pairs.
	KeyPairs []*KeyPair `locationName:"keyPairs" type:"list"`

	// The token to advance to the next page of results from your request.
	//
	// A next page token is not returned if there are no more results to display.
	//
	// To get the next page of results, perform another GetKeyPairs request and
	// specify the next page token using the pageToken parameter.
	NextPageToken *string `locationName:"nextPageToken" type:"string"`
}

// String returns the string representation
func (s GetKeyPairsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetKeyPairsOutput) GoString() string {
	return s.String()
}

// SetKeyPairs sets the KeyPairs field's value.
func (s *GetKeyPairsOutput) SetKeyPairs(v []*KeyPair) *GetKeyPairsOutput {
	s.KeyPairs = v
	return s
}

// SetNextPageToken sets the NextPageToken field's value.
func (s *GetKeyPairsOutput) SetNextPageToken(v string) *GetKeyPairsOutput {
	s.NextPageToken = &v
	return s
}
+
// GetLoadBalancerInput is the input for the GetLoadBalancer operation.
type GetLoadBalancerInput struct {
	_ struct{} `type:"structure"`

	// The name of the load balancer.
	//
	// LoadBalancerName is a required field
	LoadBalancerName *string `locationName:"loadBalancerName" type:"string" required:"true"`
}

// String returns the string representation
func (s GetLoadBalancerInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetLoadBalancerInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetLoadBalancerInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetLoadBalancerInput"}
	if s.LoadBalancerName == nil {
		invalidParams.Add(request.NewErrParamRequired("LoadBalancerName"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetLoadBalancerName sets the LoadBalancerName field's value.
func (s *GetLoadBalancerInput) SetLoadBalancerName(v string) *GetLoadBalancerInput {
	s.LoadBalancerName = &v
	return s
}
+
+type GetLoadBalancerMetricDataInput struct {
+ _ struct{} `type:"structure"`
+
+ // The end time of the period.
+ //
+ // EndTime is a required field
+ EndTime *time.Time `locationName:"endTime" type:"timestamp" required:"true"`
+
+ // The name of the load balancer.
+ //
+ // LoadBalancerName is a required field
+ LoadBalancerName *string `locationName:"loadBalancerName" type:"string" required:"true"`
+
+ // The metric for which you want to return information.
+ //
+ // Valid load balancer metric names are listed below, along with the most useful
+ // statistics to include in your request, and the published unit value.
+ //
+ // * ClientTLSNegotiationErrorCount - The number of TLS connections initiated
+ // by the client that did not establish a session with the load balancer
+ // due to a TLS error generated by the load balancer. Possible causes include
+ // a mismatch of ciphers or protocols. Statistics: The most useful statistic
+ // is Sum. Unit: The published unit is Count.
+ //
+ // * HealthyHostCount - The number of target instances that are considered
+ // healthy. Statistics: The most useful statistic are Average, Minimum, and
+ // Maximum. Unit: The published unit is Count.
+ //
+ // * HTTPCode_Instance_2XX_Count - The number of HTTP 2XX response codes
+ // generated by the target instances. This does not include any response
+ // codes generated by the load balancer. Statistics: The most useful statistic
+ // is Sum. Note that Minimum, Maximum, and Average all return 1. Unit: The
+ // published unit is Count.
+ //
+ // * HTTPCode_Instance_3XX_Count - The number of HTTP 3XX response codes
+ // generated by the target instances. This does not include any response
+ // codes generated by the load balancer. Statistics: The most useful statistic
+ // is Sum. Note that Minimum, Maximum, and Average all return 1. Unit: The
+ // published unit is Count.
+ //
+ // * HTTPCode_Instance_4XX_Count - The number of HTTP 4XX response codes
+ // generated by the target instances. This does not include any response
+ // codes generated by the load balancer. Statistics: The most useful statistic
+ // is Sum. Note that Minimum, Maximum, and Average all return 1. Unit: The
+ // published unit is Count.
+ //
+ // * HTTPCode_Instance_5XX_Count - The number of HTTP 5XX response codes
+ // generated by the target instances. This does not include any response
+ // codes generated by the load balancer. Statistics: The most useful statistic
+ // is Sum. Note that Minimum, Maximum, and Average all return 1. Unit: The
+ // published unit is Count.
+ //
+ // * HTTPCode_LB_4XX_Count - The number of HTTP 4XX client error codes that
+ // originated from the load balancer. Client errors are generated when requests
+ // are malformed or incomplete. These requests were not received by the target
+ // instance. This count does not include response codes generated by the
+ // target instances. Statistics: The most useful statistic is Sum. Note that
+ // Minimum, Maximum, and Average all return 1. Unit: The published unit is
+ // Count.
+ //
+ // * HTTPCode_LB_5XX_Count - The number of HTTP 5XX server error codes that
+ // originated from the load balancer. This does not include any response
+ // codes generated by the target instance. This metric is reported if there
+ // are no healthy instances attached to the load balancer, or if the request
+ // rate exceeds the capacity of the instances (spillover) or the load balancer.
+ // Statistics: The most useful statistic is Sum. Note that Minimum, Maximum,
+ // and Average all return 1. Unit: The published unit is Count.
+ //
+ // * InstanceResponseTime - The time elapsed, in seconds, after the request
+ // leaves the load balancer until a response from the target instance is
+ // received. Statistics: The most useful statistic is Average. Unit: The
+ // published unit is Seconds.
+ //
+ // * RejectedConnectionCount - The number of connections that were rejected
+ // because the load balancer had reached its maximum number of connections.
+ // Statistics: The most useful statistic is Sum. Unit: The published unit
+ // is Count.
+ //
+ // * RequestCount - The number of requests processed over IPv4. This count
+ // includes only the requests with a response generated by a target instance
+ // of the load balancer. Statistics: The most useful statistic is Sum. Note
+ // that Minimum, Maximum, and Average all return 1. Unit: The published unit
+ // is Count.
+ //
+ // * UnhealthyHostCount - The number of target instances that are considered
+	// unhealthy. Statistics: The most useful statistics are Average, Minimum,
+ // and Maximum. Unit: The published unit is Count.
+ //
+ // MetricName is a required field
+ MetricName *string `locationName:"metricName" type:"string" required:"true" enum:"LoadBalancerMetricName"`
+
+ // The granularity, in seconds, of the returned data points.
+ //
+ // Period is a required field
+ Period *int64 `locationName:"period" min:"60" type:"integer" required:"true"`
+
+ // The start time of the period.
+ //
+ // StartTime is a required field
+ StartTime *time.Time `locationName:"startTime" type:"timestamp" required:"true"`
+
+ // The statistic for the metric.
+ //
+ // The following statistics are available:
+ //
+ // * Minimum - The lowest value observed during the specified period. Use
+ // this value to determine low volumes of activity for your application.
+ //
+ // * Maximum - The highest value observed during the specified period. Use
+ // this value to determine high volumes of activity for your application.
+ //
+ // * Sum - All values submitted for the matching metric added together. You
+ // can use this statistic to determine the total volume of a metric.
+ //
+ // * Average - The value of Sum / SampleCount during the specified period.
+ // By comparing this statistic with the Minimum and Maximum values, you can
+ // determine the full scope of a metric and how close the average use is
+ // to the Minimum and Maximum values. This comparison helps you to know when
+ // to increase or decrease your resources.
+ //
+ // * SampleCount - The count, or number, of data points used for the statistical
+ // calculation.
+ //
+ // Statistics is a required field
+ Statistics []*string `locationName:"statistics" type:"list" required:"true"`
+
+ // The unit for the metric data request. Valid units depend on the metric data
+ // being requested. For the valid units with each available metric, see the
+ // metricName parameter.
+ //
+ // Unit is a required field
+ Unit *string `locationName:"unit" type:"string" required:"true" enum:"MetricUnit"`
+}
+
+// String returns the string representation
+func (s GetLoadBalancerMetricDataInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetLoadBalancerMetricDataInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetLoadBalancerMetricDataInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetLoadBalancerMetricDataInput"}
+ if s.EndTime == nil {
+ invalidParams.Add(request.NewErrParamRequired("EndTime"))
+ }
+ if s.LoadBalancerName == nil {
+ invalidParams.Add(request.NewErrParamRequired("LoadBalancerName"))
+ }
+ if s.MetricName == nil {
+ invalidParams.Add(request.NewErrParamRequired("MetricName"))
+ }
+ if s.Period == nil {
+ invalidParams.Add(request.NewErrParamRequired("Period"))
+ }
+ if s.Period != nil && *s.Period < 60 {
+ invalidParams.Add(request.NewErrParamMinValue("Period", 60))
+ }
+ if s.StartTime == nil {
+ invalidParams.Add(request.NewErrParamRequired("StartTime"))
+ }
+ if s.Statistics == nil {
+ invalidParams.Add(request.NewErrParamRequired("Statistics"))
+ }
+ if s.Unit == nil {
+ invalidParams.Add(request.NewErrParamRequired("Unit"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetEndTime sets the EndTime field's value.
+func (s *GetLoadBalancerMetricDataInput) SetEndTime(v time.Time) *GetLoadBalancerMetricDataInput {
+ s.EndTime = &v
+ return s
+}
+
+// SetLoadBalancerName sets the LoadBalancerName field's value.
+func (s *GetLoadBalancerMetricDataInput) SetLoadBalancerName(v string) *GetLoadBalancerMetricDataInput {
+ s.LoadBalancerName = &v
+ return s
+}
+
+// SetMetricName sets the MetricName field's value.
+func (s *GetLoadBalancerMetricDataInput) SetMetricName(v string) *GetLoadBalancerMetricDataInput {
+ s.MetricName = &v
+ return s
+}
+
+// SetPeriod sets the Period field's value.
+func (s *GetLoadBalancerMetricDataInput) SetPeriod(v int64) *GetLoadBalancerMetricDataInput {
+ s.Period = &v
+ return s
+}
+
+// SetStartTime sets the StartTime field's value.
+func (s *GetLoadBalancerMetricDataInput) SetStartTime(v time.Time) *GetLoadBalancerMetricDataInput {
+ s.StartTime = &v
+ return s
+}
+
+// SetStatistics sets the Statistics field's value.
+func (s *GetLoadBalancerMetricDataInput) SetStatistics(v []*string) *GetLoadBalancerMetricDataInput {
+ s.Statistics = v
+ return s
+}
+
+// SetUnit sets the Unit field's value.
+func (s *GetLoadBalancerMetricDataInput) SetUnit(v string) *GetLoadBalancerMetricDataInput {
+ s.Unit = &v
+ return s
+}
+
+type GetLoadBalancerMetricDataOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the metric data returned.
+ MetricData []*MetricDatapoint `locationName:"metricData" type:"list"`
+
+ // The name of the metric returned.
+ MetricName *string `locationName:"metricName" type:"string" enum:"LoadBalancerMetricName"`
+}
+
+// String returns the string representation
+func (s GetLoadBalancerMetricDataOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetLoadBalancerMetricDataOutput) GoString() string {
+ return s.String()
+}
+
+// SetMetricData sets the MetricData field's value.
+func (s *GetLoadBalancerMetricDataOutput) SetMetricData(v []*MetricDatapoint) *GetLoadBalancerMetricDataOutput {
+ s.MetricData = v
+ return s
+}
+
+// SetMetricName sets the MetricName field's value.
+func (s *GetLoadBalancerMetricDataOutput) SetMetricName(v string) *GetLoadBalancerMetricDataOutput {
+ s.MetricName = &v
+ return s
+}
+
+type GetLoadBalancerOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An object containing information about your load balancer.
+ LoadBalancer *LoadBalancer `locationName:"loadBalancer" type:"structure"`
+}
+
+// String returns the string representation
+func (s GetLoadBalancerOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetLoadBalancerOutput) GoString() string {
+ return s.String()
+}
+
+// SetLoadBalancer sets the LoadBalancer field's value.
+func (s *GetLoadBalancerOutput) SetLoadBalancer(v *LoadBalancer) *GetLoadBalancerOutput {
+ s.LoadBalancer = v
+ return s
+}
+
+type GetLoadBalancerTlsCertificatesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the load balancer you associated with your SSL/TLS certificate.
+ //
+ // LoadBalancerName is a required field
+ LoadBalancerName *string `locationName:"loadBalancerName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetLoadBalancerTlsCertificatesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetLoadBalancerTlsCertificatesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetLoadBalancerTlsCertificatesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetLoadBalancerTlsCertificatesInput"}
+ if s.LoadBalancerName == nil {
+ invalidParams.Add(request.NewErrParamRequired("LoadBalancerName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetLoadBalancerName sets the LoadBalancerName field's value.
+func (s *GetLoadBalancerTlsCertificatesInput) SetLoadBalancerName(v string) *GetLoadBalancerTlsCertificatesInput {
+ s.LoadBalancerName = &v
+ return s
+}
+
+type GetLoadBalancerTlsCertificatesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of LoadBalancerTlsCertificate objects describing your SSL/TLS certificates.
+ TlsCertificates []*LoadBalancerTlsCertificate `locationName:"tlsCertificates" type:"list"`
+}
+
+// String returns the string representation
+func (s GetLoadBalancerTlsCertificatesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetLoadBalancerTlsCertificatesOutput) GoString() string {
+ return s.String()
+}
+
+// SetTlsCertificates sets the TlsCertificates field's value.
+func (s *GetLoadBalancerTlsCertificatesOutput) SetTlsCertificates(v []*LoadBalancerTlsCertificate) *GetLoadBalancerTlsCertificatesOutput {
+ s.TlsCertificates = v
+ return s
+}
+
+type GetLoadBalancersInput struct {
+ _ struct{} `type:"structure"`
+
+ // The token to advance to the next page of results from your request.
+ //
+ // To get a page token, perform an initial GetLoadBalancers request. If your
+ // results are paginated, the response will return a next page token that you
+ // can specify as the page token in a subsequent request.
+ PageToken *string `locationName:"pageToken" type:"string"`
+}
+
+// String returns the string representation
+func (s GetLoadBalancersInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetLoadBalancersInput) GoString() string {
+ return s.String()
+}
+
+// SetPageToken sets the PageToken field's value.
+func (s *GetLoadBalancersInput) SetPageToken(v string) *GetLoadBalancersInput {
+ s.PageToken = &v
+ return s
+}
+
+type GetLoadBalancersOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of LoadBalancer objects describing your load balancers.
+ LoadBalancers []*LoadBalancer `locationName:"loadBalancers" type:"list"`
+
+ // The token to advance to the next page of results from your request.
+ //
+ // A next page token is not returned if there are no more results to display.
+ //
+ // To get the next page of results, perform another GetLoadBalancers request
+ // and specify the next page token using the pageToken parameter.
+ NextPageToken *string `locationName:"nextPageToken" type:"string"`
+}
+
+// String returns the string representation
+func (s GetLoadBalancersOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetLoadBalancersOutput) GoString() string {
+ return s.String()
+}
+
+// SetLoadBalancers sets the LoadBalancers field's value.
+func (s *GetLoadBalancersOutput) SetLoadBalancers(v []*LoadBalancer) *GetLoadBalancersOutput {
+ s.LoadBalancers = v
+ return s
+}
+
+// SetNextPageToken sets the NextPageToken field's value.
+func (s *GetLoadBalancersOutput) SetNextPageToken(v string) *GetLoadBalancersOutput {
+ s.NextPageToken = &v
+ return s
+}
+
+type GetOperationInput struct {
+ _ struct{} `type:"structure"`
+
+ // A GUID used to identify the operation.
+ //
+ // OperationId is a required field
+ OperationId *string `locationName:"operationId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetOperationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetOperationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetOperationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetOperationInput"}
+ if s.OperationId == nil {
+ invalidParams.Add(request.NewErrParamRequired("OperationId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetOperationId sets the OperationId field's value.
+func (s *GetOperationInput) SetOperationId(v string) *GetOperationInput {
+ s.OperationId = &v
+ return s
+}
+
+type GetOperationOutput struct {
+ _ struct{} `type:"structure"`
+
+	// An object that describes the result of the action, such as the status of
+	// the request, the timestamp of the request, and the resources affected by
+	// the request.
+ Operation *Operation `locationName:"operation" type:"structure"`
+}
+
+// String returns the string representation
+func (s GetOperationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetOperationOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperation sets the Operation field's value.
+func (s *GetOperationOutput) SetOperation(v *Operation) *GetOperationOutput {
+ s.Operation = v
+ return s
+}
+
+type GetOperationsForResourceInput struct {
+ _ struct{} `type:"structure"`
+
+ // The token to advance to the next page of results from your request.
+ //
+ // To get a page token, perform an initial GetOperationsForResource request.
+ // If your results are paginated, the response will return a next page token
+ // that you can specify as the page token in a subsequent request.
+ PageToken *string `locationName:"pageToken" type:"string"`
+
+ // The name of the resource for which you are requesting information.
+ //
+ // ResourceName is a required field
+ ResourceName *string `locationName:"resourceName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetOperationsForResourceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetOperationsForResourceInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetOperationsForResourceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetOperationsForResourceInput"}
+ if s.ResourceName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ResourceName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetPageToken sets the PageToken field's value.
+func (s *GetOperationsForResourceInput) SetPageToken(v string) *GetOperationsForResourceInput {
+ s.PageToken = &v
+ return s
+}
+
+// SetResourceName sets the ResourceName field's value.
+func (s *GetOperationsForResourceInput) SetResourceName(v string) *GetOperationsForResourceInput {
+ s.ResourceName = &v
+ return s
+}
+
+type GetOperationsForResourceOutput struct {
+ _ struct{} `type:"structure"`
+
+ // (Deprecated) Returns the number of pages of results that remain.
+ //
+ // In releases prior to June 12, 2017, this parameter returned null by the API.
+ // It is now deprecated, and the API returns the next page token parameter instead.
+ //
+ // Deprecated: NextPageCount has been deprecated
+ NextPageCount *string `locationName:"nextPageCount" deprecated:"true" type:"string"`
+
+ // The token to advance to the next page of results from your request.
+ //
+ // A next page token is not returned if there are no more results to display.
+ //
+ // To get the next page of results, perform another GetOperationsForResource
+ // request and specify the next page token using the pageToken parameter.
+ NextPageToken *string `locationName:"nextPageToken" type:"string"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s GetOperationsForResourceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetOperationsForResourceOutput) GoString() string {
+ return s.String()
+}
+
+// SetNextPageCount sets the NextPageCount field's value.
+func (s *GetOperationsForResourceOutput) SetNextPageCount(v string) *GetOperationsForResourceOutput {
+ s.NextPageCount = &v
+ return s
+}
+
+// SetNextPageToken sets the NextPageToken field's value.
+func (s *GetOperationsForResourceOutput) SetNextPageToken(v string) *GetOperationsForResourceOutput {
+ s.NextPageToken = &v
+ return s
+}
+
+// SetOperations sets the Operations field's value.
+func (s *GetOperationsForResourceOutput) SetOperations(v []*Operation) *GetOperationsForResourceOutput {
+ s.Operations = v
+ return s
+}
+
+type GetOperationsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The token to advance to the next page of results from your request.
+ //
+ // To get a page token, perform an initial GetOperations request. If your results
+ // are paginated, the response will return a next page token that you can specify
+ // as the page token in a subsequent request.
+ PageToken *string `locationName:"pageToken" type:"string"`
+}
+
+// String returns the string representation
+func (s GetOperationsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetOperationsInput) GoString() string {
+ return s.String()
+}
+
+// SetPageToken sets the PageToken field's value.
+func (s *GetOperationsInput) SetPageToken(v string) *GetOperationsInput {
+ s.PageToken = &v
+ return s
+}
+
+type GetOperationsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The token to advance to the next page of results from your request.
+ //
+ // A next page token is not returned if there are no more results to display.
+ //
+ // To get the next page of results, perform another GetOperations request and
+ // specify the next page token using the pageToken parameter.
+ NextPageToken *string `locationName:"nextPageToken" type:"string"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s GetOperationsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetOperationsOutput) GoString() string {
+ return s.String()
+}
+
+// SetNextPageToken sets the NextPageToken field's value.
+func (s *GetOperationsOutput) SetNextPageToken(v string) *GetOperationsOutput {
+ s.NextPageToken = &v
+ return s
+}
+
+// SetOperations sets the Operations field's value.
+func (s *GetOperationsOutput) SetOperations(v []*Operation) *GetOperationsOutput {
+ s.Operations = v
+ return s
+}
+
+type GetRegionsInput struct {
+ _ struct{} `type:"structure"`
+
+ // A Boolean value indicating whether to also include Availability Zones in
+ // your get regions request. Availability Zones are indicated with a letter:
+ // e.g., us-east-2a.
+ IncludeAvailabilityZones *bool `locationName:"includeAvailabilityZones" type:"boolean"`
+
+ // A Boolean value indicating whether to also include Availability Zones for
+ // databases in your get regions request. Availability Zones are indicated with
+ // a letter (e.g., us-east-2a).
+ IncludeRelationalDatabaseAvailabilityZones *bool `locationName:"includeRelationalDatabaseAvailabilityZones" type:"boolean"`
+}
+
+// String returns the string representation
+func (s GetRegionsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetRegionsInput) GoString() string {
+ return s.String()
+}
+
+// SetIncludeAvailabilityZones sets the IncludeAvailabilityZones field's value.
+func (s *GetRegionsInput) SetIncludeAvailabilityZones(v bool) *GetRegionsInput {
+ s.IncludeAvailabilityZones = &v
+ return s
+}
+
+// SetIncludeRelationalDatabaseAvailabilityZones sets the IncludeRelationalDatabaseAvailabilityZones field's value.
+func (s *GetRegionsInput) SetIncludeRelationalDatabaseAvailabilityZones(v bool) *GetRegionsInput {
+ s.IncludeRelationalDatabaseAvailabilityZones = &v
+ return s
+}
+
+type GetRegionsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of key-value pairs containing information about your get regions
+ // request.
+ Regions []*Region `locationName:"regions" type:"list"`
+}
+
+// String returns the string representation
+func (s GetRegionsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetRegionsOutput) GoString() string {
+ return s.String()
+}
+
+// SetRegions sets the Regions field's value.
+func (s *GetRegionsOutput) SetRegions(v []*Region) *GetRegionsOutput {
+ s.Regions = v
+ return s
+}
+
+type GetRelationalDatabaseBlueprintsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The token to advance to the next page of results from your request.
+ //
+ // To get a page token, perform an initial GetRelationalDatabaseBlueprints request.
+ // If your results are paginated, the response will return a next page token
+ // that you can specify as the page token in a subsequent request.
+ PageToken *string `locationName:"pageToken" type:"string"`
+}
+
+// String returns the string representation
+func (s GetRelationalDatabaseBlueprintsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetRelationalDatabaseBlueprintsInput) GoString() string {
+ return s.String()
+}
+
+// SetPageToken sets the PageToken field's value.
+func (s *GetRelationalDatabaseBlueprintsInput) SetPageToken(v string) *GetRelationalDatabaseBlueprintsInput {
+ s.PageToken = &v
+ return s
+}
+
+type GetRelationalDatabaseBlueprintsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An object describing the result of your get relational database blueprints
+ // request.
+ Blueprints []*RelationalDatabaseBlueprint `locationName:"blueprints" type:"list"`
+
+ // The token to advance to the next page of results from your request.
+ //
+ // A next page token is not returned if there are no more results to display.
+ //
+ // To get the next page of results, perform another GetRelationalDatabaseBlueprints
+ // request and specify the next page token using the pageToken parameter.
+ NextPageToken *string `locationName:"nextPageToken" type:"string"`
+}
+
+// String returns the string representation
+func (s GetRelationalDatabaseBlueprintsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetRelationalDatabaseBlueprintsOutput) GoString() string {
+ return s.String()
+}
+
+// SetBlueprints sets the Blueprints field's value.
+func (s *GetRelationalDatabaseBlueprintsOutput) SetBlueprints(v []*RelationalDatabaseBlueprint) *GetRelationalDatabaseBlueprintsOutput {
+ s.Blueprints = v
+ return s
+}
+
+// SetNextPageToken sets the NextPageToken field's value.
+func (s *GetRelationalDatabaseBlueprintsOutput) SetNextPageToken(v string) *GetRelationalDatabaseBlueprintsOutput {
+ s.NextPageToken = &v
+ return s
+}
+
+type GetRelationalDatabaseBundlesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The token to advance to the next page of results from your request.
+ //
+ // To get a page token, perform an initial GetRelationalDatabaseBundles request.
+ // If your results are paginated, the response will return a next page token
+ // that you can specify as the page token in a subsequent request.
+ PageToken *string `locationName:"pageToken" type:"string"`
+}
+
+// String returns the string representation
+func (s GetRelationalDatabaseBundlesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetRelationalDatabaseBundlesInput) GoString() string {
+ return s.String()
+}
+
+// SetPageToken sets the PageToken field's value.
+func (s *GetRelationalDatabaseBundlesInput) SetPageToken(v string) *GetRelationalDatabaseBundlesInput {
+ s.PageToken = &v
+ return s
+}
+
+type GetRelationalDatabaseBundlesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An object describing the result of your get relational database bundles request.
+ Bundles []*RelationalDatabaseBundle `locationName:"bundles" type:"list"`
+
+ // The token to advance to the next page of results from your request.
+ //
+ // A next page token is not returned if there are no more results to display.
+ //
+ // To get the next page of results, perform another GetRelationalDatabaseBundles
+ // request and specify the next page token using the pageToken parameter.
+ NextPageToken *string `locationName:"nextPageToken" type:"string"`
+}
+
+// String returns the string representation
+func (s GetRelationalDatabaseBundlesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetRelationalDatabaseBundlesOutput) GoString() string {
+ return s.String()
+}
+
+// SetBundles sets the Bundles field's value.
+func (s *GetRelationalDatabaseBundlesOutput) SetBundles(v []*RelationalDatabaseBundle) *GetRelationalDatabaseBundlesOutput {
+ s.Bundles = v
+ return s
+}
+
+// SetNextPageToken sets the NextPageToken field's value.
+func (s *GetRelationalDatabaseBundlesOutput) SetNextPageToken(v string) *GetRelationalDatabaseBundlesOutput {
+ s.NextPageToken = &v
+ return s
+}
+
+type GetRelationalDatabaseEventsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The number of minutes in the past from which to retrieve events. For example,
+ // to get all events from the past 2 hours, enter 120.
+ //
+ // Default: 60
+ //
+ // The minimum is 1 and the maximum is 14 days (20160 minutes).
+ DurationInMinutes *int64 `locationName:"durationInMinutes" type:"integer"`
+
+ // The token to advance to the next page of results from your request.
+ //
+ // To get a page token, perform an initial GetRelationalDatabaseEvents request.
+ // If your results are paginated, the response will return a next page token
+ // that you can specify as the page token in a subsequent request.
+ PageToken *string `locationName:"pageToken" type:"string"`
+
+ // The name of the database from which to get events.
+ //
+ // RelationalDatabaseName is a required field
+ RelationalDatabaseName *string `locationName:"relationalDatabaseName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetRelationalDatabaseEventsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetRelationalDatabaseEventsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetRelationalDatabaseEventsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetRelationalDatabaseEventsInput"}
+ if s.RelationalDatabaseName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDurationInMinutes sets the DurationInMinutes field's value.
+func (s *GetRelationalDatabaseEventsInput) SetDurationInMinutes(v int64) *GetRelationalDatabaseEventsInput {
+ s.DurationInMinutes = &v
+ return s
+}
+
+// SetPageToken sets the PageToken field's value.
+func (s *GetRelationalDatabaseEventsInput) SetPageToken(v string) *GetRelationalDatabaseEventsInput {
+ s.PageToken = &v
+ return s
+}
+
+// SetRelationalDatabaseName sets the RelationalDatabaseName field's value.
+func (s *GetRelationalDatabaseEventsInput) SetRelationalDatabaseName(v string) *GetRelationalDatabaseEventsInput {
+ s.RelationalDatabaseName = &v
+ return s
+}
+
+type GetRelationalDatabaseEventsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The token to advance to the next page of results from your request.
+ //
+ // A next page token is not returned if there are no more results to display.
+ //
+ // To get the next page of results, perform another GetRelationalDatabaseEvents
+ // request and specify the next page token using the pageToken parameter.
+ NextPageToken *string `locationName:"nextPageToken" type:"string"`
+
+ // An object describing the result of your get relational database events request.
+ RelationalDatabaseEvents []*RelationalDatabaseEvent `locationName:"relationalDatabaseEvents" type:"list"`
+}
+
+// String returns the string representation
+func (s GetRelationalDatabaseEventsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetRelationalDatabaseEventsOutput) GoString() string {
+ return s.String()
+}
+
+// SetNextPageToken sets the NextPageToken field's value.
+func (s *GetRelationalDatabaseEventsOutput) SetNextPageToken(v string) *GetRelationalDatabaseEventsOutput {
+ s.NextPageToken = &v
+ return s
+}
+
+// SetRelationalDatabaseEvents sets the RelationalDatabaseEvents field's value.
+func (s *GetRelationalDatabaseEventsOutput) SetRelationalDatabaseEvents(v []*RelationalDatabaseEvent) *GetRelationalDatabaseEventsOutput {
+ s.RelationalDatabaseEvents = v
+ return s
+}
+
+type GetRelationalDatabaseInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the database that you are looking up.
+	//
+	// RelationalDatabaseName is a required field
+	RelationalDatabaseName *string `locationName:"relationalDatabaseName" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+func (s GetRelationalDatabaseInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s GetRelationalDatabaseInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetRelationalDatabaseInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetRelationalDatabaseInput"}
+	if s.RelationalDatabaseName == nil {
+		invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseName"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetRelationalDatabaseName sets the RelationalDatabaseName field's value.
+func (s *GetRelationalDatabaseInput) SetRelationalDatabaseName(v string) *GetRelationalDatabaseInput {
+	s.RelationalDatabaseName = &v
+	return s
+}
+
+type GetRelationalDatabaseLogEventsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The end of the time interval from which to get log events.
+	//
+	// Constraints:
+	//
+	// * Specified in Coordinated Universal Time (UTC).
+	//
+	// * Specified in the Unix time format. For example, if you wish to use an
+	// end time of October 1, 2018, at 8 PM UTC, then you input 1538424000 as
+	// the end time.
+	EndTime *time.Time `locationName:"endTime" type:"timestamp"`
+
+	// The name of the log stream.
+	//
+	// Use the get relational database log streams operation to get a list of available
+	// log streams.
+	//
+	// LogStreamName is a required field
+	LogStreamName *string `locationName:"logStreamName" type:"string" required:"true"`
+
+	// The token to advance to the next or previous page of results from your request.
+	//
+	// To get a page token, perform an initial GetRelationalDatabaseLogEvents request.
+	// If your results are paginated, the response will return a next forward token
+	// and/or next backward token that you can specify as the page token in a subsequent
+	// request.
+	PageToken *string `locationName:"pageToken" type:"string"`
+
+	// The name of your database for which to get log events.
+	//
+	// RelationalDatabaseName is a required field
+	RelationalDatabaseName *string `locationName:"relationalDatabaseName" type:"string" required:"true"`
+
+	// Parameter to specify if the log should start from head or tail. If true is
+	// specified, the log event starts from the head of the log. If false is specified,
+	// the log event starts from the tail of the log.
+	//
+	// For PostgreSQL, the default value of false is the only option available.
+	StartFromHead *bool `locationName:"startFromHead" type:"boolean"`
+
+	// The start of the time interval from which to get log events.
+	//
+	// Constraints:
+	//
+	// * Specified in Coordinated Universal Time (UTC).
+	//
+	// * Specified in the Unix time format. For example, if you wish to use a
+	// start time of October 1, 2018, at 8 PM UTC, then you input 1538424000
+	// as the start time.
+	StartTime *time.Time `locationName:"startTime" type:"timestamp"`
+}
+
+// String returns the string representation.
+func (s GetRelationalDatabaseLogEventsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s GetRelationalDatabaseLogEventsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetRelationalDatabaseLogEventsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetRelationalDatabaseLogEventsInput"}
+	if s.LogStreamName == nil {
+		invalidParams.Add(request.NewErrParamRequired("LogStreamName"))
+	}
+	if s.RelationalDatabaseName == nil {
+		invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseName"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetEndTime sets the EndTime field's value.
+func (s *GetRelationalDatabaseLogEventsInput) SetEndTime(v time.Time) *GetRelationalDatabaseLogEventsInput {
+	s.EndTime = &v
+	return s
+}
+
+// SetLogStreamName sets the LogStreamName field's value.
+func (s *GetRelationalDatabaseLogEventsInput) SetLogStreamName(v string) *GetRelationalDatabaseLogEventsInput {
+	s.LogStreamName = &v
+	return s
+}
+
+// SetPageToken sets the PageToken field's value.
+func (s *GetRelationalDatabaseLogEventsInput) SetPageToken(v string) *GetRelationalDatabaseLogEventsInput {
+	s.PageToken = &v
+	return s
+}
+
+// SetRelationalDatabaseName sets the RelationalDatabaseName field's value.
+func (s *GetRelationalDatabaseLogEventsInput) SetRelationalDatabaseName(v string) *GetRelationalDatabaseLogEventsInput {
+	s.RelationalDatabaseName = &v
+	return s
+}
+
+// SetStartFromHead sets the StartFromHead field's value.
+func (s *GetRelationalDatabaseLogEventsInput) SetStartFromHead(v bool) *GetRelationalDatabaseLogEventsInput {
+	s.StartFromHead = &v
+	return s
+}
+
+// SetStartTime sets the StartTime field's value.
+func (s *GetRelationalDatabaseLogEventsInput) SetStartTime(v time.Time) *GetRelationalDatabaseLogEventsInput {
+	s.StartTime = &v
+	return s
+}
+
+type GetRelationalDatabaseLogEventsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A token used for advancing to the previous page of results from your get
+	// relational database log events request.
+	NextBackwardToken *string `locationName:"nextBackwardToken" type:"string"`
+
+	// A token used for advancing to the next page of results from your get relational
+	// database log events request.
+	NextForwardToken *string `locationName:"nextForwardToken" type:"string"`
+
+	// An object describing the result of your get relational database log events
+	// request.
+	ResourceLogEvents []*LogEvent `locationName:"resourceLogEvents" type:"list"`
+}
+
+// String returns the string representation.
+func (s GetRelationalDatabaseLogEventsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s GetRelationalDatabaseLogEventsOutput) GoString() string {
+	return s.String()
+}
+
+// SetNextBackwardToken sets the NextBackwardToken field's value.
+func (s *GetRelationalDatabaseLogEventsOutput) SetNextBackwardToken(v string) *GetRelationalDatabaseLogEventsOutput {
+	s.NextBackwardToken = &v
+	return s
+}
+
+// SetNextForwardToken sets the NextForwardToken field's value.
+func (s *GetRelationalDatabaseLogEventsOutput) SetNextForwardToken(v string) *GetRelationalDatabaseLogEventsOutput {
+	s.NextForwardToken = &v
+	return s
+}
+
+// SetResourceLogEvents sets the ResourceLogEvents field's value.
+func (s *GetRelationalDatabaseLogEventsOutput) SetResourceLogEvents(v []*LogEvent) *GetRelationalDatabaseLogEventsOutput {
+	s.ResourceLogEvents = v
+	return s
+}
+
+type GetRelationalDatabaseLogStreamsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of your database for which to get log streams.
+	//
+	// RelationalDatabaseName is a required field
+	RelationalDatabaseName *string `locationName:"relationalDatabaseName" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+func (s GetRelationalDatabaseLogStreamsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s GetRelationalDatabaseLogStreamsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetRelationalDatabaseLogStreamsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetRelationalDatabaseLogStreamsInput"}
+	if s.RelationalDatabaseName == nil {
+		invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseName"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetRelationalDatabaseName sets the RelationalDatabaseName field's value.
+func (s *GetRelationalDatabaseLogStreamsInput) SetRelationalDatabaseName(v string) *GetRelationalDatabaseLogStreamsInput {
+	s.RelationalDatabaseName = &v
+	return s
+}
+
+type GetRelationalDatabaseLogStreamsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An object describing the result of your get relational database log streams
+	// request.
+	LogStreams []*string `locationName:"logStreams" type:"list"`
+}
+
+// String returns the string representation.
+func (s GetRelationalDatabaseLogStreamsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s GetRelationalDatabaseLogStreamsOutput) GoString() string {
+	return s.String()
+}
+
+// SetLogStreams sets the LogStreams field's value.
+func (s *GetRelationalDatabaseLogStreamsOutput) SetLogStreams(v []*string) *GetRelationalDatabaseLogStreamsOutput {
+	s.LogStreams = v
+	return s
+}
+
+type GetRelationalDatabaseMasterUserPasswordInput struct {
+	_ struct{} `type:"structure"`
+
+	// The password version to return.
+	//
+	// Specifying CURRENT or PREVIOUS returns the current or previous passwords
+	// respectively. Specifying PENDING returns the newest version of the password
+	// that will rotate to CURRENT. After the PENDING password rotates to CURRENT,
+	// the PENDING password is no longer available.
+	//
+	// Default: CURRENT
+	PasswordVersion *string `locationName:"passwordVersion" type:"string" enum:"RelationalDatabasePasswordVersion"`
+
+	// The name of your database for which to get the master user password.
+	//
+	// RelationalDatabaseName is a required field
+	RelationalDatabaseName *string `locationName:"relationalDatabaseName" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+func (s GetRelationalDatabaseMasterUserPasswordInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s GetRelationalDatabaseMasterUserPasswordInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetRelationalDatabaseMasterUserPasswordInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetRelationalDatabaseMasterUserPasswordInput"}
+	if s.RelationalDatabaseName == nil {
+		invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseName"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetPasswordVersion sets the PasswordVersion field's value.
+func (s *GetRelationalDatabaseMasterUserPasswordInput) SetPasswordVersion(v string) *GetRelationalDatabaseMasterUserPasswordInput {
+	s.PasswordVersion = &v
+	return s
+}
+
+// SetRelationalDatabaseName sets the RelationalDatabaseName field's value.
+func (s *GetRelationalDatabaseMasterUserPasswordInput) SetRelationalDatabaseName(v string) *GetRelationalDatabaseMasterUserPasswordInput {
+	s.RelationalDatabaseName = &v
+	return s
+}
+
+type GetRelationalDatabaseMasterUserPasswordOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The timestamp when the specified version of the master user password was
+	// created.
+	CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`
+
+	// The master user password for the password version specified.
+	MasterUserPassword *string `locationName:"masterUserPassword" type:"string" sensitive:"true"`
+}
+
+// String returns the string representation.
+func (s GetRelationalDatabaseMasterUserPasswordOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s GetRelationalDatabaseMasterUserPasswordOutput) GoString() string {
+	return s.String()
+}
+
+// SetCreatedAt sets the CreatedAt field's value.
+func (s *GetRelationalDatabaseMasterUserPasswordOutput) SetCreatedAt(v time.Time) *GetRelationalDatabaseMasterUserPasswordOutput {
+	s.CreatedAt = &v
+	return s
+}
+
+// SetMasterUserPassword sets the MasterUserPassword field's value.
+func (s *GetRelationalDatabaseMasterUserPasswordOutput) SetMasterUserPassword(v string) *GetRelationalDatabaseMasterUserPasswordOutput {
+	s.MasterUserPassword = &v
+	return s
+}
+
+type GetRelationalDatabaseMetricDataInput struct {
+	_ struct{} `type:"structure"`
+
+	// The end of the time interval from which to get metric data.
+	//
+	// Constraints:
+	//
+	// * Specified in Coordinated Universal Time (UTC).
+	//
+	// * Specified in the Unix time format. For example, if you wish to use an
+	// end time of October 1, 2018, at 8 PM UTC, then you input 1538424000 as
+	// the end time.
+	//
+	// EndTime is a required field
+	EndTime *time.Time `locationName:"endTime" type:"timestamp" required:"true"`
+
+	// The metric for which you want to return information.
+	//
+	// Valid relational database metric names are listed below, along with the most
+	// useful statistics to include in your request, and the published unit value.
+	// All relational database metric data is available in 1-minute (60 seconds)
+	// granularity.
+	//
+	// * CPUUtilization - The percentage of CPU utilization currently in use
+	// on the database. Statistics: The most useful statistics are Maximum and
+	// Average. Unit: The published unit is Percent.
+	//
+	// * DatabaseConnections - The number of database connections in use. Statistics:
+	// The most useful statistics are Maximum and Sum. Unit: The published unit
+	// is Count.
+	//
+	// * DiskQueueDepth - The number of outstanding IOs (read/write requests)
+	// that are waiting to access the disk. Statistics: The most useful statistic
+	// is Sum. Unit: The published unit is Count.
+	//
+	// * FreeStorageSpace - The amount of available storage space. Statistics:
+	// The most useful statistic is Sum. Unit: The published unit is Bytes.
+	//
+	// * NetworkReceiveThroughput - The incoming (Receive) network traffic on
+	// the database, including both customer database traffic and AWS traffic
+	// used for monitoring and replication. Statistics: The most useful statistic
+	// is Average. Unit: The published unit is Bytes/Second.
+	//
+	// * NetworkTransmitThroughput - The outgoing (Transmit) network traffic
+	// on the database, including both customer database traffic and AWS traffic
+	// used for monitoring and replication. Statistics: The most useful statistic
+	// is Average. Unit: The published unit is Bytes/Second.
+	//
+	// MetricName is a required field
+	MetricName *string `locationName:"metricName" type:"string" required:"true" enum:"RelationalDatabaseMetricName"`
+
+	// The granularity, in seconds, of the returned data points.
+	//
+	// All relational database metric data is available in 1-minute (60 seconds)
+	// granularity.
+	//
+	// Period is a required field
+	Period *int64 `locationName:"period" min:"60" type:"integer" required:"true"`
+
+	// The name of your database from which to get metric data.
+	//
+	// RelationalDatabaseName is a required field
+	RelationalDatabaseName *string `locationName:"relationalDatabaseName" type:"string" required:"true"`
+
+	// The start of the time interval from which to get metric data.
+	//
+	// Constraints:
+	//
+	// * Specified in Coordinated Universal Time (UTC).
+	//
+	// * Specified in the Unix time format. For example, if you wish to use a
+	// start time of October 1, 2018, at 8 PM UTC, then you input 1538424000
+	// as the start time.
+	//
+	// StartTime is a required field
+	StartTime *time.Time `locationName:"startTime" type:"timestamp" required:"true"`
+
+	// The statistic for the metric.
+	//
+	// The following statistics are available:
+	//
+	// * Minimum - The lowest value observed during the specified period. Use
+	// this value to determine low volumes of activity for your application.
+	//
+	// * Maximum - The highest value observed during the specified period. Use
+	// this value to determine high volumes of activity for your application.
+	//
+	// * Sum - All values submitted for the matching metric added together. You
+	// can use this statistic to determine the total volume of a metric.
+	//
+	// * Average - The value of Sum / SampleCount during the specified period.
+	// By comparing this statistic with the Minimum and Maximum values, you can
+	// determine the full scope of a metric and how close the average use is
+	// to the Minimum and Maximum values. This comparison helps you to know when
+	// to increase or decrease your resources.
+	//
+	// * SampleCount - The count, or number, of data points used for the statistical
+	// calculation.
+	//
+	// Statistics is a required field
+	Statistics []*string `locationName:"statistics" type:"list" required:"true"`
+
+	// The unit for the metric data request. Valid units depend on the metric data
+	// being requested. For the valid units with each available metric, see the
+	// metricName parameter.
+	//
+	// Unit is a required field
+	Unit *string `locationName:"unit" type:"string" required:"true" enum:"MetricUnit"`
+}
+
+// String returns the string representation.
+func (s GetRelationalDatabaseMetricDataInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s GetRelationalDatabaseMetricDataInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetRelationalDatabaseMetricDataInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetRelationalDatabaseMetricDataInput"}
+	if s.EndTime == nil {
+		invalidParams.Add(request.NewErrParamRequired("EndTime"))
+	}
+	if s.MetricName == nil {
+		invalidParams.Add(request.NewErrParamRequired("MetricName"))
+	}
+	if s.Period == nil {
+		invalidParams.Add(request.NewErrParamRequired("Period"))
+	}
+	if s.Period != nil && *s.Period < 60 {
+		invalidParams.Add(request.NewErrParamMinValue("Period", 60))
+	}
+	if s.RelationalDatabaseName == nil {
+		invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseName"))
+	}
+	if s.StartTime == nil {
+		invalidParams.Add(request.NewErrParamRequired("StartTime"))
+	}
+	if s.Statistics == nil {
+		invalidParams.Add(request.NewErrParamRequired("Statistics"))
+	}
+	if s.Unit == nil {
+		invalidParams.Add(request.NewErrParamRequired("Unit"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetEndTime sets the EndTime field's value.
+func (s *GetRelationalDatabaseMetricDataInput) SetEndTime(v time.Time) *GetRelationalDatabaseMetricDataInput {
+	s.EndTime = &v
+	return s
+}
+
+// SetMetricName sets the MetricName field's value.
+func (s *GetRelationalDatabaseMetricDataInput) SetMetricName(v string) *GetRelationalDatabaseMetricDataInput {
+	s.MetricName = &v
+	return s
+}
+
+// SetPeriod sets the Period field's value.
+func (s *GetRelationalDatabaseMetricDataInput) SetPeriod(v int64) *GetRelationalDatabaseMetricDataInput {
+	s.Period = &v
+	return s
+}
+
+// SetRelationalDatabaseName sets the RelationalDatabaseName field's value.
+func (s *GetRelationalDatabaseMetricDataInput) SetRelationalDatabaseName(v string) *GetRelationalDatabaseMetricDataInput {
+	s.RelationalDatabaseName = &v
+	return s
+}
+
+// SetStartTime sets the StartTime field's value.
+func (s *GetRelationalDatabaseMetricDataInput) SetStartTime(v time.Time) *GetRelationalDatabaseMetricDataInput {
+	s.StartTime = &v
+	return s
+}
+
+// SetStatistics sets the Statistics field's value.
+func (s *GetRelationalDatabaseMetricDataInput) SetStatistics(v []*string) *GetRelationalDatabaseMetricDataInput {
+	s.Statistics = v
+	return s
+}
+
+// SetUnit sets the Unit field's value.
+func (s *GetRelationalDatabaseMetricDataInput) SetUnit(v string) *GetRelationalDatabaseMetricDataInput {
+	s.Unit = &v
+	return s
+}
+
+type GetRelationalDatabaseMetricDataOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An array of objects that describe the metric data returned.
+	MetricData []*MetricDatapoint `locationName:"metricData" type:"list"`
+
+	// The name of the metric returned.
+	MetricName *string `locationName:"metricName" type:"string" enum:"RelationalDatabaseMetricName"`
+}
+
+// String returns the string representation.
+func (s GetRelationalDatabaseMetricDataOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s GetRelationalDatabaseMetricDataOutput) GoString() string {
+	return s.String()
+}
+
+// SetMetricData sets the MetricData field's value.
+func (s *GetRelationalDatabaseMetricDataOutput) SetMetricData(v []*MetricDatapoint) *GetRelationalDatabaseMetricDataOutput {
+	s.MetricData = v
+	return s
+}
+
+// SetMetricName sets the MetricName field's value.
+func (s *GetRelationalDatabaseMetricDataOutput) SetMetricName(v string) *GetRelationalDatabaseMetricDataOutput {
+	s.MetricName = &v
+	return s
+}
+
+type GetRelationalDatabaseOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An object describing the specified database.
+	RelationalDatabase *RelationalDatabase `locationName:"relationalDatabase" type:"structure"`
+}
+
+// String returns the string representation.
+func (s GetRelationalDatabaseOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s GetRelationalDatabaseOutput) GoString() string {
+	return s.String()
+}
+
+// SetRelationalDatabase sets the RelationalDatabase field's value.
+func (s *GetRelationalDatabaseOutput) SetRelationalDatabase(v *RelationalDatabase) *GetRelationalDatabaseOutput {
+	s.RelationalDatabase = v
+	return s
+}
+
+type GetRelationalDatabaseParametersInput struct {
+	_ struct{} `type:"structure"`
+
+	// The token to advance to the next page of results from your request.
+	//
+	// To get a page token, perform an initial GetRelationalDatabaseParameters request.
+	// If your results are paginated, the response will return a next page token
+	// that you can specify as the page token in a subsequent request.
+	PageToken *string `locationName:"pageToken" type:"string"`
+
+	// The name of your database for which to get parameters.
+	//
+	// RelationalDatabaseName is a required field
+	RelationalDatabaseName *string `locationName:"relationalDatabaseName" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+func (s GetRelationalDatabaseParametersInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s GetRelationalDatabaseParametersInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetRelationalDatabaseParametersInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetRelationalDatabaseParametersInput"}
+	if s.RelationalDatabaseName == nil {
+		invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseName"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetPageToken sets the PageToken field's value.
+func (s *GetRelationalDatabaseParametersInput) SetPageToken(v string) *GetRelationalDatabaseParametersInput {
+	s.PageToken = &v
+	return s
+}
+
+// SetRelationalDatabaseName sets the RelationalDatabaseName field's value.
+func (s *GetRelationalDatabaseParametersInput) SetRelationalDatabaseName(v string) *GetRelationalDatabaseParametersInput {
+	s.RelationalDatabaseName = &v
+	return s
+}
+
+type GetRelationalDatabaseParametersOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The token to advance to the next page of results from your request.
+	//
+	// A next page token is not returned if there are no more results to display.
+	//
+	// To get the next page of results, perform another GetRelationalDatabaseParameters
+	// request and specify the next page token using the pageToken parameter.
+	NextPageToken *string `locationName:"nextPageToken" type:"string"`
+
+	// An object describing the result of your get relational database parameters
+	// request.
+	Parameters []*RelationalDatabaseParameter `locationName:"parameters" type:"list"`
+}
+
+// String returns the string representation.
+func (s GetRelationalDatabaseParametersOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s GetRelationalDatabaseParametersOutput) GoString() string {
+	return s.String()
+}
+
+// SetNextPageToken sets the NextPageToken field's value.
+func (s *GetRelationalDatabaseParametersOutput) SetNextPageToken(v string) *GetRelationalDatabaseParametersOutput {
+	s.NextPageToken = &v
+	return s
+}
+
+// SetParameters sets the Parameters field's value.
+func (s *GetRelationalDatabaseParametersOutput) SetParameters(v []*RelationalDatabaseParameter) *GetRelationalDatabaseParametersOutput {
+	s.Parameters = v
+	return s
+}
+
+type GetRelationalDatabaseSnapshotInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the database snapshot for which to get information.
+	//
+	// RelationalDatabaseSnapshotName is a required field
+	RelationalDatabaseSnapshotName *string `locationName:"relationalDatabaseSnapshotName" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+func (s GetRelationalDatabaseSnapshotInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s GetRelationalDatabaseSnapshotInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetRelationalDatabaseSnapshotInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetRelationalDatabaseSnapshotInput"}
+	if s.RelationalDatabaseSnapshotName == nil {
+		invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseSnapshotName"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetRelationalDatabaseSnapshotName sets the RelationalDatabaseSnapshotName field's value.
+func (s *GetRelationalDatabaseSnapshotInput) SetRelationalDatabaseSnapshotName(v string) *GetRelationalDatabaseSnapshotInput {
+	s.RelationalDatabaseSnapshotName = &v
+	return s
+}
+
+type GetRelationalDatabaseSnapshotOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An object describing the specified database snapshot.
+	RelationalDatabaseSnapshot *RelationalDatabaseSnapshot `locationName:"relationalDatabaseSnapshot" type:"structure"`
+}
+
+// String returns the string representation.
+func (s GetRelationalDatabaseSnapshotOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s GetRelationalDatabaseSnapshotOutput) GoString() string {
+	return s.String()
+}
+
+// SetRelationalDatabaseSnapshot sets the RelationalDatabaseSnapshot field's value.
+func (s *GetRelationalDatabaseSnapshotOutput) SetRelationalDatabaseSnapshot(v *RelationalDatabaseSnapshot) *GetRelationalDatabaseSnapshotOutput {
+	s.RelationalDatabaseSnapshot = v
+	return s
+}
+
+type GetRelationalDatabaseSnapshotsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The token to advance to the next page of results from your request.
+	//
+	// To get a page token, perform an initial GetRelationalDatabaseSnapshots request.
+	// If your results are paginated, the response will return a next page token
+	// that you can specify as the page token in a subsequent request.
+	PageToken *string `locationName:"pageToken" type:"string"`
+}
+
+// String returns the string representation.
+func (s GetRelationalDatabaseSnapshotsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s GetRelationalDatabaseSnapshotsInput) GoString() string {
+	return s.String()
+}
+
+// SetPageToken sets the PageToken field's value.
+func (s *GetRelationalDatabaseSnapshotsInput) SetPageToken(v string) *GetRelationalDatabaseSnapshotsInput {
+	s.PageToken = &v
+	return s
+}
+
+type GetRelationalDatabaseSnapshotsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The token to advance to the next page of results from your request.
+	//
+	// A next page token is not returned if there are no more results to display.
+	//
+	// To get the next page of results, perform another GetRelationalDatabaseSnapshots
+	// request and specify the next page token using the pageToken parameter.
+	NextPageToken *string `locationName:"nextPageToken" type:"string"`
+
+	// An object describing the result of your get relational database snapshots
+	// request.
+	RelationalDatabaseSnapshots []*RelationalDatabaseSnapshot `locationName:"relationalDatabaseSnapshots" type:"list"`
+}
+
+// String returns the string representation.
+func (s GetRelationalDatabaseSnapshotsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s GetRelationalDatabaseSnapshotsOutput) GoString() string {
+	return s.String()
+}
+
+// SetNextPageToken sets the NextPageToken field's value.
+func (s *GetRelationalDatabaseSnapshotsOutput) SetNextPageToken(v string) *GetRelationalDatabaseSnapshotsOutput {
+	s.NextPageToken = &v
+	return s
+}
+
+// SetRelationalDatabaseSnapshots sets the RelationalDatabaseSnapshots field's value.
+func (s *GetRelationalDatabaseSnapshotsOutput) SetRelationalDatabaseSnapshots(v []*RelationalDatabaseSnapshot) *GetRelationalDatabaseSnapshotsOutput {
+	s.RelationalDatabaseSnapshots = v
+	return s
+}
+
+type GetRelationalDatabasesInput struct {
+	_ struct{} `type:"structure"`
+
+	// The token to advance to the next page of results from your request.
+	//
+	// To get a page token, perform an initial GetRelationalDatabases request. If
+	// your results are paginated, the response will return a next page token that
+	// you can specify as the page token in a subsequent request.
+	PageToken *string `locationName:"pageToken" type:"string"`
+}
+
+// String returns the string representation.
+func (s GetRelationalDatabasesInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s GetRelationalDatabasesInput) GoString() string {
+	return s.String()
+}
+
+// SetPageToken sets the PageToken field's value.
+func (s *GetRelationalDatabasesInput) SetPageToken(v string) *GetRelationalDatabasesInput {
+	s.PageToken = &v
+	return s
+}
+
+type GetRelationalDatabasesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The token to advance to the next page of results from your request.
+	//
+	// A next page token is not returned if there are no more results to display.
+	//
+	// To get the next page of results, perform another GetRelationalDatabases request
+	// and specify the next page token using the pageToken parameter.
+	NextPageToken *string `locationName:"nextPageToken" type:"string"`
+
+	// An object describing the result of your get relational databases request.
+	RelationalDatabases []*RelationalDatabase `locationName:"relationalDatabases" type:"list"`
+}
+
+// String returns the string representation.
+func (s GetRelationalDatabasesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s GetRelationalDatabasesOutput) GoString() string {
+	return s.String()
+}
+
+// SetNextPageToken sets the NextPageToken field's value.
+func (s *GetRelationalDatabasesOutput) SetNextPageToken(v string) *GetRelationalDatabasesOutput {
+	s.NextPageToken = &v
+	return s
+}
+
+// SetRelationalDatabases sets the RelationalDatabases field's value.
+func (s *GetRelationalDatabasesOutput) SetRelationalDatabases(v []*RelationalDatabase) *GetRelationalDatabasesOutput {
+	s.RelationalDatabases = v
+	return s
+}
+
+type GetStaticIpInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the static IP in Lightsail.
+	//
+	// StaticIpName is a required field
+	StaticIpName *string `locationName:"staticIpName" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+func (s GetStaticIpInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s GetStaticIpInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetStaticIpInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetStaticIpInput"}
+	if s.StaticIpName == nil {
+		invalidParams.Add(request.NewErrParamRequired("StaticIpName"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetStaticIpName sets the StaticIpName field's value.
+func (s *GetStaticIpInput) SetStaticIpName(v string) *GetStaticIpInput {
+	s.StaticIpName = &v
+	return s
+}
+
+type GetStaticIpOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An array of key-value pairs containing information about the requested static
+	// IP.
+	StaticIp *StaticIp `locationName:"staticIp" type:"structure"`
+}
+
+// String returns the string representation.
+func (s GetStaticIpOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s GetStaticIpOutput) GoString() string {
+	return s.String()
+}
+
+// SetStaticIp sets the StaticIp field's value.
+func (s *GetStaticIpOutput) SetStaticIp(v *StaticIp) *GetStaticIpOutput {
+	s.StaticIp = v
+	return s
+}
+
+type GetStaticIpsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The token to advance to the next page of results from your request.
+ //
+ // To get a page token, perform an initial GetStaticIps request. If your results
+ // are paginated, the response will return a next page token that you can specify
+ // as the page token in a subsequent request.
+ PageToken *string `locationName:"pageToken" type:"string"`
+}
+
+// String returns the string representation
+func (s GetStaticIpsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetStaticIpsInput) GoString() string {
+ return s.String()
+}
+
+// SetPageToken sets the PageToken field's value.
+func (s *GetStaticIpsInput) SetPageToken(v string) *GetStaticIpsInput {
+ s.PageToken = &v
+ return s
+}
+
+type GetStaticIpsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The token to advance to the next page of results from your request.
+ //
+ // A next page token is not returned if there are no more results to display.
+ //
+ // To get the next page of results, perform another GetStaticIps request and
+ // specify the next page token using the pageToken parameter.
+ NextPageToken *string `locationName:"nextPageToken" type:"string"`
+
+ // An array of key-value pairs containing information about your get static
+ // IPs request.
+ StaticIps []*StaticIp `locationName:"staticIps" type:"list"`
+}
+
+// String returns the string representation
+func (s GetStaticIpsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetStaticIpsOutput) GoString() string {
+ return s.String()
+}
+
+// SetNextPageToken sets the NextPageToken field's value.
+func (s *GetStaticIpsOutput) SetNextPageToken(v string) *GetStaticIpsOutput {
+ s.NextPageToken = &v
+ return s
+}
+
+// SetStaticIps sets the StaticIps field's value.
+func (s *GetStaticIpsOutput) SetStaticIps(v []*StaticIp) *GetStaticIpsOutput {
+ s.StaticIps = v
+ return s
+}
+
+// Describes the request headers that a Lightsail distribution bases caching
+// on.
+//
+// For the headers that you specify, your distribution caches separate versions
+// of the specified content based on the header values in viewer requests. For
+// example, suppose viewer requests for logo.jpg contain a custom product header
+// that has a value of either acme or apex, and you configure your distribution
+// to cache your content based on values in the product header. Your distribution
+// forwards the product header to the origin and caches the response from the
+// origin once for each header value.
+type HeaderObject struct {
+ _ struct{} `type:"structure"`
+
+ // The specific headers to forward to your distribution's origin.
+ HeadersAllowList []*string `locationName:"headersAllowList" type:"list"`
+
+ // The headers that you want your distribution to forward to your origin and
+ // base caching on.
+ //
+ // You can configure your distribution to do one of the following:
+ //
+ // * all - Forward all headers to your origin.
+ //
+ // * none - Forward only the default headers.
+ //
+ // * allow-list - Forward only the headers you specify using the headersAllowList
+ // parameter.
+ Option *string `locationName:"option" type:"string" enum:"ForwardValues"`
+}
+
+// String returns the string representation
+func (s HeaderObject) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HeaderObject) GoString() string {
+ return s.String()
+}
+
+// SetHeadersAllowList sets the HeadersAllowList field's value.
+func (s *HeaderObject) SetHeadersAllowList(v []*string) *HeaderObject {
+ s.HeadersAllowList = v
+ return s
+}
+
+// SetOption sets the Option field's value.
+func (s *HeaderObject) SetOption(v string) *HeaderObject {
+ s.Option = &v
+ return s
+}
+
+// Describes the public SSH host keys or the RDP certificate.
+type HostKeyAttributes struct {
+ _ struct{} `type:"structure"`
+
+ // The SSH host key algorithm or the RDP certificate format.
+ //
+ // For SSH host keys, the algorithm may be ssh-rsa, ecdsa-sha2-nistp256, ssh-ed25519,
+ // etc. For RDP certificates, the algorithm is always x509-cert.
+ Algorithm *string `locationName:"algorithm" type:"string"`
+
+ // The SHA-1 fingerprint of the returned SSH host key or RDP certificate.
+ //
+ // * Example of an SHA-1 SSH fingerprint: SHA1:1CHH6FaAaXjtFOsR/t83vf91SR0
+ //
+ // * Example of an SHA-1 RDP fingerprint: af:34:51:fe:09:f0:e0:da:b8:4e:56:ca:60:c2:10:ff:38:06:db:45
+ FingerprintSHA1 *string `locationName:"fingerprintSHA1" type:"string"`
+
+ // The SHA-256 fingerprint of the returned SSH host key or RDP certificate.
+ //
+ // * Example of an SHA-256 SSH fingerprint: SHA256:KTsMnRBh1IhD17HpdfsbzeGA4jOijm5tyXsMjKVbB8o
+ //
+ // * Example of an SHA-256 RDP fingerprint: 03:9b:36:9f:4b:de:4e:61:70:fc:7c:c9:78:e7:d2:1a:1c:25:a8:0c:91:f6:7c:e4:d6:a0:85:c8:b4:53:99:68
+ FingerprintSHA256 *string `locationName:"fingerprintSHA256" type:"string"`
+
+ // The returned RDP certificate is not valid after this point in time.
+ //
+ // This value is listed only for RDP certificates.
+ NotValidAfter *time.Time `locationName:"notValidAfter" type:"timestamp"`
+
+ // The returned RDP certificate is valid after this point in time.
+ //
+ // This value is listed only for RDP certificates.
+ NotValidBefore *time.Time `locationName:"notValidBefore" type:"timestamp"`
+
+ // The public SSH host key or the RDP certificate.
+ PublicKey *string `locationName:"publicKey" type:"string"`
+
+ // The time that the SSH host key or RDP certificate was recorded by Lightsail.
+ WitnessedAt *time.Time `locationName:"witnessedAt" type:"timestamp"`
+}
+
+// String returns the string representation
+func (s HostKeyAttributes) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HostKeyAttributes) GoString() string {
+ return s.String()
+}
+
+// SetAlgorithm sets the Algorithm field's value.
+func (s *HostKeyAttributes) SetAlgorithm(v string) *HostKeyAttributes {
+ s.Algorithm = &v
+ return s
+}
+
+// SetFingerprintSHA1 sets the FingerprintSHA1 field's value.
+func (s *HostKeyAttributes) SetFingerprintSHA1(v string) *HostKeyAttributes {
+ s.FingerprintSHA1 = &v
+ return s
+}
+
+// SetFingerprintSHA256 sets the FingerprintSHA256 field's value.
+func (s *HostKeyAttributes) SetFingerprintSHA256(v string) *HostKeyAttributes {
+ s.FingerprintSHA256 = &v
+ return s
+}
+
+// SetNotValidAfter sets the NotValidAfter field's value.
+func (s *HostKeyAttributes) SetNotValidAfter(v time.Time) *HostKeyAttributes {
+ s.NotValidAfter = &v
+ return s
+}
+
+// SetNotValidBefore sets the NotValidBefore field's value.
+func (s *HostKeyAttributes) SetNotValidBefore(v time.Time) *HostKeyAttributes {
+ s.NotValidBefore = &v
+ return s
+}
+
+// SetPublicKey sets the PublicKey field's value.
+func (s *HostKeyAttributes) SetPublicKey(v string) *HostKeyAttributes {
+ s.PublicKey = &v
+ return s
+}
+
+// SetWitnessedAt sets the WitnessedAt field's value.
+func (s *HostKeyAttributes) SetWitnessedAt(v time.Time) *HostKeyAttributes {
+ s.WitnessedAt = &v
+ return s
+}
+
+type ImportKeyPairInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the key pair for which you want to import the public key.
+ //
+ // KeyPairName is a required field
+ KeyPairName *string `locationName:"keyPairName" type:"string" required:"true"`
+
+ // A base64-encoded public key of the ssh-rsa type.
+ //
+ // PublicKeyBase64 is a required field
+ PublicKeyBase64 *string `locationName:"publicKeyBase64" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ImportKeyPairInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ImportKeyPairInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ImportKeyPairInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ImportKeyPairInput"}
+ if s.KeyPairName == nil {
+ invalidParams.Add(request.NewErrParamRequired("KeyPairName"))
+ }
+ if s.PublicKeyBase64 == nil {
+ invalidParams.Add(request.NewErrParamRequired("PublicKeyBase64"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetKeyPairName sets the KeyPairName field's value.
+func (s *ImportKeyPairInput) SetKeyPairName(v string) *ImportKeyPairInput {
+ s.KeyPairName = &v
+ return s
+}
+
+// SetPublicKeyBase64 sets the PublicKeyBase64 field's value.
+func (s *ImportKeyPairInput) SetPublicKeyBase64(v string) *ImportKeyPairInput {
+ s.PublicKeyBase64 = &v
+ return s
+}
+
+type ImportKeyPairOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operation *Operation `locationName:"operation" type:"structure"`
+}
+
+// String returns the string representation
+func (s ImportKeyPairOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ImportKeyPairOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperation sets the Operation field's value.
+func (s *ImportKeyPairOutput) SetOperation(v *Operation) *ImportKeyPairOutput {
+ s.Operation = v
+ return s
+}
+
+// Describes the origin resource of an Amazon Lightsail content delivery network
+// (CDN) distribution.
+//
+// An origin can be a Lightsail instance or load balancer. A distribution pulls
+// content from an origin, caches it, and serves it to viewers via a worldwide
+// network of edge servers.
+type InputOrigin struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the origin resource.
+ Name *string `locationName:"name" type:"string"`
+
+ // The protocol that your Amazon Lightsail distribution uses when establishing
+ // a connection with your origin to pull content.
+ ProtocolPolicy *string `locationName:"protocolPolicy" type:"string" enum:"OriginProtocolPolicyEnum"`
+
+ // The AWS Region name of the origin resource.
+ RegionName *string `locationName:"regionName" type:"string" enum:"RegionName"`
+}
+
+// String returns the string representation
+func (s InputOrigin) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InputOrigin) GoString() string {
+ return s.String()
+}
+
+// SetName sets the Name field's value.
+func (s *InputOrigin) SetName(v string) *InputOrigin {
+ s.Name = &v
+ return s
+}
+
+// SetProtocolPolicy sets the ProtocolPolicy field's value.
+func (s *InputOrigin) SetProtocolPolicy(v string) *InputOrigin {
+ s.ProtocolPolicy = &v
+ return s
+}
+
+// SetRegionName sets the RegionName field's value.
+func (s *InputOrigin) SetRegionName(v string) *InputOrigin {
+ s.RegionName = &v
+ return s
+}
+
+// Describes an instance (a virtual private server).
+type Instance struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects representing the add-ons enabled on the instance.
+ AddOns []*AddOn `locationName:"addOns" type:"list"`
+
+ // The Amazon Resource Name (ARN) of the instance (e.g., arn:aws:lightsail:us-east-2:123456789101:Instance/244ad76f-8aad-4741-809f-12345EXAMPLE).
+ Arn *string `locationName:"arn" type:"string"`
+
+ // The blueprint ID (e.g., os_amlinux_2016_03).
+ BlueprintId *string `locationName:"blueprintId" type:"string"`
+
+ // The friendly name of the blueprint (e.g., Amazon Linux).
+ BlueprintName *string `locationName:"blueprintName" type:"string"`
+
+ // The bundle for the instance (e.g., micro_1_0).
+ BundleId *string `locationName:"bundleId" type:"string"`
+
+ // The timestamp when the instance was created (e.g., 1479734909.17) in Unix
+ // time format.
+ CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`
+
+ // The size of the vCPU and the amount of RAM for the instance.
+ Hardware *InstanceHardware `locationName:"hardware" type:"structure"`
+
+ // The IP address type of the instance.
+ //
+ // The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6.
+ IpAddressType *string `locationName:"ipAddressType" type:"string" enum:"IpAddressType"`
+
+ // The IPv6 addresses of the instance.
+ Ipv6Addresses []*string `locationName:"ipv6Addresses" type:"list"`
+
+ // A Boolean value indicating whether this instance has a static IP assigned
+ // to it.
+ IsStaticIp *bool `locationName:"isStaticIp" type:"boolean"`
+
+ // The region name and Availability Zone where the instance is located.
+ Location *ResourceLocation `locationName:"location" type:"structure"`
+
+ // The name the user gave the instance (e.g., Amazon_Linux-1GB-Ohio-1).
+ Name *string `locationName:"name" type:"string"`
+
+ // Information about the public ports and monthly data transfer rates for the
+ // instance.
+ Networking *InstanceNetworking `locationName:"networking" type:"structure"`
+
+ // The private IP address of the instance.
+ PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"`
+
+ // The public IP address of the instance.
+ PublicIpAddress *string `locationName:"publicIpAddress" type:"string"`
+
+ // The type of resource (usually Instance).
+ ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"`
+
+ // The name of the SSH key being used to connect to the instance (e.g., LightsailDefaultKeyPair).
+ SshKeyName *string `locationName:"sshKeyName" type:"string"`
+
+ // The status code and the state (e.g., running) for the instance.
+ State *InstanceState `locationName:"state" type:"structure"`
+
+ // The support code. Include this code in your email to support when you have
+ // questions about an instance or another resource in Lightsail. This code enables
+ // our support team to look up your Lightsail information more easily.
+ SupportCode *string `locationName:"supportCode" type:"string"`
+
+ // The tag keys and optional values for the resource. For more information about
+ // tags in Lightsail, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-tags).
+ Tags []*Tag `locationName:"tags" type:"list"`
+
+ // The user name for connecting to the instance (e.g., ec2-user).
+ Username *string `locationName:"username" type:"string"`
+}
+
+// String returns the string representation
+func (s Instance) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Instance) GoString() string {
+ return s.String()
+}
+
+// SetAddOns sets the AddOns field's value.
+func (s *Instance) SetAddOns(v []*AddOn) *Instance {
+ s.AddOns = v
+ return s
+}
+
+// SetArn sets the Arn field's value.
+func (s *Instance) SetArn(v string) *Instance {
+ s.Arn = &v
+ return s
+}
+
+// SetBlueprintId sets the BlueprintId field's value.
+func (s *Instance) SetBlueprintId(v string) *Instance {
+ s.BlueprintId = &v
+ return s
+}
+
+// SetBlueprintName sets the BlueprintName field's value.
+func (s *Instance) SetBlueprintName(v string) *Instance {
+ s.BlueprintName = &v
+ return s
+}
+
+// SetBundleId sets the BundleId field's value.
+func (s *Instance) SetBundleId(v string) *Instance {
+ s.BundleId = &v
+ return s
+}
+
+// SetCreatedAt sets the CreatedAt field's value.
+func (s *Instance) SetCreatedAt(v time.Time) *Instance {
+ s.CreatedAt = &v
+ return s
+}
+
+// SetHardware sets the Hardware field's value.
+func (s *Instance) SetHardware(v *InstanceHardware) *Instance {
+ s.Hardware = v
+ return s
+}
+
+// SetIpAddressType sets the IpAddressType field's value.
+func (s *Instance) SetIpAddressType(v string) *Instance {
+ s.IpAddressType = &v
+ return s
+}
+
+// SetIpv6Addresses sets the Ipv6Addresses field's value.
+func (s *Instance) SetIpv6Addresses(v []*string) *Instance {
+ s.Ipv6Addresses = v
+ return s
+}
+
+// SetIsStaticIp sets the IsStaticIp field's value.
+func (s *Instance) SetIsStaticIp(v bool) *Instance {
+ s.IsStaticIp = &v
+ return s
+}
+
+// SetLocation sets the Location field's value.
+func (s *Instance) SetLocation(v *ResourceLocation) *Instance {
+ s.Location = v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *Instance) SetName(v string) *Instance {
+ s.Name = &v
+ return s
+}
+
+// SetNetworking sets the Networking field's value.
+func (s *Instance) SetNetworking(v *InstanceNetworking) *Instance {
+ s.Networking = v
+ return s
+}
+
+// SetPrivateIpAddress sets the PrivateIpAddress field's value.
+func (s *Instance) SetPrivateIpAddress(v string) *Instance {
+ s.PrivateIpAddress = &v
+ return s
+}
+
+// SetPublicIpAddress sets the PublicIpAddress field's value.
+func (s *Instance) SetPublicIpAddress(v string) *Instance {
+ s.PublicIpAddress = &v
+ return s
+}
+
+// SetResourceType sets the ResourceType field's value.
+func (s *Instance) SetResourceType(v string) *Instance {
+ s.ResourceType = &v
+ return s
+}
+
+// SetSshKeyName sets the SshKeyName field's value.
+func (s *Instance) SetSshKeyName(v string) *Instance {
+ s.SshKeyName = &v
+ return s
+}
+
+// SetState sets the State field's value.
+func (s *Instance) SetState(v *InstanceState) *Instance {
+ s.State = v
+ return s
+}
+
+// SetSupportCode sets the SupportCode field's value.
+func (s *Instance) SetSupportCode(v string) *Instance {
+ s.SupportCode = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *Instance) SetTags(v []*Tag) *Instance {
+ s.Tags = v
+ return s
+}
+
+// SetUsername sets the Username field's value.
+func (s *Instance) SetUsername(v string) *Instance {
+ s.Username = &v
+ return s
+}
+
+// The parameters for gaining temporary access to one of your Amazon Lightsail
+// instances.
+type InstanceAccessDetails struct {
+ _ struct{} `type:"structure"`
+
+ // For SSH access, the public key to use when accessing your instance For OpenSSH
+ // clients (e.g., command line SSH), you should save this value to tempkey-cert.pub.
+ CertKey *string `locationName:"certKey" type:"string"`
+
+ // For SSH access, the date on which the temporary keys expire.
+ ExpiresAt *time.Time `locationName:"expiresAt" type:"timestamp"`
+
+ // Describes the public SSH host keys or the RDP certificate.
+ HostKeys []*HostKeyAttributes `locationName:"hostKeys" type:"list"`
+
+ // The name of this Amazon Lightsail instance.
+ InstanceName *string `locationName:"instanceName" type:"string"`
+
+ // The public IP address of the Amazon Lightsail instance.
+ IpAddress *string `locationName:"ipAddress" type:"string"`
+
+ // For RDP access, the password for your Amazon Lightsail instance. Password
+ // will be an empty string if the password for your new instance is not ready
+ // yet. When you create an instance, it can take up to 15 minutes for the instance
+ // to be ready.
+ //
+ // If you create an instance using any key pair other than the default (LightsailDefaultKeyPair),
+ // password will always be an empty string.
+ //
+ // If you change the Administrator password on the instance, Lightsail will
+ // continue to return the original password value. When accessing the instance
+ // using RDP, you need to manually enter the Administrator password after changing
+ // it from the default.
+ Password *string `locationName:"password" type:"string"`
+
+ // For a Windows Server-based instance, an object with the data you can use
+ // to retrieve your password. This is only needed if password is empty and the
+ // instance is not new (and therefore the password is not ready yet). When you
+ // create an instance, it can take up to 15 minutes for the instance to be ready.
+ PasswordData *PasswordData `locationName:"passwordData" type:"structure"`
+
+ // For SSH access, the temporary private key. For OpenSSH clients (e.g., command
+ // line SSH), you should save this value to tempkey).
+ PrivateKey *string `locationName:"privateKey" type:"string"`
+
+ // The protocol for these Amazon Lightsail instance access details.
+ Protocol *string `locationName:"protocol" type:"string" enum:"InstanceAccessProtocol"`
+
+ // The user name to use when logging in to the Amazon Lightsail instance.
+ Username *string `locationName:"username" type:"string"`
+}
+
+// String returns the string representation
+func (s InstanceAccessDetails) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceAccessDetails) GoString() string {
+ return s.String()
+}
+
+// SetCertKey sets the CertKey field's value.
+func (s *InstanceAccessDetails) SetCertKey(v string) *InstanceAccessDetails {
+ s.CertKey = &v
+ return s
+}
+
+// SetExpiresAt sets the ExpiresAt field's value.
+func (s *InstanceAccessDetails) SetExpiresAt(v time.Time) *InstanceAccessDetails {
+ s.ExpiresAt = &v
+ return s
+}
+
+// SetHostKeys sets the HostKeys field's value.
+func (s *InstanceAccessDetails) SetHostKeys(v []*HostKeyAttributes) *InstanceAccessDetails {
+ s.HostKeys = v
+ return s
+}
+
+// SetInstanceName sets the InstanceName field's value.
+func (s *InstanceAccessDetails) SetInstanceName(v string) *InstanceAccessDetails {
+ s.InstanceName = &v
+ return s
+}
+
+// SetIpAddress sets the IpAddress field's value.
+func (s *InstanceAccessDetails) SetIpAddress(v string) *InstanceAccessDetails {
+ s.IpAddress = &v
+ return s
+}
+
+// SetPassword sets the Password field's value.
+func (s *InstanceAccessDetails) SetPassword(v string) *InstanceAccessDetails {
+ s.Password = &v
+ return s
+}
+
+// SetPasswordData sets the PasswordData field's value.
+func (s *InstanceAccessDetails) SetPasswordData(v *PasswordData) *InstanceAccessDetails {
+ s.PasswordData = v
+ return s
+}
+
+// SetPrivateKey sets the PrivateKey field's value.
+func (s *InstanceAccessDetails) SetPrivateKey(v string) *InstanceAccessDetails {
+ s.PrivateKey = &v
+ return s
+}
+
+// SetProtocol sets the Protocol field's value.
+func (s *InstanceAccessDetails) SetProtocol(v string) *InstanceAccessDetails {
+ s.Protocol = &v
+ return s
+}
+
+// SetUsername sets the Username field's value.
+func (s *InstanceAccessDetails) SetUsername(v string) *InstanceAccessDetails {
+ s.Username = &v
+ return s
+}
+
+// Describes the Amazon Elastic Compute Cloud instance and related resources
+// to be created using the create cloud formation stack operation.
+type InstanceEntry struct {
+ _ struct{} `type:"structure"`
+
+ // The Availability Zone for the new Amazon EC2 instance.
+ //
+ // AvailabilityZone is a required field
+ AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"`
+
+ // The instance type (e.g., t2.micro) to use for the new Amazon EC2 instance.
+ //
+ // InstanceType is a required field
+ InstanceType *string `locationName:"instanceType" type:"string" required:"true"`
+
+ // The port configuration to use for the new Amazon EC2 instance.
+ //
+ // The following configuration options are available:
+ //
+ // * DEFAULT - Use the default firewall settings from the Lightsail instance
+ // blueprint. If this is specified, then IPv4 and IPv6 will be configured
+ // for the new instance that is created in Amazon EC2.
+ //
+ // * INSTANCE - Use the configured firewall settings from the source Lightsail
+ // instance. If this is specified, the new instance that is created in Amazon
+ // EC2 will be configured to match the configuration of the source Lightsail
+ // instance. For example, if the source instance is configured for dual-stack
+ // (IPv4 and IPv6), then IPv4 and IPv6 will be configured for the new instance
+ // that is created in Amazon EC2. If the source instance is configured for
+ // IPv4 only, then only IPv4 will be configured for the new instance that
+ // is created in Amazon EC2.
+ //
+ // * NONE - Use the default Amazon EC2 security group. If this is specified,
+ // then only IPv4 will be configured for the new instance that is created
+ // in Amazon EC2.
+ //
+ // * CLOSED - All ports closed. If this is specified, then only IPv4 will
+ // be configured for the new instance that is created in Amazon EC2.
+ //
+ // If you configured lightsail-connect as a cidrListAliases on your instance,
+ // or if you chose to allow the Lightsail browser-based SSH or RDP clients to
+ // connect to your instance, that configuration is not carried over to your
+ // new Amazon EC2 instance.
+ //
+ // PortInfoSource is a required field
+ PortInfoSource *string `locationName:"portInfoSource" type:"string" required:"true" enum:"PortInfoSourceType"`
+
+ // The name of the export snapshot record, which contains the exported Lightsail
+ // instance snapshot that will be used as the source of the new Amazon EC2 instance.
+ //
+ // Use the get export snapshot records operation to get a list of export snapshot
+ // records that you can use to create a CloudFormation stack.
+ //
+ // SourceName is a required field
+ SourceName *string `locationName:"sourceName" type:"string" required:"true"`
+
+ // A launch script you can create that configures a server with additional user
+ // data. For example, you might want to run apt-get -y update.
+ //
+ // Depending on the machine image you choose, the command to get software on
+ // your instance varies. Amazon Linux and CentOS use yum, Debian and Ubuntu
+ // use apt-get, and FreeBSD uses pkg.
+ UserData *string `locationName:"userData" type:"string"`
+}
+
+// String returns the string representation
+func (s InstanceEntry) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceEntry) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InstanceEntry) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "InstanceEntry"}
+ if s.AvailabilityZone == nil {
+ invalidParams.Add(request.NewErrParamRequired("AvailabilityZone"))
+ }
+ if s.InstanceType == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceType"))
+ }
+ if s.PortInfoSource == nil {
+ invalidParams.Add(request.NewErrParamRequired("PortInfoSource"))
+ }
+ if s.SourceName == nil {
+ invalidParams.Add(request.NewErrParamRequired("SourceName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAvailabilityZone sets the AvailabilityZone field's value.
+func (s *InstanceEntry) SetAvailabilityZone(v string) *InstanceEntry {
+ s.AvailabilityZone = &v
+ return s
+}
+
+// SetInstanceType sets the InstanceType field's value.
+func (s *InstanceEntry) SetInstanceType(v string) *InstanceEntry {
+ s.InstanceType = &v
+ return s
+}
+
+// SetPortInfoSource sets the PortInfoSource field's value.
+func (s *InstanceEntry) SetPortInfoSource(v string) *InstanceEntry {
+ s.PortInfoSource = &v
+ return s
+}
+
+// SetSourceName sets the SourceName field's value.
+func (s *InstanceEntry) SetSourceName(v string) *InstanceEntry {
+ s.SourceName = &v
+ return s
+}
+
+// SetUserData sets the UserData field's value.
+func (s *InstanceEntry) SetUserData(v string) *InstanceEntry {
+ s.UserData = &v
+ return s
+}
+
+// Describes the hardware for the instance.
+type InstanceHardware struct {
+ _ struct{} `type:"structure"`
+
+ // The number of vCPUs the instance has.
+ CpuCount *int64 `locationName:"cpuCount" type:"integer"`
+
+ // The disks attached to the instance.
+ Disks []*Disk `locationName:"disks" type:"list"`
+
+ // The amount of RAM in GB on the instance (e.g., 1.0).
+ RamSizeInGb *float64 `locationName:"ramSizeInGb" type:"float"`
+}
+
+// String returns the string representation
+func (s InstanceHardware) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceHardware) GoString() string {
+ return s.String()
+}
+
+// SetCpuCount sets the CpuCount field's value.
+func (s *InstanceHardware) SetCpuCount(v int64) *InstanceHardware {
+ s.CpuCount = &v
+ return s
+}
+
+// SetDisks sets the Disks field's value.
+func (s *InstanceHardware) SetDisks(v []*Disk) *InstanceHardware {
+ s.Disks = v
+ return s
+}
+
+// SetRamSizeInGb sets the RamSizeInGb field's value.
+func (s *InstanceHardware) SetRamSizeInGb(v float64) *InstanceHardware {
+ s.RamSizeInGb = &v
+ return s
+}
+
+// Describes information about the health of the instance.
+type InstanceHealthSummary struct {
+ _ struct{} `type:"structure"`
+
+ // Describes the overall instance health. Valid values are below.
+ InstanceHealth *string `locationName:"instanceHealth" type:"string" enum:"InstanceHealthState"`
+
+ // More information about the instance health. If the instanceHealth is healthy,
+ // then an instanceHealthReason value is not provided.
+ //
+ // If instanceHealth is initial, the instanceHealthReason value can be one of
+ // the following:
+ //
+ // * Lb.RegistrationInProgress - The target instance is in the process of
+ // being registered with the load balancer.
+ //
+ // * Lb.InitialHealthChecking - The Lightsail load balancer is still sending
+ // the target instance the minimum number of health checks required to determine
+ // its health status.
+ //
+ // If instanceHealth is unhealthy, the instanceHealthReason value can be one
+ // of the following:
+ //
+ // * Instance.ResponseCodeMismatch - The health checks did not return an
+ // expected HTTP code.
+ //
+ // * Instance.Timeout - The health check requests timed out.
+ //
+ // * Instance.FailedHealthChecks - The health checks failed because the connection
+ // to the target instance timed out, the target instance response was malformed,
+ // or the target instance failed the health check for an unknown reason.
+ //
+ // * Lb.InternalError - The health checks failed due to an internal error.
+ //
+ // If instanceHealth is unused, the instanceHealthReason value can be one of
+ // the following:
+ //
+ // * Instance.NotRegistered - The target instance is not registered with
+ // the target group.
+ //
+ // * Instance.NotInUse - The target group is not used by any load balancer,
+ // or the target instance is in an Availability Zone that is not enabled
+ // for its load balancer.
+ //
+ // * Instance.IpUnusable - The target IP address is reserved for use by a
+ // Lightsail load balancer.
+ //
+ // * Instance.InvalidState - The target is in the stopped or terminated state.
+ //
+ // If instanceHealth is draining, the instanceHealthReason value can be one
+ // of the following:
+ //
+ // * Instance.DeregistrationInProgress - The target instance is in the process
+ // of being deregistered and the deregistration delay period has not expired.
+ InstanceHealthReason *string `locationName:"instanceHealthReason" type:"string" enum:"InstanceHealthReason"`
+
+ // The name of the Lightsail instance for which you are requesting health check
+ // data.
+ InstanceName *string `locationName:"instanceName" type:"string"`
+}
+
+// String returns the string representation
+func (s InstanceHealthSummary) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceHealthSummary) GoString() string {
+ return s.String()
+}
+
+// SetInstanceHealth sets the InstanceHealth field's value.
+func (s *InstanceHealthSummary) SetInstanceHealth(v string) *InstanceHealthSummary {
+ s.InstanceHealth = &v
+ return s
+}
+
+// SetInstanceHealthReason sets the InstanceHealthReason field's value.
+func (s *InstanceHealthSummary) SetInstanceHealthReason(v string) *InstanceHealthSummary {
+ s.InstanceHealthReason = &v
+ return s
+}
+
+// SetInstanceName sets the InstanceName field's value.
+func (s *InstanceHealthSummary) SetInstanceName(v string) *InstanceHealthSummary {
+ s.InstanceName = &v
+ return s
+}
+
+// Describes monthly data transfer rates and port information for an instance.
+type InstanceNetworking struct {
+ _ struct{} `type:"structure"`
+
+ // The amount of data in GB allocated for monthly data transfers.
+ MonthlyTransfer *MonthlyTransfer `locationName:"monthlyTransfer" type:"structure"`
+
+ // An array of key-value pairs containing information about the ports on the
+ // instance.
+ Ports []*InstancePortInfo `locationName:"ports" type:"list"`
+}
+
+// String returns the string representation
+func (s InstanceNetworking) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceNetworking) GoString() string {
+ return s.String()
+}
+
+// SetMonthlyTransfer sets the MonthlyTransfer field's value.
+func (s *InstanceNetworking) SetMonthlyTransfer(v *MonthlyTransfer) *InstanceNetworking {
+ s.MonthlyTransfer = v
+ return s
+}
+
+// SetPorts sets the Ports field's value.
+func (s *InstanceNetworking) SetPorts(v []*InstancePortInfo) *InstanceNetworking {
+ s.Ports = v
+ return s
+}
+
+// Describes information about ports for an Amazon Lightsail instance.
+type InstancePortInfo struct {
+ _ struct{} `type:"structure"`
+
+ // The access direction (inbound or outbound).
+ //
+ // Lightsail currently supports only inbound access direction.
+ AccessDirection *string `locationName:"accessDirection" type:"string" enum:"AccessDirection"`
+
+ // The location from which access is allowed. For example, Anywhere (0.0.0.0/0),
+ // or Custom if a specific IP address or range of IP addresses is allowed.
+ AccessFrom *string `locationName:"accessFrom" type:"string"`
+
+ // The type of access (Public or Private).
+ AccessType *string `locationName:"accessType" type:"string" enum:"PortAccessType"`
+
+ // An alias that defines access for a preconfigured range of IP addresses.
+ //
+ // The only alias currently supported is lightsail-connect, which allows IP
+ // addresses of the browser-based RDP/SSH client in the Lightsail console to
+ // connect to your instance.
+ CidrListAliases []*string `locationName:"cidrListAliases" type:"list"`
+
+ // The IPv4 address, or range of IPv4 addresses (in CIDR notation) that are
+ // allowed to connect to an instance through the ports, and the protocol.
+ //
+ // The ipv6Cidrs parameter lists the IPv6 addresses that are allowed to connect
+ // to an instance.
+ //
+ // For more information about CIDR block notation, see Classless Inter-Domain
+ // Routing (https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation)
+ // on Wikipedia.
+ Cidrs []*string `locationName:"cidrs" type:"list"`
+
+ // The common name of the port information.
+ CommonName *string `locationName:"commonName" type:"string"`
+
+ // The first port in a range of open ports on an instance.
+ //
+ // Allowed ports:
+ //
+ // * TCP and UDP - 0 to 65535
+ //
+ // * ICMP - The ICMP type for IPv4 addresses. For example, specify 8 as the
+ // fromPort (ICMP type), and -1 as the toPort (ICMP code), to enable ICMP
+ // Ping. For more information, see Control Messages (https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol#Control_messages)
+ // on Wikipedia.
+ //
+ // * ICMPv6 - The ICMP type for IPv6 addresses. For example, specify 128
+ // as the fromPort (ICMPv6 type), and 0 as toPort (ICMPv6 code). For more
+ // information, see Internet Control Message Protocol for IPv6 (https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol_for_IPv6).
+ FromPort *int64 `locationName:"fromPort" type:"integer"`
+
+ // The IPv6 address, or range of IPv6 addresses (in CIDR notation) that are
+ // allowed to connect to an instance through the ports, and the protocol. Only
+ // devices with an IPv6 address can connect to an instance through IPv6; otherwise,
+ // IPv4 should be used.
+ //
+ // The cidrs parameter lists the IPv4 addresses that are allowed to connect
+ // to an instance.
+ //
+ // For more information about CIDR block notation, see Classless Inter-Domain
+ // Routing (https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation)
+ // on Wikipedia.
+ Ipv6Cidrs []*string `locationName:"ipv6Cidrs" type:"list"`
+
+ // The IP protocol name.
+ //
+ // The name can be one of the following:
+ //
+ // * tcp - Transmission Control Protocol (TCP) provides reliable, ordered,
+ // and error-checked delivery of streamed data between applications running
+ // on hosts communicating by an IP network. If you have an application that
+ // doesn't require reliable data stream service, use UDP instead.
+ //
+ // * all - All transport layer protocol types. For more general information,
+ // see Transport layer (https://en.wikipedia.org/wiki/Transport_layer) on
+ // Wikipedia.
+ //
+ // * udp - With User Datagram Protocol (UDP), computer applications can send
+ // messages (or datagrams) to other hosts on an Internet Protocol (IP) network.
+ // Prior communications are not required to set up transmission channels
+ // or data paths. Applications that don't require reliable data stream service
+ // can use UDP, which provides a connectionless datagram service that emphasizes
+ // reduced latency over reliability. If you do require reliable data stream
+ // service, use TCP instead.
+ //
+ // * icmp - Internet Control Message Protocol (ICMP) is used to send error
+ // messages and operational information indicating success or failure when
+ // communicating with an instance. For example, an error is indicated when
+ // an instance could not be reached. When you specify icmp as the protocol,
+ // you must specify the ICMP type using the fromPort parameter, and ICMP
+ // code using the toPort parameter.
+ Protocol *string `locationName:"protocol" type:"string" enum:"NetworkProtocol"`
+
+ // The last port in a range of open ports on an instance.
+ //
+ // Allowed ports:
+ //
+ // * TCP and UDP - 0 to 65535
+ //
+ // * ICMP - The ICMP code for IPv4 addresses. For example, specify 8 as the
+ // fromPort (ICMP type), and -1 as the toPort (ICMP code), to enable ICMP
+ // Ping. For more information, see Control Messages (https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol#Control_messages)
+ // on Wikipedia.
+ //
+ // * ICMPv6 - The ICMP code for IPv6 addresses. For example, specify 128
+ // as the fromPort (ICMPv6 type), and 0 as toPort (ICMPv6 code). For more
+ // information, see Internet Control Message Protocol for IPv6 (https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol_for_IPv6).
+ ToPort *int64 `locationName:"toPort" type:"integer"`
+}
+
+// String returns the string representation
+func (s InstancePortInfo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstancePortInfo) GoString() string {
+ return s.String()
+}
+
+// SetAccessDirection sets the AccessDirection field's value.
+func (s *InstancePortInfo) SetAccessDirection(v string) *InstancePortInfo {
+ s.AccessDirection = &v
+ return s
+}
+
+// SetAccessFrom sets the AccessFrom field's value.
+func (s *InstancePortInfo) SetAccessFrom(v string) *InstancePortInfo {
+ s.AccessFrom = &v
+ return s
+}
+
+// SetAccessType sets the AccessType field's value.
+func (s *InstancePortInfo) SetAccessType(v string) *InstancePortInfo {
+ s.AccessType = &v
+ return s
+}
+
+// SetCidrListAliases sets the CidrListAliases field's value.
+func (s *InstancePortInfo) SetCidrListAliases(v []*string) *InstancePortInfo {
+ s.CidrListAliases = v
+ return s
+}
+
+// SetCidrs sets the Cidrs field's value.
+func (s *InstancePortInfo) SetCidrs(v []*string) *InstancePortInfo {
+ s.Cidrs = v
+ return s
+}
+
+// SetCommonName sets the CommonName field's value.
+func (s *InstancePortInfo) SetCommonName(v string) *InstancePortInfo {
+ s.CommonName = &v
+ return s
+}
+
+// SetFromPort sets the FromPort field's value.
+func (s *InstancePortInfo) SetFromPort(v int64) *InstancePortInfo {
+ s.FromPort = &v
+ return s
+}
+
+// SetIpv6Cidrs sets the Ipv6Cidrs field's value.
+func (s *InstancePortInfo) SetIpv6Cidrs(v []*string) *InstancePortInfo {
+ s.Ipv6Cidrs = v
+ return s
+}
+
+// SetProtocol sets the Protocol field's value.
+func (s *InstancePortInfo) SetProtocol(v string) *InstancePortInfo {
+ s.Protocol = &v
+ return s
+}
+
+// SetToPort sets the ToPort field's value.
+func (s *InstancePortInfo) SetToPort(v int64) *InstancePortInfo {
+ s.ToPort = &v
+ return s
+}
+
+// Describes open ports on an instance, the IP addresses allowed to connect
+// to the instance through the ports, and the protocol.
+type InstancePortState struct {
+ _ struct{} `type:"structure"`
+
+ // An alias that defines access for a preconfigured range of IP addresses.
+ //
+ // The only alias currently supported is lightsail-connect, which allows IP
+ // addresses of the browser-based RDP/SSH client in the Lightsail console to
+ // connect to your instance.
+ CidrListAliases []*string `locationName:"cidrListAliases" type:"list"`
+
+ // The IPv4 address, or range of IPv4 addresses (in CIDR notation) that are
+ // allowed to connect to an instance through the ports, and the protocol.
+ //
+ // The ipv6Cidrs parameter lists the IPv6 addresses that are allowed to connect
+ // to an instance.
+ //
+ // For more information about CIDR block notation, see Classless Inter-Domain
+ // Routing (https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation)
+ // on Wikipedia.
+ Cidrs []*string `locationName:"cidrs" type:"list"`
+
+ // The first port in a range of open ports on an instance.
+ //
+ // Allowed ports:
+ //
+ // * TCP and UDP - 0 to 65535
+ //
+ // * ICMP - The ICMP type for IPv4 addresses. For example, specify 8 as the
+ // fromPort (ICMP type), and -1 as the toPort (ICMP code), to enable ICMP
+ // Ping. For more information, see Control Messages (https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol#Control_messages)
+ // on Wikipedia.
+ //
+ // * ICMPv6 - The ICMP type for IPv6 addresses. For example, specify 128
+ // as the fromPort (ICMPv6 type), and 0 as toPort (ICMPv6 code). For more
+ // information, see Internet Control Message Protocol for IPv6 (https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol_for_IPv6).
+ FromPort *int64 `locationName:"fromPort" type:"integer"`
+
+ // The IPv6 address, or range of IPv6 addresses (in CIDR notation) that are
+ // allowed to connect to an instance through the ports, and the protocol. Only
+ // devices with an IPv6 address can connect to an instance through IPv6; otherwise,
+ // IPv4 should be used.
+ //
+ // The cidrs parameter lists the IPv4 addresses that are allowed to connect
+ // to an instance.
+ //
+ // For more information about CIDR block notation, see Classless Inter-Domain
+ // Routing (https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation)
+ // on Wikipedia.
+ Ipv6Cidrs []*string `locationName:"ipv6Cidrs" type:"list"`
+
+ // The IP protocol name.
+ //
+ // The name can be one of the following:
+ //
+ // * tcp - Transmission Control Protocol (TCP) provides reliable, ordered,
+ // and error-checked delivery of streamed data between applications running
+ // on hosts communicating by an IP network. If you have an application that
+ // doesn't require reliable data stream service, use UDP instead.
+ //
+ // * all - All transport layer protocol types. For more general information,
+ // see Transport layer (https://en.wikipedia.org/wiki/Transport_layer) on
+ // Wikipedia.
+ //
+ // * udp - With User Datagram Protocol (UDP), computer applications can send
+ // messages (or datagrams) to other hosts on an Internet Protocol (IP) network.
+ // Prior communications are not required to set up transmission channels
+ // or data paths. Applications that don't require reliable data stream service
+ // can use UDP, which provides a connectionless datagram service that emphasizes
+ // reduced latency over reliability. If you do require reliable data stream
+ // service, use TCP instead.
+ //
+ // * icmp - Internet Control Message Protocol (ICMP) is used to send error
+ // messages and operational information indicating success or failure when
+ // communicating with an instance. For example, an error is indicated when
+ // an instance could not be reached. When you specify icmp as the protocol,
+ // you must specify the ICMP type using the fromPort parameter, and ICMP
+ // code using the toPort parameter.
+ Protocol *string `locationName:"protocol" type:"string" enum:"NetworkProtocol"`
+
+ // Specifies whether the instance port is open or closed.
+ //
+ // The port state for Lightsail instances is always open.
+ State *string `locationName:"state" type:"string" enum:"PortState"`
+
+ // The last port in a range of open ports on an instance.
+ //
+ // Allowed ports:
+ //
+ // * TCP and UDP - 0 to 65535
+ //
+ // * ICMP - The ICMP code for IPv4 addresses. For example, specify 8 as the
+ // fromPort (ICMP type), and -1 as the toPort (ICMP code), to enable ICMP
+ // Ping. For more information, see Control Messages (https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol#Control_messages)
+ // on Wikipedia.
+ //
+ // * ICMPv6 - The ICMP code for IPv6 addresses. For example, specify 128
+ // as the fromPort (ICMPv6 type), and 0 as toPort (ICMPv6 code). For more
+ // information, see Internet Control Message Protocol for IPv6 (https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol_for_IPv6).
+ ToPort *int64 `locationName:"toPort" type:"integer"`
+}
+
+// String returns the string representation
+func (s InstancePortState) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstancePortState) GoString() string {
+ return s.String()
+}
+
+// SetCidrListAliases sets the CidrListAliases field's value.
+func (s *InstancePortState) SetCidrListAliases(v []*string) *InstancePortState {
+ s.CidrListAliases = v
+ return s
+}
+
+// SetCidrs sets the Cidrs field's value.
+func (s *InstancePortState) SetCidrs(v []*string) *InstancePortState {
+ s.Cidrs = v
+ return s
+}
+
+// SetFromPort sets the FromPort field's value.
+func (s *InstancePortState) SetFromPort(v int64) *InstancePortState {
+ s.FromPort = &v
+ return s
+}
+
+// SetIpv6Cidrs sets the Ipv6Cidrs field's value.
+func (s *InstancePortState) SetIpv6Cidrs(v []*string) *InstancePortState {
+ s.Ipv6Cidrs = v
+ return s
+}
+
+// SetProtocol sets the Protocol field's value.
+func (s *InstancePortState) SetProtocol(v string) *InstancePortState {
+ s.Protocol = &v
+ return s
+}
+
+// SetState sets the State field's value.
+func (s *InstancePortState) SetState(v string) *InstancePortState {
+ s.State = &v
+ return s
+}
+
+// SetToPort sets the ToPort field's value.
+func (s *InstancePortState) SetToPort(v int64) *InstancePortState {
+ s.ToPort = &v
+ return s
+}
+
+// Describes an instance snapshot.
+type InstanceSnapshot struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the snapshot (e.g., arn:aws:lightsail:us-east-2:123456789101:InstanceSnapshot/d23b5706-3322-4d83-81e5-12345EXAMPLE).
+ Arn *string `locationName:"arn" type:"string"`
+
+ // The timestamp when the snapshot was created (e.g., 1479907467.024).
+ CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`
+
+ // An array of disk objects containing information about all block storage disks.
+ FromAttachedDisks []*Disk `locationName:"fromAttachedDisks" type:"list"`
+
+ // The blueprint ID from which you created the snapshot (e.g., os_debian_8_3).
+ // A blueprint is a virtual private server (or instance) image used to create
+ // instances quickly.
+ FromBlueprintId *string `locationName:"fromBlueprintId" type:"string"`
+
+ // The bundle ID from which you created the snapshot (e.g., micro_1_0).
+ FromBundleId *string `locationName:"fromBundleId" type:"string"`
+
+ // The Amazon Resource Name (ARN) of the instance from which the snapshot was
+ // created (e.g., arn:aws:lightsail:us-east-2:123456789101:Instance/64b8404c-ccb1-430b-8daf-12345EXAMPLE).
+ FromInstanceArn *string `locationName:"fromInstanceArn" type:"string"`
+
+ // The instance from which the snapshot was created.
+ FromInstanceName *string `locationName:"fromInstanceName" type:"string"`
+
+ // A Boolean value indicating whether the snapshot was created from an automatic
+ // snapshot.
+ IsFromAutoSnapshot *bool `locationName:"isFromAutoSnapshot" type:"boolean"`
+
+ // The region name and Availability Zone where you created the snapshot.
+ Location *ResourceLocation `locationName:"location" type:"structure"`
+
+ // The name of the snapshot.
+ Name *string `locationName:"name" type:"string"`
+
+ // The progress of the snapshot.
+ //
+ // This is populated only for disk snapshots, and is null for instance snapshots.
+ Progress *string `locationName:"progress" type:"string"`
+
+ // The type of resource (usually InstanceSnapshot).
+ ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"`
+
+ // The size in GB of the SSD.
+ SizeInGb *int64 `locationName:"sizeInGb" type:"integer"`
+
+ // The state the snapshot is in.
+ State *string `locationName:"state" type:"string" enum:"InstanceSnapshotState"`
+
+ // The support code. Include this code in your email to support when you have
+ // questions about an instance or another resource in Lightsail. This code enables
+ // our support team to look up your Lightsail information more easily.
+ SupportCode *string `locationName:"supportCode" type:"string"`
+
+ // The tag keys and optional values for the resource. For more information about
+ // tags in Lightsail, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-tags).
+ Tags []*Tag `locationName:"tags" type:"list"`
+}
+
+// String returns the string representation
+func (s InstanceSnapshot) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceSnapshot) GoString() string {
+ return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *InstanceSnapshot) SetArn(v string) *InstanceSnapshot {
+ s.Arn = &v
+ return s
+}
+
+// SetCreatedAt sets the CreatedAt field's value.
+func (s *InstanceSnapshot) SetCreatedAt(v time.Time) *InstanceSnapshot {
+ s.CreatedAt = &v
+ return s
+}
+
+// SetFromAttachedDisks sets the FromAttachedDisks field's value.
+func (s *InstanceSnapshot) SetFromAttachedDisks(v []*Disk) *InstanceSnapshot {
+ s.FromAttachedDisks = v
+ return s
+}
+
+// SetFromBlueprintId sets the FromBlueprintId field's value.
+func (s *InstanceSnapshot) SetFromBlueprintId(v string) *InstanceSnapshot {
+ s.FromBlueprintId = &v
+ return s
+}
+
+// SetFromBundleId sets the FromBundleId field's value.
+func (s *InstanceSnapshot) SetFromBundleId(v string) *InstanceSnapshot {
+ s.FromBundleId = &v
+ return s
+}
+
+// SetFromInstanceArn sets the FromInstanceArn field's value.
+func (s *InstanceSnapshot) SetFromInstanceArn(v string) *InstanceSnapshot {
+ s.FromInstanceArn = &v
+ return s
+}
+
+// SetFromInstanceName sets the FromInstanceName field's value.
+func (s *InstanceSnapshot) SetFromInstanceName(v string) *InstanceSnapshot {
+ s.FromInstanceName = &v
+ return s
+}
+
+// SetIsFromAutoSnapshot sets the IsFromAutoSnapshot field's value.
+func (s *InstanceSnapshot) SetIsFromAutoSnapshot(v bool) *InstanceSnapshot {
+ s.IsFromAutoSnapshot = &v
+ return s
+}
+
+// SetLocation sets the Location field's value.
+func (s *InstanceSnapshot) SetLocation(v *ResourceLocation) *InstanceSnapshot {
+ s.Location = v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *InstanceSnapshot) SetName(v string) *InstanceSnapshot {
+ s.Name = &v
+ return s
+}
+
+// SetProgress sets the Progress field's value.
+func (s *InstanceSnapshot) SetProgress(v string) *InstanceSnapshot {
+ s.Progress = &v
+ return s
+}
+
+// SetResourceType sets the ResourceType field's value.
+func (s *InstanceSnapshot) SetResourceType(v string) *InstanceSnapshot {
+ s.ResourceType = &v
+ return s
+}
+
+// SetSizeInGb sets the SizeInGb field's value.
+func (s *InstanceSnapshot) SetSizeInGb(v int64) *InstanceSnapshot {
+ s.SizeInGb = &v
+ return s
+}
+
+// SetState sets the State field's value.
+func (s *InstanceSnapshot) SetState(v string) *InstanceSnapshot {
+ s.State = &v
+ return s
+}
+
+// SetSupportCode sets the SupportCode field's value.
+func (s *InstanceSnapshot) SetSupportCode(v string) *InstanceSnapshot {
+ s.SupportCode = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *InstanceSnapshot) SetTags(v []*Tag) *InstanceSnapshot {
+ s.Tags = v
+ return s
+}
+
+// Describes an instance snapshot.
+type InstanceSnapshotInfo struct {
+ _ struct{} `type:"structure"`
+
+	// The blueprint ID from which the source instance was created (e.g., os_debian_8_3).
+ FromBlueprintId *string `locationName:"fromBlueprintId" type:"string"`
+
+ // The bundle ID from which the source instance was created (e.g., micro_1_0).
+ FromBundleId *string `locationName:"fromBundleId" type:"string"`
+
+ // A list of objects describing the disks that were attached to the source instance.
+ FromDiskInfo []*DiskInfo `locationName:"fromDiskInfo" type:"list"`
+}
+
+// String returns the string representation
+func (s InstanceSnapshotInfo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceSnapshotInfo) GoString() string {
+ return s.String()
+}
+
+// SetFromBlueprintId sets the FromBlueprintId field's value.
+func (s *InstanceSnapshotInfo) SetFromBlueprintId(v string) *InstanceSnapshotInfo {
+ s.FromBlueprintId = &v
+ return s
+}
+
+// SetFromBundleId sets the FromBundleId field's value.
+func (s *InstanceSnapshotInfo) SetFromBundleId(v string) *InstanceSnapshotInfo {
+ s.FromBundleId = &v
+ return s
+}
+
+// SetFromDiskInfo sets the FromDiskInfo field's value.
+func (s *InstanceSnapshotInfo) SetFromDiskInfo(v []*DiskInfo) *InstanceSnapshotInfo {
+ s.FromDiskInfo = v
+ return s
+}
+
+// Describes the virtual private server (or instance) status.
+type InstanceState struct {
+ _ struct{} `type:"structure"`
+
+ // The status code for the instance.
+ Code *int64 `locationName:"code" type:"integer"`
+
+ // The state of the instance (e.g., running or pending).
+ Name *string `locationName:"name" type:"string"`
+}
+
+// String returns the string representation
+func (s InstanceState) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceState) GoString() string {
+ return s.String()
+}
+
+// SetCode sets the Code field's value.
+func (s *InstanceState) SetCode(v int64) *InstanceState {
+ s.Code = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *InstanceState) SetName(v string) *InstanceState {
+ s.Name = &v
+ return s
+}
+
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+// AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+// view, or edit these resources.
+type InvalidInputException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Code_ *string `locationName:"code" type:"string"`
+
+ Docs *string `locationName:"docs" type:"string"`
+
+ Message_ *string `locationName:"message" type:"string"`
+
+ Tip *string `locationName:"tip" type:"string"`
+}
+
+// String returns the string representation
+func (s InvalidInputException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InvalidInputException) GoString() string {
+ return s.String()
+}
+
+func newErrorInvalidInputException(v protocol.ResponseMetadata) error {
+ return &InvalidInputException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *InvalidInputException) Code() string {
+ return "InvalidInputException"
+}
+
+// Message returns the exception's message.
+func (s *InvalidInputException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidInputException) OrigErr() error {
+ return nil
+}
+
+func (s *InvalidInputException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *InvalidInputException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *InvalidInputException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+type IsVpcPeeredInput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s IsVpcPeeredInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s IsVpcPeeredInput) GoString() string {
+ return s.String()
+}
+
+type IsVpcPeeredOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Returns true if the Lightsail VPC is peered; otherwise, false.
+ IsPeered *bool `locationName:"isPeered" type:"boolean"`
+}
+
+// String returns the string representation
+func (s IsVpcPeeredOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s IsVpcPeeredOutput) GoString() string {
+ return s.String()
+}
+
+// SetIsPeered sets the IsPeered field's value.
+func (s *IsVpcPeeredOutput) SetIsPeered(v bool) *IsVpcPeeredOutput {
+ s.IsPeered = &v
+ return s
+}
+
+// Describes an SSH key pair.
+type KeyPair struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the key pair (e.g., arn:aws:lightsail:us-east-2:123456789101:KeyPair/05859e3d-331d-48ba-9034-12345EXAMPLE).
+ Arn *string `locationName:"arn" type:"string"`
+
+ // The timestamp when the key pair was created (e.g., 1479816991.349).
+ CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`
+
+ // The RSA fingerprint of the key pair.
+ Fingerprint *string `locationName:"fingerprint" type:"string"`
+
+ // The region name and Availability Zone where the key pair was created.
+ Location *ResourceLocation `locationName:"location" type:"structure"`
+
+ // The friendly name of the SSH key pair.
+ Name *string `locationName:"name" type:"string"`
+
+ // The resource type (usually KeyPair).
+ ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"`
+
+ // The support code. Include this code in your email to support when you have
+ // questions about an instance or another resource in Lightsail. This code enables
+ // our support team to look up your Lightsail information more easily.
+ SupportCode *string `locationName:"supportCode" type:"string"`
+
+ // The tag keys and optional values for the resource. For more information about
+ // tags in Lightsail, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-tags).
+ Tags []*Tag `locationName:"tags" type:"list"`
+}
+
+// String returns the string representation
+func (s KeyPair) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s KeyPair) GoString() string {
+ return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *KeyPair) SetArn(v string) *KeyPair {
+ s.Arn = &v
+ return s
+}
+
+// SetCreatedAt sets the CreatedAt field's value.
+func (s *KeyPair) SetCreatedAt(v time.Time) *KeyPair {
+ s.CreatedAt = &v
+ return s
+}
+
+// SetFingerprint sets the Fingerprint field's value.
+func (s *KeyPair) SetFingerprint(v string) *KeyPair {
+ s.Fingerprint = &v
+ return s
+}
+
+// SetLocation sets the Location field's value.
+func (s *KeyPair) SetLocation(v *ResourceLocation) *KeyPair {
+ s.Location = v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *KeyPair) SetName(v string) *KeyPair {
+ s.Name = &v
+ return s
+}
+
+// SetResourceType sets the ResourceType field's value.
+func (s *KeyPair) SetResourceType(v string) *KeyPair {
+ s.ResourceType = &v
+ return s
+}
+
+// SetSupportCode sets the SupportCode field's value.
+func (s *KeyPair) SetSupportCode(v string) *KeyPair {
+ s.SupportCode = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *KeyPair) SetTags(v []*Tag) *KeyPair {
+ s.Tags = v
+ return s
+}
+
+// Describes an Amazon Lightsail content delivery network (CDN) distribution.
+type LightsailDistribution struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether the bundle that is currently applied to your distribution,
+ // specified using the distributionName parameter, can be changed to another
+ // bundle.
+ //
+ // Use the UpdateDistributionBundle action to change your distribution's bundle.
+ AbleToUpdateBundle *bool `locationName:"ableToUpdateBundle" type:"boolean"`
+
+ // The alternate domain names of the distribution.
+ AlternativeDomainNames []*string `locationName:"alternativeDomainNames" type:"list"`
+
+ // The Amazon Resource Name (ARN) of the distribution.
+ Arn *string `locationName:"arn" type:"string"`
+
+ // The ID of the bundle currently applied to the distribution.
+ BundleId *string `locationName:"bundleId" type:"string"`
+
+ // An object that describes the cache behavior settings of the distribution.
+ CacheBehaviorSettings *CacheSettings `locationName:"cacheBehaviorSettings" type:"structure"`
+
+ // An array of objects that describe the per-path cache behavior of the distribution.
+ CacheBehaviors []*CacheBehaviorPerPath `locationName:"cacheBehaviors" type:"list"`
+
+ // The name of the SSL/TLS certificate attached to the distribution, if any.
+ CertificateName *string `locationName:"certificateName" type:"string"`
+
+ // The timestamp when the distribution was created.
+ CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`
+
+ // An object that describes the default cache behavior of the distribution.
+ DefaultCacheBehavior *CacheBehavior `locationName:"defaultCacheBehavior" type:"structure"`
+
+ // The domain name of the distribution.
+ DomainName *string `locationName:"domainName" type:"string"`
+
+ // The IP address type of the distribution.
+ //
+ // The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6.
+ IpAddressType *string `locationName:"ipAddressType" type:"string" enum:"IpAddressType"`
+
+ // Indicates whether the distribution is enabled.
+ IsEnabled *bool `locationName:"isEnabled" type:"boolean"`
+
+ // An object that describes the location of the distribution, such as the AWS
+ // Region and Availability Zone.
+ //
+ // Lightsail distributions are global resources that can reference an origin
+ // in any AWS Region, and distribute its content globally. However, all distributions
+ // are located in the us-east-1 Region.
+ Location *ResourceLocation `locationName:"location" type:"structure"`
+
+ // The name of the distribution.
+ Name *string `locationName:"name" type:"string"`
+
+ // An object that describes the origin resource of the distribution, such as
+ // a Lightsail instance or load balancer.
+ //
+ // The distribution pulls, caches, and serves content from the origin.
+ Origin *Origin `locationName:"origin" type:"structure"`
+
+ // The public DNS of the origin.
+ OriginPublicDNS *string `locationName:"originPublicDNS" type:"string"`
+
+ // The Lightsail resource type (e.g., Distribution).
+ ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"`
+
+ // The status of the distribution.
+ Status *string `locationName:"status" type:"string"`
+
+ // The support code. Include this code in your email to support when you have
+ // questions about your Lightsail distribution. This code enables our support
+ // team to look up your Lightsail information more easily.
+ SupportCode *string `locationName:"supportCode" type:"string"`
+
+ // The tag keys and optional values for the resource. For more information about
+ // tags in Lightsail, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-tags).
+ Tags []*Tag `locationName:"tags" type:"list"`
+}
+
+// String returns the string representation
+func (s LightsailDistribution) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LightsailDistribution) GoString() string {
+	return s.String()
+}
+
+// SetAbleToUpdateBundle sets the AbleToUpdateBundle field's value.
+func (s *LightsailDistribution) SetAbleToUpdateBundle(v bool) *LightsailDistribution {
+	s.AbleToUpdateBundle = &v
+	return s
+}
+
+// SetAlternativeDomainNames sets the AlternativeDomainNames field's value.
+func (s *LightsailDistribution) SetAlternativeDomainNames(v []*string) *LightsailDistribution {
+	s.AlternativeDomainNames = v
+	return s
+}
+
+// SetArn sets the Arn field's value.
+func (s *LightsailDistribution) SetArn(v string) *LightsailDistribution {
+	s.Arn = &v
+	return s
+}
+
+// SetBundleId sets the BundleId field's value.
+func (s *LightsailDistribution) SetBundleId(v string) *LightsailDistribution {
+	s.BundleId = &v
+	return s
+}
+
+// SetCacheBehaviorSettings sets the CacheBehaviorSettings field's value.
+func (s *LightsailDistribution) SetCacheBehaviorSettings(v *CacheSettings) *LightsailDistribution {
+	s.CacheBehaviorSettings = v
+	return s
+}
+
+// SetCacheBehaviors sets the CacheBehaviors field's value.
+func (s *LightsailDistribution) SetCacheBehaviors(v []*CacheBehaviorPerPath) *LightsailDistribution {
+	s.CacheBehaviors = v
+	return s
+}
+
+// SetCertificateName sets the CertificateName field's value.
+func (s *LightsailDistribution) SetCertificateName(v string) *LightsailDistribution {
+	s.CertificateName = &v
+	return s
+}
+
+// SetCreatedAt sets the CreatedAt field's value.
+func (s *LightsailDistribution) SetCreatedAt(v time.Time) *LightsailDistribution {
+	s.CreatedAt = &v
+	return s
+}
+
+// SetDefaultCacheBehavior sets the DefaultCacheBehavior field's value.
+func (s *LightsailDistribution) SetDefaultCacheBehavior(v *CacheBehavior) *LightsailDistribution {
+	s.DefaultCacheBehavior = v
+	return s
+}
+
+// SetDomainName sets the DomainName field's value.
+func (s *LightsailDistribution) SetDomainName(v string) *LightsailDistribution {
+	s.DomainName = &v
+	return s
+}
+
+// SetIpAddressType sets the IpAddressType field's value.
+func (s *LightsailDistribution) SetIpAddressType(v string) *LightsailDistribution {
+	s.IpAddressType = &v
+	return s
+}
+
+// SetIsEnabled sets the IsEnabled field's value.
+func (s *LightsailDistribution) SetIsEnabled(v bool) *LightsailDistribution {
+	s.IsEnabled = &v
+	return s
+}
+
+// SetLocation sets the Location field's value.
+func (s *LightsailDistribution) SetLocation(v *ResourceLocation) *LightsailDistribution {
+	s.Location = v
+	return s
+}
+
+// SetName sets the Name field's value.
+func (s *LightsailDistribution) SetName(v string) *LightsailDistribution {
+	s.Name = &v
+	return s
+}
+
+// SetOrigin sets the Origin field's value.
+func (s *LightsailDistribution) SetOrigin(v *Origin) *LightsailDistribution {
+	s.Origin = v
+	return s
+}
+
+// SetOriginPublicDNS sets the OriginPublicDNS field's value.
+func (s *LightsailDistribution) SetOriginPublicDNS(v string) *LightsailDistribution {
+	s.OriginPublicDNS = &v
+	return s
+}
+
+// SetResourceType sets the ResourceType field's value.
+func (s *LightsailDistribution) SetResourceType(v string) *LightsailDistribution {
+	s.ResourceType = &v
+	return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *LightsailDistribution) SetStatus(v string) *LightsailDistribution {
+	s.Status = &v
+	return s
+}
+
+// SetSupportCode sets the SupportCode field's value.
+func (s *LightsailDistribution) SetSupportCode(v string) *LightsailDistribution {
+	s.SupportCode = &v
+	return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *LightsailDistribution) SetTags(v []*Tag) *LightsailDistribution {
+	s.Tags = v
+	return s
+}
+
+// Describes a load balancer.
+type LoadBalancer struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of the load balancer.
+	Arn *string `locationName:"arn" type:"string"`
+
+	// A string to string map of the configuration options for your load balancer.
+	// Valid values are listed below.
+	ConfigurationOptions map[string]*string `locationName:"configurationOptions" type:"map"`
+
+	// The date when your load balancer was created.
+	CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`
+
+	// The DNS name of your Lightsail load balancer.
+	DnsName *string `locationName:"dnsName" type:"string"`
+
+	// The path you specified to perform your health checks. If no path is specified,
+	// the load balancer tries to make a request to the default (root) page.
+	HealthCheckPath *string `locationName:"healthCheckPath" type:"string"`
+
+	// An array of InstanceHealthSummary objects describing the health of the load
+	// balancer.
+	InstanceHealthSummary []*InstanceHealthSummary `locationName:"instanceHealthSummary" type:"list"`
+
+	// The port where the load balancer will direct traffic to your Lightsail instances.
+	// For HTTP traffic, it's port 80. For HTTPS traffic, it's port 443.
+	InstancePort *int64 `locationName:"instancePort" type:"integer"`
+
+	// The IP address type of the load balancer.
+	//
+	// The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6.
+	IpAddressType *string `locationName:"ipAddressType" type:"string" enum:"IpAddressType"`
+
+	// The AWS Region where your load balancer was created (e.g., us-east-2a). Lightsail
+	// automatically creates your load balancer across Availability Zones.
+	Location *ResourceLocation `locationName:"location" type:"structure"`
+
+	// The name of the load balancer (e.g., my-load-balancer).
+	Name *string `locationName:"name" type:"string"`
+
+	// The protocol you have enabled for your load balancer. Valid values are below.
+	//
+	// You can't just have HTTP_HTTPS, but you can have just HTTP.
+	Protocol *string `locationName:"protocol" type:"string" enum:"LoadBalancerProtocol"`
+
+	// An array of public port settings for your load balancer. For HTTP, use port
+	// 80. For HTTPS, use port 443.
+	PublicPorts []*int64 `locationName:"publicPorts" type:"list"`
+
+	// The resource type (e.g., LoadBalancer).
+	ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"`
+
+	// The status of your load balancer. Valid values are below.
+	State *string `locationName:"state" type:"string" enum:"LoadBalancerState"`
+
+	// The support code. Include this code in your email to support when you have
+	// questions about your Lightsail load balancer. This code enables our support
+	// team to look up your Lightsail information more easily.
+	SupportCode *string `locationName:"supportCode" type:"string"`
+
+	// The tag keys and optional values for the resource. For more information about
+	// tags in Lightsail, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-tags).
+	Tags []*Tag `locationName:"tags" type:"list"`
+
+	// An array of LoadBalancerTlsCertificateSummary objects that provide additional
+	// information about the SSL/TLS certificates. For example, if true, the certificate
+	// is attached to the load balancer.
+	TlsCertificateSummaries []*LoadBalancerTlsCertificateSummary `locationName:"tlsCertificateSummaries" type:"list"`
+}
+
+// String returns the string representation
+func (s LoadBalancer) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LoadBalancer) GoString() string {
+	return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *LoadBalancer) SetArn(v string) *LoadBalancer {
+	s.Arn = &v
+	return s
+}
+
+// SetConfigurationOptions sets the ConfigurationOptions field's value.
+func (s *LoadBalancer) SetConfigurationOptions(v map[string]*string) *LoadBalancer {
+	s.ConfigurationOptions = v
+	return s
+}
+
+// SetCreatedAt sets the CreatedAt field's value.
+func (s *LoadBalancer) SetCreatedAt(v time.Time) *LoadBalancer {
+	s.CreatedAt = &v
+	return s
+}
+
+// SetDnsName sets the DnsName field's value.
+func (s *LoadBalancer) SetDnsName(v string) *LoadBalancer {
+	s.DnsName = &v
+	return s
+}
+
+// SetHealthCheckPath sets the HealthCheckPath field's value.
+func (s *LoadBalancer) SetHealthCheckPath(v string) *LoadBalancer {
+	s.HealthCheckPath = &v
+	return s
+}
+
+// SetInstanceHealthSummary sets the InstanceHealthSummary field's value.
+func (s *LoadBalancer) SetInstanceHealthSummary(v []*InstanceHealthSummary) *LoadBalancer {
+	s.InstanceHealthSummary = v
+	return s
+}
+
+// SetInstancePort sets the InstancePort field's value.
+func (s *LoadBalancer) SetInstancePort(v int64) *LoadBalancer {
+	s.InstancePort = &v
+	return s
+}
+
+// SetIpAddressType sets the IpAddressType field's value.
+func (s *LoadBalancer) SetIpAddressType(v string) *LoadBalancer {
+	s.IpAddressType = &v
+	return s
+}
+
+// SetLocation sets the Location field's value.
+func (s *LoadBalancer) SetLocation(v *ResourceLocation) *LoadBalancer {
+	s.Location = v
+	return s
+}
+
+// SetName sets the Name field's value.
+func (s *LoadBalancer) SetName(v string) *LoadBalancer {
+	s.Name = &v
+	return s
+}
+
+// SetProtocol sets the Protocol field's value.
+func (s *LoadBalancer) SetProtocol(v string) *LoadBalancer {
+	s.Protocol = &v
+	return s
+}
+
+// SetPublicPorts sets the PublicPorts field's value.
+func (s *LoadBalancer) SetPublicPorts(v []*int64) *LoadBalancer {
+	s.PublicPorts = v
+	return s
+}
+
+// SetResourceType sets the ResourceType field's value.
+func (s *LoadBalancer) SetResourceType(v string) *LoadBalancer {
+	s.ResourceType = &v
+	return s
+}
+
+// SetState sets the State field's value.
+func (s *LoadBalancer) SetState(v string) *LoadBalancer {
+	s.State = &v
+	return s
+}
+
+// SetSupportCode sets the SupportCode field's value.
+func (s *LoadBalancer) SetSupportCode(v string) *LoadBalancer {
+	s.SupportCode = &v
+	return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *LoadBalancer) SetTags(v []*Tag) *LoadBalancer {
+	s.Tags = v
+	return s
+}
+
+// SetTlsCertificateSummaries sets the TlsCertificateSummaries field's value.
+func (s *LoadBalancer) SetTlsCertificateSummaries(v []*LoadBalancerTlsCertificateSummary) *LoadBalancer {
+	s.TlsCertificateSummaries = v
+	return s
+}
+
+// Describes a load balancer SSL/TLS certificate.
+//
+// TLS is just an updated, more secure version of Secure Socket Layer (SSL).
+type LoadBalancerTlsCertificate struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of the SSL/TLS certificate.
+	Arn *string `locationName:"arn" type:"string"`
+
+	// The time when you created your SSL/TLS certificate.
+	CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`
+
+	// The domain name for your SSL/TLS certificate.
+	DomainName *string `locationName:"domainName" type:"string"`
+
+	// An array of LoadBalancerTlsCertificateDomainValidationRecord objects describing
+	// the records.
+	DomainValidationRecords []*LoadBalancerTlsCertificateDomainValidationRecord `locationName:"domainValidationRecords" type:"list"`
+
+	// The validation failure reason, if any, of the certificate.
+	//
+	// The following failure reasons are possible:
+	//
+	//    * NO_AVAILABLE_CONTACTS - This failure applies to email validation, which
+	//    is not available for Lightsail certificates.
+	//
+	//    * ADDITIONAL_VERIFICATION_REQUIRED - Lightsail requires additional information
+	//    to process this certificate request. This can happen as a fraud-protection
+	//    measure, such as when the domain ranks within the Alexa top 1000 websites.
+	//    To provide the required information, use the AWS Support Center (https://console.aws.amazon.com/support/home)
+	//    to contact AWS Support. You cannot request a certificate for Amazon-owned
+	//    domain names such as those ending in amazonaws.com, cloudfront.net, or
+	//    elasticbeanstalk.com.
+	//
+	//    * DOMAIN_NOT_ALLOWED - One or more of the domain names in the certificate
+	//    request was reported as an unsafe domain by VirusTotal (https://www.virustotal.com/gui/home/url).
+	//    To correct the problem, search for your domain name on the VirusTotal
+	//    (https://www.virustotal.com/gui/home/url) website. If your domain is reported
+	//    as suspicious, see Google Help for Hacked Websites (https://developers.google.com/web/fundamentals/security/hacked)
+	//    to learn what you can do. If you believe that the result is a false positive,
+	//    notify the organization that is reporting the domain. VirusTotal is an
+	//    aggregate of several antivirus and URL scanners and cannot remove your
+	//    domain from a block list itself. After you correct the problem and the
+	//    VirusTotal registry has been updated, request a new certificate. If you
+	//    see this error and your domain is not included in the VirusTotal list,
+	//    visit the AWS Support Center (https://console.aws.amazon.com/support/home)
+	//    and create a case.
+	//
+	//    * INVALID_PUBLIC_DOMAIN - One or more of the domain names in the certificate
+	//    request is not valid. Typically, this is because a domain name in the
+	//    request is not a valid top-level domain. Try to request a certificate
+	//    again, correcting any spelling errors or typos that were in the failed
+	//    request, and ensure that all domain names in the request are for valid
+	//    top-level domains. For example, you cannot request a certificate for example.invalidpublicdomain
+	//    because invalidpublicdomain is not a valid top-level domain.
+	//
+	//    * OTHER - Typically, this failure occurs when there is a typographical
+	//    error in one or more of the domain names in the certificate request. Try
+	//    to request a certificate again, correcting any spelling errors or typos
+	//    that were in the failed request.
+	FailureReason *string `locationName:"failureReason" type:"string" enum:"LoadBalancerTlsCertificateFailureReason"`
+
+	// When true, the SSL/TLS certificate is attached to the Lightsail load balancer.
+	IsAttached *bool `locationName:"isAttached" type:"boolean"`
+
+	// The time when the SSL/TLS certificate was issued.
+	IssuedAt *time.Time `locationName:"issuedAt" type:"timestamp"`
+
+	// The issuer of the certificate.
+	Issuer *string `locationName:"issuer" type:"string"`
+
+	// The algorithm used to generate the key pair (the public and private key).
+	KeyAlgorithm *string `locationName:"keyAlgorithm" type:"string"`
+
+	// The load balancer name where your SSL/TLS certificate is attached.
+	LoadBalancerName *string `locationName:"loadBalancerName" type:"string"`
+
+	// The AWS Region and Availability Zone where you created your certificate.
+	Location *ResourceLocation `locationName:"location" type:"structure"`
+
+	// The name of the SSL/TLS certificate (e.g., my-certificate).
+	Name *string `locationName:"name" type:"string"`
+
+	// The timestamp when the SSL/TLS certificate expires.
+	NotAfter *time.Time `locationName:"notAfter" type:"timestamp"`
+
+	// The timestamp when the SSL/TLS certificate is first valid.
+	NotBefore *time.Time `locationName:"notBefore" type:"timestamp"`
+
+	// An object that describes the status of the certificate renewal managed by
+	// Lightsail.
+	RenewalSummary *LoadBalancerTlsCertificateRenewalSummary `locationName:"renewalSummary" type:"structure"`
+
+	// The resource type (e.g., LoadBalancerTlsCertificate).
+	//
+	//    * Instance - A Lightsail instance (a virtual private server)
+	//
+	//    * StaticIp - A static IP address
+	//
+	//    * KeyPair - The key pair used to connect to a Lightsail instance
+	//
+	//    * InstanceSnapshot - A Lightsail instance snapshot
+	//
+	//    * Domain - A DNS zone
+	//
+	//    * PeeredVpc - A peered VPC
+	//
+	//    * LoadBalancer - A Lightsail load balancer
+	//
+	//    * LoadBalancerTlsCertificate - An SSL/TLS certificate associated with
+	//    a Lightsail load balancer
+	//
+	//    * Disk - A Lightsail block storage disk
+	//
+	//    * DiskSnapshot - A block storage disk snapshot
+	ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"`
+
+	// The reason the certificate was revoked. This value is present only when the
+	// certificate status is REVOKED.
+	RevocationReason *string `locationName:"revocationReason" type:"string" enum:"LoadBalancerTlsCertificateRevocationReason"`
+
+	// The timestamp when the certificate was revoked. This value is present only
+	// when the certificate status is REVOKED.
+	RevokedAt *time.Time `locationName:"revokedAt" type:"timestamp"`
+
+	// The serial number of the certificate.
+	Serial *string `locationName:"serial" type:"string"`
+
+	// The algorithm that was used to sign the certificate.
+	SignatureAlgorithm *string `locationName:"signatureAlgorithm" type:"string"`
+
+	// The validation status of the SSL/TLS certificate. Valid values are below.
+	Status *string `locationName:"status" type:"string" enum:"LoadBalancerTlsCertificateStatus"`
+
+	// The name of the entity that is associated with the public key contained in
+	// the certificate.
+	Subject *string `locationName:"subject" type:"string"`
+
+	// An array of strings that specify the alternate domains (e.g., example2.com)
+	// and subdomains (e.g., blog.example.com) for the certificate.
+	SubjectAlternativeNames []*string `locationName:"subjectAlternativeNames" type:"list"`
+
+	// The support code. Include this code in your email to support when you have
+	// questions about your Lightsail load balancer or SSL/TLS certificate. This
+	// code enables our support team to look up your Lightsail information more
+	// easily.
+	SupportCode *string `locationName:"supportCode" type:"string"`
+
+	// The tag keys and optional values for the resource. For more information about
+	// tags in Lightsail, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-tags).
+	Tags []*Tag `locationName:"tags" type:"list"`
+}
+
+// String returns the string representation
+func (s LoadBalancerTlsCertificate) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LoadBalancerTlsCertificate) GoString() string {
+	return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *LoadBalancerTlsCertificate) SetArn(v string) *LoadBalancerTlsCertificate {
+	s.Arn = &v
+	return s
+}
+
+// SetCreatedAt sets the CreatedAt field's value.
+func (s *LoadBalancerTlsCertificate) SetCreatedAt(v time.Time) *LoadBalancerTlsCertificate {
+	s.CreatedAt = &v
+	return s
+}
+
+// SetDomainName sets the DomainName field's value.
+func (s *LoadBalancerTlsCertificate) SetDomainName(v string) *LoadBalancerTlsCertificate {
+	s.DomainName = &v
+	return s
+}
+
+// SetDomainValidationRecords sets the DomainValidationRecords field's value.
+func (s *LoadBalancerTlsCertificate) SetDomainValidationRecords(v []*LoadBalancerTlsCertificateDomainValidationRecord) *LoadBalancerTlsCertificate {
+	s.DomainValidationRecords = v
+	return s
+}
+
+// SetFailureReason sets the FailureReason field's value.
+func (s *LoadBalancerTlsCertificate) SetFailureReason(v string) *LoadBalancerTlsCertificate {
+	s.FailureReason = &v
+	return s
+}
+
+// SetIsAttached sets the IsAttached field's value.
+func (s *LoadBalancerTlsCertificate) SetIsAttached(v bool) *LoadBalancerTlsCertificate {
+	s.IsAttached = &v
+	return s
+}
+
+// SetIssuedAt sets the IssuedAt field's value.
+func (s *LoadBalancerTlsCertificate) SetIssuedAt(v time.Time) *LoadBalancerTlsCertificate {
+	s.IssuedAt = &v
+	return s
+}
+
+// SetIssuer sets the Issuer field's value.
+func (s *LoadBalancerTlsCertificate) SetIssuer(v string) *LoadBalancerTlsCertificate {
+	s.Issuer = &v
+	return s
+}
+
+// SetKeyAlgorithm sets the KeyAlgorithm field's value.
+func (s *LoadBalancerTlsCertificate) SetKeyAlgorithm(v string) *LoadBalancerTlsCertificate {
+	s.KeyAlgorithm = &v
+	return s
+}
+
+// SetLoadBalancerName sets the LoadBalancerName field's value.
+func (s *LoadBalancerTlsCertificate) SetLoadBalancerName(v string) *LoadBalancerTlsCertificate {
+	s.LoadBalancerName = &v
+	return s
+}
+
+// SetLocation sets the Location field's value.
+func (s *LoadBalancerTlsCertificate) SetLocation(v *ResourceLocation) *LoadBalancerTlsCertificate {
+	s.Location = v
+	return s
+}
+
+// SetName sets the Name field's value.
+func (s *LoadBalancerTlsCertificate) SetName(v string) *LoadBalancerTlsCertificate {
+	s.Name = &v
+	return s
+}
+
+// SetNotAfter sets the NotAfter field's value.
+func (s *LoadBalancerTlsCertificate) SetNotAfter(v time.Time) *LoadBalancerTlsCertificate {
+	s.NotAfter = &v
+	return s
+}
+
+// SetNotBefore sets the NotBefore field's value.
+func (s *LoadBalancerTlsCertificate) SetNotBefore(v time.Time) *LoadBalancerTlsCertificate {
+	s.NotBefore = &v
+	return s
+}
+
+// SetRenewalSummary sets the RenewalSummary field's value.
+func (s *LoadBalancerTlsCertificate) SetRenewalSummary(v *LoadBalancerTlsCertificateRenewalSummary) *LoadBalancerTlsCertificate {
+	s.RenewalSummary = v
+	return s
+}
+
+// SetResourceType sets the ResourceType field's value.
+func (s *LoadBalancerTlsCertificate) SetResourceType(v string) *LoadBalancerTlsCertificate {
+	s.ResourceType = &v
+	return s
+}
+
+// SetRevocationReason sets the RevocationReason field's value.
+func (s *LoadBalancerTlsCertificate) SetRevocationReason(v string) *LoadBalancerTlsCertificate {
+	s.RevocationReason = &v
+	return s
+}
+
+// SetRevokedAt sets the RevokedAt field's value.
+func (s *LoadBalancerTlsCertificate) SetRevokedAt(v time.Time) *LoadBalancerTlsCertificate {
+	s.RevokedAt = &v
+	return s
+}
+
+// SetSerial sets the Serial field's value.
+func (s *LoadBalancerTlsCertificate) SetSerial(v string) *LoadBalancerTlsCertificate {
+	s.Serial = &v
+	return s
+}
+
+// SetSignatureAlgorithm sets the SignatureAlgorithm field's value.
+func (s *LoadBalancerTlsCertificate) SetSignatureAlgorithm(v string) *LoadBalancerTlsCertificate {
+	s.SignatureAlgorithm = &v
+	return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *LoadBalancerTlsCertificate) SetStatus(v string) *LoadBalancerTlsCertificate {
+	s.Status = &v
+	return s
+}
+
+// SetSubject sets the Subject field's value.
+func (s *LoadBalancerTlsCertificate) SetSubject(v string) *LoadBalancerTlsCertificate {
+	s.Subject = &v
+	return s
+}
+
+// SetSubjectAlternativeNames sets the SubjectAlternativeNames field's value.
+func (s *LoadBalancerTlsCertificate) SetSubjectAlternativeNames(v []*string) *LoadBalancerTlsCertificate {
+	s.SubjectAlternativeNames = v
+	return s
+}
+
+// SetSupportCode sets the SupportCode field's value.
+func (s *LoadBalancerTlsCertificate) SetSupportCode(v string) *LoadBalancerTlsCertificate {
+	s.SupportCode = &v
+	return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *LoadBalancerTlsCertificate) SetTags(v []*Tag) *LoadBalancerTlsCertificate {
+	s.Tags = v
+	return s
+}
+
+// Contains information about the domain names on an SSL/TLS certificate that
+// you will use to validate domain ownership.
+type LoadBalancerTlsCertificateDomainValidationOption struct {
+	_ struct{} `type:"structure"`
+
+	// The fully qualified domain name in the certificate request.
+	DomainName *string `locationName:"domainName" type:"string"`
+
+	// The status of the domain validation. Valid values are listed below.
+	ValidationStatus *string `locationName:"validationStatus" type:"string" enum:"LoadBalancerTlsCertificateDomainStatus"`
+}
+
+// String returns the string representation
+func (s LoadBalancerTlsCertificateDomainValidationOption) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LoadBalancerTlsCertificateDomainValidationOption) GoString() string {
+	return s.String()
+}
+
+// SetDomainName sets the DomainName field's value.
+func (s *LoadBalancerTlsCertificateDomainValidationOption) SetDomainName(v string) *LoadBalancerTlsCertificateDomainValidationOption {
+	s.DomainName = &v
+	return s
+}
+
+// SetValidationStatus sets the ValidationStatus field's value.
+func (s *LoadBalancerTlsCertificateDomainValidationOption) SetValidationStatus(v string) *LoadBalancerTlsCertificateDomainValidationOption {
+	s.ValidationStatus = &v
+	return s
+}
+
+// Describes the validation record of each domain name in the SSL/TLS certificate.
+type LoadBalancerTlsCertificateDomainValidationRecord struct {
+	_ struct{} `type:"structure"`
+
+	// The domain name against which your SSL/TLS certificate was validated.
+	DomainName *string `locationName:"domainName" type:"string"`
+
+	// A fully qualified domain name in the certificate. For example, example.com.
+	Name *string `locationName:"name" type:"string"`
+
+	// The type of validation record. For example, CNAME for domain validation.
+	Type *string `locationName:"type" type:"string"`
+
+	// The validation status. Valid values are listed below.
+	ValidationStatus *string `locationName:"validationStatus" type:"string" enum:"LoadBalancerTlsCertificateDomainStatus"`
+
+	// The value for that type.
+	Value *string `locationName:"value" type:"string"`
+}
+
+// String returns the string representation
+func (s LoadBalancerTlsCertificateDomainValidationRecord) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LoadBalancerTlsCertificateDomainValidationRecord) GoString() string {
+	return s.String()
+}
+
+// SetDomainName sets the DomainName field's value.
+func (s *LoadBalancerTlsCertificateDomainValidationRecord) SetDomainName(v string) *LoadBalancerTlsCertificateDomainValidationRecord {
+	s.DomainName = &v
+	return s
+}
+
+// SetName sets the Name field's value.
+func (s *LoadBalancerTlsCertificateDomainValidationRecord) SetName(v string) *LoadBalancerTlsCertificateDomainValidationRecord {
+	s.Name = &v
+	return s
+}
+
+// SetType sets the Type field's value.
+func (s *LoadBalancerTlsCertificateDomainValidationRecord) SetType(v string) *LoadBalancerTlsCertificateDomainValidationRecord {
+	s.Type = &v
+	return s
+}
+
+// SetValidationStatus sets the ValidationStatus field's value.
+func (s *LoadBalancerTlsCertificateDomainValidationRecord) SetValidationStatus(v string) *LoadBalancerTlsCertificateDomainValidationRecord {
+	s.ValidationStatus = &v
+	return s
+}
+
+// SetValue sets the Value field's value.
+func (s *LoadBalancerTlsCertificateDomainValidationRecord) SetValue(v string) *LoadBalancerTlsCertificateDomainValidationRecord {
+	s.Value = &v
+	return s
+}
+
+// Contains information about the status of Lightsail's managed renewal for
+// the certificate.
+//
+// The renewal status of the certificate.
+//
+// The following renewal statuses are possible:
+//
+//    * PendingAutoRenewal - Lightsail is attempting to automatically validate
+//    the domain names in the certificate. No further action is required.
+//
+//    * PendingValidation - Lightsail couldn't automatically validate one or
+//    more domain names in the certificate. You must take action to validate
+//    these domain names or the certificate won't be renewed. If you used DNS
+//    validation, check to make sure your certificate's domain validation records
+//    exist in your domain's DNS, and that your certificate remains in use.
+//
+//    * Success - All domain names in the certificate are validated, and Lightsail
+//    renewed the certificate. No further action is required.
+//
+//    * Failed - One or more domain names were not validated before the certificate
+//    expired, and Lightsail did not renew the certificate. You can request
+//    a new certificate using the CreateCertificate action.
+type LoadBalancerTlsCertificateRenewalSummary struct {
+	_ struct{} `type:"structure"`
+
+	// Contains information about the validation of each domain name in the certificate,
+	// as it pertains to Lightsail's managed renewal. This is different from the
+	// initial validation that occurs as a result of the RequestCertificate request.
+	DomainValidationOptions []*LoadBalancerTlsCertificateDomainValidationOption `locationName:"domainValidationOptions" type:"list"`
+
+	// The renewal status of the certificate.
+	//
+	// The following renewal statuses are possible:
+	//
+	//    * PendingAutoRenewal - Lightsail is attempting to automatically validate
+	//    the domain names of the certificate. No further action is required.
+	//
+	//    * PendingValidation - Lightsail couldn't automatically validate one or
+	//    more domain names of the certificate. You must take action to validate
+	//    these domain names or the certificate won't be renewed. Check to make
+	//    sure your certificate's domain validation records exist in your domain's
+	//    DNS, and that your certificate remains in use.
+	//
+	//    * Success - All domain names in the certificate are validated, and Lightsail
+	//    renewed the certificate. No further action is required.
+	//
+	//    * Failed - One or more domain names were not validated before the certificate
+	//    expired, and Lightsail did not renew the certificate. You can request
+	//    a new certificate using the CreateCertificate action.
+	RenewalStatus *string `locationName:"renewalStatus" type:"string" enum:"LoadBalancerTlsCertificateRenewalStatus"`
+}
+
+// String returns the string representation
+func (s LoadBalancerTlsCertificateRenewalSummary) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LoadBalancerTlsCertificateRenewalSummary) GoString() string {
+	return s.String()
+}
+
+// SetDomainValidationOptions sets the DomainValidationOptions field's value.
+func (s *LoadBalancerTlsCertificateRenewalSummary) SetDomainValidationOptions(v []*LoadBalancerTlsCertificateDomainValidationOption) *LoadBalancerTlsCertificateRenewalSummary {
+	s.DomainValidationOptions = v
+	return s
+}
+
+// SetRenewalStatus sets the RenewalStatus field's value.
+func (s *LoadBalancerTlsCertificateRenewalSummary) SetRenewalStatus(v string) *LoadBalancerTlsCertificateRenewalSummary {
+	s.RenewalStatus = &v
+	return s
+}
+
+// Provides a summary of SSL/TLS certificate metadata.
+type LoadBalancerTlsCertificateSummary struct {
+	_ struct{} `type:"structure"`
+
+	// When true, the SSL/TLS certificate is attached to the Lightsail load balancer.
+	IsAttached *bool `locationName:"isAttached" type:"boolean"`
+
+	// The name of the SSL/TLS certificate.
+	Name *string `locationName:"name" type:"string"`
+}
+
+// String returns the string representation
+func (s LoadBalancerTlsCertificateSummary) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LoadBalancerTlsCertificateSummary) GoString() string {
+	return s.String()
+}
+
+// SetIsAttached sets the IsAttached field's value.
+func (s *LoadBalancerTlsCertificateSummary) SetIsAttached(v bool) *LoadBalancerTlsCertificateSummary {
+	s.IsAttached = &v
+	return s
+}
+
+// SetName sets the Name field's value.
+func (s *LoadBalancerTlsCertificateSummary) SetName(v string) *LoadBalancerTlsCertificateSummary {
+	s.Name = &v
+	return s
+}
+
+// Describes a database log event.
+type LogEvent struct {
+	_ struct{} `type:"structure"`
+
+	// The timestamp when the database log event was created.
+	CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`
+
+	// The message of the database log event.
+	Message *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation
+func (s LogEvent) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LogEvent) GoString() string {
+	return s.String()
+}
+
+// SetCreatedAt sets the CreatedAt field's value.
+func (s *LogEvent) SetCreatedAt(v time.Time) *LogEvent {
+	s.CreatedAt = &v
+	return s
+}
+
+// SetMessage sets the Message field's value.
+func (s *LogEvent) SetMessage(v string) *LogEvent {
+	s.Message = &v
+	return s
+}
+
+// Describes the metric data point.
+type MetricDatapoint struct {
+	_ struct{} `type:"structure"`
+
+	// The average.
+	Average *float64 `locationName:"average" type:"double"`
+
+	// The maximum.
+	Maximum *float64 `locationName:"maximum" type:"double"`
+
+	// The minimum.
+	Minimum *float64 `locationName:"minimum" type:"double"`
+
+	// The sample count.
+	SampleCount *float64 `locationName:"sampleCount" type:"double"`
+
+	// The sum.
+	Sum *float64 `locationName:"sum" type:"double"`
+
+	// The timestamp (e.g., 1479816991.349).
+	Timestamp *time.Time `locationName:"timestamp" type:"timestamp"`
+
+	// The unit.
+	Unit *string `locationName:"unit" type:"string" enum:"MetricUnit"`
+}
+
+// String returns the string representation
+func (s MetricDatapoint) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MetricDatapoint) GoString() string {
+	return s.String()
+}
+
+// SetAverage sets the Average field's value.
+func (s *MetricDatapoint) SetAverage(v float64) *MetricDatapoint {
+	s.Average = &v
+	return s
+}
+
+// SetMaximum sets the Maximum field's value.
+func (s *MetricDatapoint) SetMaximum(v float64) *MetricDatapoint {
+	s.Maximum = &v
+	return s
+}
+
+// SetMinimum sets the Minimum field's value.
+func (s *MetricDatapoint) SetMinimum(v float64) *MetricDatapoint {
+	s.Minimum = &v
+	return s
+}
+
+// SetSampleCount sets the SampleCount field's value.
+func (s *MetricDatapoint) SetSampleCount(v float64) *MetricDatapoint {
+	s.SampleCount = &v
+	return s
+}
+
+// SetSum sets the Sum field's value.
+func (s *MetricDatapoint) SetSum(v float64) *MetricDatapoint {
+	s.Sum = &v
+	return s
+}
+
+// SetTimestamp sets the Timestamp field's value.
+func (s *MetricDatapoint) SetTimestamp(v time.Time) *MetricDatapoint {
+	s.Timestamp = &v
+	return s
+}
+
+// SetUnit sets the Unit field's value.
+func (s *MetricDatapoint) SetUnit(v string) *MetricDatapoint {
+	s.Unit = &v
+	return s
+}
+
+// Describes resource being monitored by an alarm.
+//
+// An alarm is a way to monitor your Amazon Lightsail resource metrics. For
+// more information, see Alarms in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-alarms).
+type MonitoredResourceInfo struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the resource being monitored.
+ Arn *string `locationName:"arn" type:"string"`
+
+ // The name of the Lightsail resource being monitored.
+ Name *string `locationName:"name" type:"string"`
+
+ // The Lightsail resource type of the resource being monitored.
+ //
+ // Instances, load balancers, and relational databases are the only Lightsail
+ // resources that can currently be monitored by alarms.
+ ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"`
+}
+
+// String returns the string representation
+func (s MonitoredResourceInfo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MonitoredResourceInfo) GoString() string {
+ return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *MonitoredResourceInfo) SetArn(v string) *MonitoredResourceInfo {
+ s.Arn = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *MonitoredResourceInfo) SetName(v string) *MonitoredResourceInfo {
+ s.Name = &v
+ return s
+}
+
+// SetResourceType sets the ResourceType field's value.
+func (s *MonitoredResourceInfo) SetResourceType(v string) *MonitoredResourceInfo {
+ s.ResourceType = &v
+ return s
+}
+
+// Describes the monthly data transfer in and out of your virtual private server
+// (or instance).
+type MonthlyTransfer struct {
+ _ struct{} `type:"structure"`
+
+ // The amount allocated per month (in GB).
+ GbPerMonthAllocated *int64 `locationName:"gbPerMonthAllocated" type:"integer"`
+}
+
+// String returns the string representation
+func (s MonthlyTransfer) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MonthlyTransfer) GoString() string {
+ return s.String()
+}
+
+// SetGbPerMonthAllocated sets the GbPerMonthAllocated field's value.
+func (s *MonthlyTransfer) SetGbPerMonthAllocated(v int64) *MonthlyTransfer {
+ s.GbPerMonthAllocated = &v
+ return s
+}
+
+// Lightsail throws this exception when it cannot find a resource.
+type NotFoundException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Code_ *string `locationName:"code" type:"string"`
+
+ Docs *string `locationName:"docs" type:"string"`
+
+ Message_ *string `locationName:"message" type:"string"`
+
+ Tip *string `locationName:"tip" type:"string"`
+}
+
+// String returns the string representation
+func (s NotFoundException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s NotFoundException) GoString() string {
+ return s.String()
+}
+
+func newErrorNotFoundException(v protocol.ResponseMetadata) error {
+ return &NotFoundException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *NotFoundException) Code() string {
+ return "NotFoundException"
+}
+
+// Message returns the exception's message.
+func (s *NotFoundException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *NotFoundException) OrigErr() error {
+ return nil
+}
+
+func (s *NotFoundException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *NotFoundException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *NotFoundException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+type OpenInstancePublicPortsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the instance for which to open ports.
+ //
+ // InstanceName is a required field
+ InstanceName *string `locationName:"instanceName" type:"string" required:"true"`
+
+ // An object to describe the ports to open for the specified instance.
+ //
+ // PortInfo is a required field
+ PortInfo *PortInfo `locationName:"portInfo" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s OpenInstancePublicPortsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s OpenInstancePublicPortsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *OpenInstancePublicPortsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "OpenInstancePublicPortsInput"}
+ if s.InstanceName == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceName"))
+ }
+ if s.PortInfo == nil {
+ invalidParams.Add(request.NewErrParamRequired("PortInfo"))
+ }
+ if s.PortInfo != nil {
+ if err := s.PortInfo.Validate(); err != nil {
+ invalidParams.AddNested("PortInfo", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetInstanceName sets the InstanceName field's value.
+func (s *OpenInstancePublicPortsInput) SetInstanceName(v string) *OpenInstancePublicPortsInput {
+ s.InstanceName = &v
+ return s
+}
+
+// SetPortInfo sets the PortInfo field's value.
+func (s *OpenInstancePublicPortsInput) SetPortInfo(v *PortInfo) *OpenInstancePublicPortsInput {
+ s.PortInfo = v
+ return s
+}
+
+type OpenInstancePublicPortsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operation *Operation `locationName:"operation" type:"structure"`
+}
+
+// String returns the string representation
+func (s OpenInstancePublicPortsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s OpenInstancePublicPortsOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperation sets the Operation field's value.
+func (s *OpenInstancePublicPortsOutput) SetOperation(v *Operation) *OpenInstancePublicPortsOutput {
+ s.Operation = v
+ return s
+}
+
+// Describes the API operation.
+type Operation struct {
+ _ struct{} `type:"structure"`
+
+ // The timestamp when the operation was initialized (e.g., 1479816991.349).
+ CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`
+
+ // The error code.
+ ErrorCode *string `locationName:"errorCode" type:"string"`
+
+ // The error details.
+ ErrorDetails *string `locationName:"errorDetails" type:"string"`
+
+ // The ID of the operation.
+ Id *string `locationName:"id" type:"string"`
+
+ // A Boolean value indicating whether the operation is terminal.
+ IsTerminal *bool `locationName:"isTerminal" type:"boolean"`
+
+ // The AWS Region and Availability Zone.
+ Location *ResourceLocation `locationName:"location" type:"structure"`
+
+ // Details about the operation (e.g., Debian-1GB-Ohio-1).
+ OperationDetails *string `locationName:"operationDetails" type:"string"`
+
+ // The type of operation.
+ OperationType *string `locationName:"operationType" type:"string" enum:"OperationType"`
+
+ // The resource name.
+ ResourceName *string `locationName:"resourceName" type:"string"`
+
+ // The resource type.
+ ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"`
+
+ // The status of the operation.
+ Status *string `locationName:"status" type:"string" enum:"OperationStatus"`
+
+ // The timestamp when the status was changed (e.g., 1479816991.349).
+ StatusChangedAt *time.Time `locationName:"statusChangedAt" type:"timestamp"`
+}
+
+// String returns the string representation
+func (s Operation) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Operation) GoString() string {
+ return s.String()
+}
+
+// SetCreatedAt sets the CreatedAt field's value.
+func (s *Operation) SetCreatedAt(v time.Time) *Operation {
+ s.CreatedAt = &v
+ return s
+}
+
+// SetErrorCode sets the ErrorCode field's value.
+func (s *Operation) SetErrorCode(v string) *Operation {
+ s.ErrorCode = &v
+ return s
+}
+
+// SetErrorDetails sets the ErrorDetails field's value.
+func (s *Operation) SetErrorDetails(v string) *Operation {
+ s.ErrorDetails = &v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *Operation) SetId(v string) *Operation {
+ s.Id = &v
+ return s
+}
+
+// SetIsTerminal sets the IsTerminal field's value.
+func (s *Operation) SetIsTerminal(v bool) *Operation {
+ s.IsTerminal = &v
+ return s
+}
+
+// SetLocation sets the Location field's value.
+func (s *Operation) SetLocation(v *ResourceLocation) *Operation {
+ s.Location = v
+ return s
+}
+
+// SetOperationDetails sets the OperationDetails field's value.
+func (s *Operation) SetOperationDetails(v string) *Operation {
+ s.OperationDetails = &v
+ return s
+}
+
+// SetOperationType sets the OperationType field's value.
+func (s *Operation) SetOperationType(v string) *Operation {
+ s.OperationType = &v
+ return s
+}
+
+// SetResourceName sets the ResourceName field's value.
+func (s *Operation) SetResourceName(v string) *Operation {
+ s.ResourceName = &v
+ return s
+}
+
+// SetResourceType sets the ResourceType field's value.
+func (s *Operation) SetResourceType(v string) *Operation {
+ s.ResourceType = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *Operation) SetStatus(v string) *Operation {
+ s.Status = &v
+ return s
+}
+
+// SetStatusChangedAt sets the StatusChangedAt field's value.
+func (s *Operation) SetStatusChangedAt(v time.Time) *Operation {
+ s.StatusChangedAt = &v
+ return s
+}
+
+// Lightsail throws this exception when an operation fails to execute.
+type OperationFailureException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Code_ *string `locationName:"code" type:"string"`
+
+ Docs *string `locationName:"docs" type:"string"`
+
+ Message_ *string `locationName:"message" type:"string"`
+
+ Tip *string `locationName:"tip" type:"string"`
+}
+
+// String returns the string representation
+func (s OperationFailureException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s OperationFailureException) GoString() string {
+ return s.String()
+}
+
+func newErrorOperationFailureException(v protocol.ResponseMetadata) error {
+ return &OperationFailureException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *OperationFailureException) Code() string {
+ return "OperationFailureException"
+}
+
+// Message returns the exception's message.
+func (s *OperationFailureException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *OperationFailureException) OrigErr() error {
+ return nil
+}
+
+func (s *OperationFailureException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *OperationFailureException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *OperationFailureException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// Describes the origin resource of an Amazon Lightsail content delivery network
+// (CDN) distribution.
+//
+// An origin can be a Lightsail instance or load balancer. A distribution pulls
+// content from an origin, caches it, and serves it to viewers via a worldwide
+// network of edge servers.
+type Origin struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the origin resource.
+ Name *string `locationName:"name" type:"string"`
+
+ // The protocol that your Amazon Lightsail distribution uses when establishing
+ // a connection with your origin to pull content.
+ ProtocolPolicy *string `locationName:"protocolPolicy" type:"string" enum:"OriginProtocolPolicyEnum"`
+
+ // The AWS Region name of the origin resource.
+ RegionName *string `locationName:"regionName" type:"string" enum:"RegionName"`
+
+ // The resource type of the origin resource (e.g., Instance).
+ ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"`
+}
+
+// String returns the string representation
+func (s Origin) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Origin) GoString() string {
+ return s.String()
+}
+
+// SetName sets the Name field's value.
+func (s *Origin) SetName(v string) *Origin {
+ s.Name = &v
+ return s
+}
+
+// SetProtocolPolicy sets the ProtocolPolicy field's value.
+func (s *Origin) SetProtocolPolicy(v string) *Origin {
+ s.ProtocolPolicy = &v
+ return s
+}
+
+// SetRegionName sets the RegionName field's value.
+func (s *Origin) SetRegionName(v string) *Origin {
+ s.RegionName = &v
+ return s
+}
+
+// SetResourceType sets the ResourceType field's value.
+func (s *Origin) SetResourceType(v string) *Origin {
+ s.ResourceType = &v
+ return s
+}
+
+// The password data for the Windows Server-based instance, including the ciphertext
+// and the key pair name.
+type PasswordData struct {
+ _ struct{} `type:"structure"`
+
+ // The encrypted password. Ciphertext will be an empty string if access to your
+ // new instance is not ready yet. When you create an instance, it can take up
+ // to 15 minutes for the instance to be ready.
+ //
+ // If you use the default key pair (LightsailDefaultKeyPair), the decrypted
+ // password will be available in the password field.
+ //
+ // If you are using a custom key pair, you need to use your own means of decryption.
+ //
+ // If you change the Administrator password on the instance, Lightsail will
+ // continue to return the original ciphertext value. When accessing the instance
+ // using RDP, you need to manually enter the Administrator password after changing
+ // it from the default.
+ Ciphertext *string `locationName:"ciphertext" type:"string"`
+
+ // The name of the key pair that you used when creating your instance. If no
+ // key pair name was specified when creating the instance, Lightsail uses the
+ // default key pair (LightsailDefaultKeyPair).
+ //
+ // If you are using a custom key pair, you need to use your own means of decrypting
+ // your password using the ciphertext. Lightsail creates the ciphertext by encrypting
+ // your password with the public key part of this key pair.
+ KeyPairName *string `locationName:"keyPairName" type:"string"`
+}
+
+// String returns the string representation
+func (s PasswordData) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PasswordData) GoString() string {
+ return s.String()
+}
+
+// SetCiphertext sets the Ciphertext field's value.
+func (s *PasswordData) SetCiphertext(v string) *PasswordData {
+ s.Ciphertext = &v
+ return s
+}
+
+// SetKeyPairName sets the KeyPairName field's value.
+func (s *PasswordData) SetKeyPairName(v string) *PasswordData {
+ s.KeyPairName = &v
+ return s
+}
+
+type PeerVpcInput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PeerVpcInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PeerVpcInput) GoString() string {
+ return s.String()
+}
+
+type PeerVpcOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operation *Operation `locationName:"operation" type:"structure"`
+}
+
+// String returns the string representation
+func (s PeerVpcOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PeerVpcOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperation sets the Operation field's value.
+func (s *PeerVpcOutput) SetOperation(v *Operation) *PeerVpcOutput {
+ s.Operation = v
+ return s
+}
+
+// Describes a pending database maintenance action.
+type PendingMaintenanceAction struct {
+ _ struct{} `type:"structure"`
+
+ // The type of pending database maintenance action.
+ Action *string `locationName:"action" type:"string"`
+
+ // The effective date of the pending database maintenance action.
+ CurrentApplyDate *time.Time `locationName:"currentApplyDate" type:"timestamp"`
+
+ // Additional detail about the pending database maintenance action.
+ Description *string `locationName:"description" type:"string"`
+}
+
+// String returns the string representation
+func (s PendingMaintenanceAction) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PendingMaintenanceAction) GoString() string {
+ return s.String()
+}
+
+// SetAction sets the Action field's value.
+func (s *PendingMaintenanceAction) SetAction(v string) *PendingMaintenanceAction {
+ s.Action = &v
+ return s
+}
+
+// SetCurrentApplyDate sets the CurrentApplyDate field's value.
+func (s *PendingMaintenanceAction) SetCurrentApplyDate(v time.Time) *PendingMaintenanceAction {
+ s.CurrentApplyDate = &v
+ return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *PendingMaintenanceAction) SetDescription(v string) *PendingMaintenanceAction {
+ s.Description = &v
+ return s
+}
+
+// Describes a pending database value modification.
+type PendingModifiedRelationalDatabaseValues struct {
+ _ struct{} `type:"structure"`
+
+ // A Boolean value indicating whether automated backup retention is enabled.
+ BackupRetentionEnabled *bool `locationName:"backupRetentionEnabled" type:"boolean"`
+
+ // The database engine version.
+ EngineVersion *string `locationName:"engineVersion" type:"string"`
+
+ // The password for the master user of the database.
+ MasterUserPassword *string `locationName:"masterUserPassword" type:"string"`
+}
+
+// String returns the string representation
+func (s PendingModifiedRelationalDatabaseValues) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PendingModifiedRelationalDatabaseValues) GoString() string {
+ return s.String()
+}
+
+// SetBackupRetentionEnabled sets the BackupRetentionEnabled field's value.
+func (s *PendingModifiedRelationalDatabaseValues) SetBackupRetentionEnabled(v bool) *PendingModifiedRelationalDatabaseValues {
+ s.BackupRetentionEnabled = &v
+ return s
+}
+
+// SetEngineVersion sets the EngineVersion field's value.
+func (s *PendingModifiedRelationalDatabaseValues) SetEngineVersion(v string) *PendingModifiedRelationalDatabaseValues {
+ s.EngineVersion = &v
+ return s
+}
+
+// SetMasterUserPassword sets the MasterUserPassword field's value.
+func (s *PendingModifiedRelationalDatabaseValues) SetMasterUserPassword(v string) *PendingModifiedRelationalDatabaseValues {
+ s.MasterUserPassword = &v
+ return s
+}
+
+// Describes ports to open on an instance, the IP addresses allowed to connect
+// to the instance through the ports, and the protocol.
+type PortInfo struct {
+ _ struct{} `type:"structure"`
+
+ // An alias that defines access for a preconfigured range of IP addresses.
+ //
+ // The only alias currently supported is lightsail-connect, which allows IP
+ // addresses of the browser-based RDP/SSH client in the Lightsail console to
+ // connect to your instance.
+ CidrListAliases []*string `locationName:"cidrListAliases" type:"list"`
+
+ // The IPv4 address, or range of IPv4 addresses (in CIDR notation) that are
+ // allowed to connect to an instance through the ports, and the protocol.
+ //
+ // The ipv6Cidrs parameter lists the IPv6 addresses that are allowed to connect
+ // to an instance.
+ //
+ // Examples:
+ //
+ // * To allow the IP address 192.0.2.44, specify 192.0.2.44 or 192.0.2.44/32.
+ //
+ // * To allow the IP addresses 192.0.2.0 to 192.0.2.255, specify 192.0.2.0/24.
+ //
+ // For more information about CIDR block notation, see Classless Inter-Domain
+ // Routing (https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation)
+ // on Wikipedia.
+ Cidrs []*string `locationName:"cidrs" type:"list"`
+
+ // The first port in a range of open ports on an instance.
+ //
+ // Allowed ports:
+ //
+ // * TCP and UDP - 0 to 65535
+ //
+ // * ICMP - The ICMP type for IPv4 addresses. For example, specify 8 as the
+ // fromPort (ICMP type), and -1 as the toPort (ICMP code), to enable ICMP
+ // Ping. For more information, see Control Messages (https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol#Control_messages)
+ // on Wikipedia.
+ //
+ // * ICMPv6 - The ICMP type for IPv6 addresses. For example, specify 128
+ // as the fromPort (ICMPv6 type), and 0 as toPort (ICMPv6 code). For more
+ // information, see Internet Control Message Protocol for IPv6 (https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol_for_IPv6).
+ FromPort *int64 `locationName:"fromPort" type:"integer"`
+
+ // The IPv6 address, or range of IPv6 addresses (in CIDR notation) that are
+ // allowed to connect to an instance through the ports, and the protocol. Only
+ // devices with an IPv6 address can connect to an instance through IPv6; otherwise,
+ // IPv4 should be used.
+ //
+ // The cidrs parameter lists the IPv4 addresses that are allowed to connect
+ // to an instance.
+ //
+ // For more information about CIDR block notation, see Classless Inter-Domain
+ // Routing (https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation)
+ // on Wikipedia.
+ Ipv6Cidrs []*string `locationName:"ipv6Cidrs" type:"list"`
+
+ // The IP protocol name.
+ //
+ // The name can be one of the following:
+ //
+ // * tcp - Transmission Control Protocol (TCP) provides reliable, ordered,
+ // and error-checked delivery of streamed data between applications running
+ // on hosts communicating by an IP network. If you have an application that
+ // doesn't require reliable data stream service, use UDP instead.
+ //
+ // * all - All transport layer protocol types. For more general information,
+ // see Transport layer (https://en.wikipedia.org/wiki/Transport_layer) on
+ // Wikipedia.
+ //
+ // * udp - With User Datagram Protocol (UDP), computer applications can send
+ // messages (or datagrams) to other hosts on an Internet Protocol (IP) network.
+ // Prior communications are not required to set up transmission channels
+ // or data paths. Applications that don't require reliable data stream service
+ // can use UDP, which provides a connectionless datagram service that emphasizes
+ // reduced latency over reliability. If you do require reliable data stream
+ // service, use TCP instead.
+ //
+ // * icmp - Internet Control Message Protocol (ICMP) is used to send error
+ // messages and operational information indicating success or failure when
+ // communicating with an instance. For example, an error is indicated when
+ // an instance could not be reached. When you specify icmp as the protocol,
+ // you must specify the ICMP type using the fromPort parameter, and ICMP
+ // code using the toPort parameter.
+ Protocol *string `locationName:"protocol" type:"string" enum:"NetworkProtocol"`
+
+ // The last port in a range of open ports on an instance.
+ //
+ // Allowed ports:
+ //
+ // * TCP and UDP - 0 to 65535
+ //
+ // * ICMP - The ICMP code for IPv4 addresses. For example, specify 8 as the
+ // fromPort (ICMP type), and -1 as the toPort (ICMP code), to enable ICMP
+ // Ping. For more information, see Control Messages (https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol#Control_messages)
+ // on Wikipedia.
+ //
+ // * ICMPv6 - The ICMP code for IPv6 addresses. For example, specify 128
+ // as the fromPort (ICMPv6 type), and 0 as toPort (ICMPv6 code). For more
+ // information, see Internet Control Message Protocol for IPv6 (https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol_for_IPv6).
+ ToPort *int64 `locationName:"toPort" type:"integer"`
+}
+
+// String returns the string representation
+func (s PortInfo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PortInfo) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PortInfo) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PortInfo"}
+ if s.FromPort != nil && *s.FromPort < -1 {
+ invalidParams.Add(request.NewErrParamMinValue("FromPort", -1))
+ }
+ if s.ToPort != nil && *s.ToPort < -1 {
+ invalidParams.Add(request.NewErrParamMinValue("ToPort", -1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetCidrListAliases sets the CidrListAliases field's value.
+func (s *PortInfo) SetCidrListAliases(v []*string) *PortInfo {
+ s.CidrListAliases = v
+ return s
+}
+
+// SetCidrs sets the Cidrs field's value.
+func (s *PortInfo) SetCidrs(v []*string) *PortInfo {
+ s.Cidrs = v
+ return s
+}
+
+// SetFromPort sets the FromPort field's value.
+func (s *PortInfo) SetFromPort(v int64) *PortInfo {
+ s.FromPort = &v
+ return s
+}
+
+// SetIpv6Cidrs sets the Ipv6Cidrs field's value.
+func (s *PortInfo) SetIpv6Cidrs(v []*string) *PortInfo {
+ s.Ipv6Cidrs = v
+ return s
+}
+
+// SetProtocol sets the Protocol field's value.
+func (s *PortInfo) SetProtocol(v string) *PortInfo {
+ s.Protocol = &v
+ return s
+}
+
+// SetToPort sets the ToPort field's value.
+func (s *PortInfo) SetToPort(v int64) *PortInfo {
+ s.ToPort = &v
+ return s
+}
+
+type PutAlarmInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name for the alarm. Specify the name of an existing alarm to update,
+ // and overwrite the previous configuration of the alarm.
+ //
+ // AlarmName is a required field
+ AlarmName *string `locationName:"alarmName" type:"string" required:"true"`
+
+ // The arithmetic operation to use when comparing the specified statistic to
+ // the threshold. The specified statistic value is used as the first operand.
+ //
+ // ComparisonOperator is a required field
+ ComparisonOperator *string `locationName:"comparisonOperator" type:"string" required:"true" enum:"ComparisonOperator"`
+
+ // The contact protocols to use for the alarm, such as Email, SMS (text messaging),
+ // or both.
+ //
+ // A notification is sent via the specified contact protocol if notifications
+ // are enabled for the alarm, and when the alarm is triggered.
+ //
+ // A notification is not sent if a contact protocol is not specified, if the
+ // specified contact protocol is not configured in the AWS Region, or if notifications
+	// are not enabled for the alarm using the notificationEnabled parameter.
+ //
+ // Use the CreateContactMethod action to configure a contact protocol in an
+ // AWS Region.
+ ContactProtocols []*string `locationName:"contactProtocols" type:"list"`
+
+ // The number of data points that must be not within the specified threshold
+ // to trigger the alarm. If you are setting an "M out of N" alarm, this value
+ // (datapointsToAlarm) is the M.
+ DatapointsToAlarm *int64 `locationName:"datapointsToAlarm" type:"integer"`
+
+ // The number of most recent periods over which data is compared to the specified
+ // threshold. If you are setting an "M out of N" alarm, this value (evaluationPeriods)
+ // is the N.
+ //
+ // If you are setting an alarm that requires that a number of consecutive data
+ // points be breaching to trigger the alarm, this value specifies the rolling
+ // period of time in which data points are evaluated.
+ //
+ // Each evaluation period is five minutes long. For example, specify an evaluation
+ // period of 24 to evaluate a metric over a rolling period of two hours.
+ //
+	// You can specify a minimum evaluation period of 1 (5 minutes), and a maximum
+ // evaluation period of 288 (24 hours).
+ //
+ // EvaluationPeriods is a required field
+ EvaluationPeriods *int64 `locationName:"evaluationPeriods" type:"integer" required:"true"`
+
+ // The name of the metric to associate with the alarm.
+ //
+ // You can configure up to two alarms per metric.
+ //
+ // The following metrics are available for each resource type:
+ //
+ // * Instances: BurstCapacityPercentage, BurstCapacityTime, CPUUtilization,
+ // NetworkIn, NetworkOut, StatusCheckFailed, StatusCheckFailed_Instance,
+ // and StatusCheckFailed_System.
+ //
+ // * Load balancers: ClientTLSNegotiationErrorCount, HealthyHostCount, UnhealthyHostCount,
+ // HTTPCode_LB_4XX_Count, HTTPCode_LB_5XX_Count, HTTPCode_Instance_2XX_Count,
+ // HTTPCode_Instance_3XX_Count, HTTPCode_Instance_4XX_Count, HTTPCode_Instance_5XX_Count,
+ // InstanceResponseTime, RejectedConnectionCount, and RequestCount.
+ //
+ // * Relational databases: CPUUtilization, DatabaseConnections, DiskQueueDepth,
+ // FreeStorageSpace, NetworkReceiveThroughput, and NetworkTransmitThroughput.
+ //
+ // For more information about these metrics, see Metrics available in Lightsail
+ // (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-resource-health-metrics#available-metrics).
+ //
+ // MetricName is a required field
+ MetricName *string `locationName:"metricName" type:"string" required:"true" enum:"MetricName"`
+
+ // The name of the Lightsail resource that will be monitored.
+ //
+ // Instances, load balancers, and relational databases are the only Lightsail
+ // resources that can currently be monitored by alarms.
+ //
+ // MonitoredResourceName is a required field
+ MonitoredResourceName *string `locationName:"monitoredResourceName" type:"string" required:"true"`
+
+ // Indicates whether the alarm is enabled.
+ //
+ // Notifications are enabled by default if you don't specify this parameter.
+ NotificationEnabled *bool `locationName:"notificationEnabled" type:"boolean"`
+
+ // The alarm states that trigger a notification.
+ //
+ // An alarm has the following possible states:
+ //
+ // * ALARM - The metric is outside of the defined threshold.
+ //
+ // * INSUFFICIENT_DATA - The alarm has just started, the metric is not available,
+ // or not enough data is available for the metric to determine the alarm
+ // state.
+ //
+ // * OK - The metric is within the defined threshold.
+ //
+ // When you specify a notification trigger, the ALARM state must be specified.
+ // The INSUFFICIENT_DATA and OK states can be specified in addition to the ALARM
+ // state.
+ //
+ // * If you specify OK as an alarm trigger, a notification is sent when the
+ // alarm switches from an ALARM or INSUFFICIENT_DATA alarm state to an OK
+ // state. This can be thought of as an all clear alarm notification.
+ //
+ // * If you specify INSUFFICIENT_DATA as the alarm trigger, a notification
+ // is sent when the alarm switches from an OK or ALARM alarm state to an
+ // INSUFFICIENT_DATA state.
+ //
+ // The notification trigger defaults to ALARM if you don't specify this parameter.
+ NotificationTriggers []*string `locationName:"notificationTriggers" type:"list"`
+
+ // The value against which the specified statistic is compared.
+ //
+ // Threshold is a required field
+ Threshold *float64 `locationName:"threshold" type:"double" required:"true"`
+
+ // Sets how this alarm will handle missing data points.
+ //
+ // An alarm can treat missing data in the following ways:
+ //
+ // * breaching - Assume the missing data is not within the threshold. Missing
+ // data counts towards the number of times the metric is not within the threshold.
+ //
+ // * notBreaching - Assume the missing data is within the threshold. Missing
+ // data does not count towards the number of times the metric is not within
+ // the threshold.
+ //
+ // * ignore - Ignore the missing data. Maintains the current alarm state.
+ //
+ // * missing - Missing data is treated as missing.
+ //
+ // If treatMissingData is not specified, the default behavior of missing is
+ // used.
+ TreatMissingData *string `locationName:"treatMissingData" type:"string" enum:"TreatMissingData"`
+}
+
+// String returns the string representation
+func (s PutAlarmInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutAlarmInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutAlarmInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutAlarmInput"}
+ if s.AlarmName == nil {
+ invalidParams.Add(request.NewErrParamRequired("AlarmName"))
+ }
+ if s.ComparisonOperator == nil {
+ invalidParams.Add(request.NewErrParamRequired("ComparisonOperator"))
+ }
+ if s.EvaluationPeriods == nil {
+ invalidParams.Add(request.NewErrParamRequired("EvaluationPeriods"))
+ }
+ if s.MetricName == nil {
+ invalidParams.Add(request.NewErrParamRequired("MetricName"))
+ }
+ if s.MonitoredResourceName == nil {
+ invalidParams.Add(request.NewErrParamRequired("MonitoredResourceName"))
+ }
+ if s.Threshold == nil {
+ invalidParams.Add(request.NewErrParamRequired("Threshold"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAlarmName sets the AlarmName field's value.
+func (s *PutAlarmInput) SetAlarmName(v string) *PutAlarmInput {
+ s.AlarmName = &v
+ return s
+}
+
+// SetComparisonOperator sets the ComparisonOperator field's value.
+func (s *PutAlarmInput) SetComparisonOperator(v string) *PutAlarmInput {
+ s.ComparisonOperator = &v
+ return s
+}
+
+// SetContactProtocols sets the ContactProtocols field's value.
+func (s *PutAlarmInput) SetContactProtocols(v []*string) *PutAlarmInput {
+ s.ContactProtocols = v
+ return s
+}
+
+// SetDatapointsToAlarm sets the DatapointsToAlarm field's value.
+func (s *PutAlarmInput) SetDatapointsToAlarm(v int64) *PutAlarmInput {
+ s.DatapointsToAlarm = &v
+ return s
+}
+
+// SetEvaluationPeriods sets the EvaluationPeriods field's value.
+func (s *PutAlarmInput) SetEvaluationPeriods(v int64) *PutAlarmInput {
+ s.EvaluationPeriods = &v
+ return s
+}
+
+// SetMetricName sets the MetricName field's value.
+func (s *PutAlarmInput) SetMetricName(v string) *PutAlarmInput {
+ s.MetricName = &v
+ return s
+}
+
+// SetMonitoredResourceName sets the MonitoredResourceName field's value.
+func (s *PutAlarmInput) SetMonitoredResourceName(v string) *PutAlarmInput {
+ s.MonitoredResourceName = &v
+ return s
+}
+
+// SetNotificationEnabled sets the NotificationEnabled field's value.
+func (s *PutAlarmInput) SetNotificationEnabled(v bool) *PutAlarmInput {
+ s.NotificationEnabled = &v
+ return s
+}
+
+// SetNotificationTriggers sets the NotificationTriggers field's value.
+func (s *PutAlarmInput) SetNotificationTriggers(v []*string) *PutAlarmInput {
+ s.NotificationTriggers = v
+ return s
+}
+
+// SetThreshold sets the Threshold field's value.
+func (s *PutAlarmInput) SetThreshold(v float64) *PutAlarmInput {
+ s.Threshold = &v
+ return s
+}
+
+// SetTreatMissingData sets the TreatMissingData field's value.
+func (s *PutAlarmInput) SetTreatMissingData(v string) *PutAlarmInput {
+ s.TreatMissingData = &v
+ return s
+}
+
+type PutAlarmOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s PutAlarmOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutAlarmOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *PutAlarmOutput) SetOperations(v []*Operation) *PutAlarmOutput {
+ s.Operations = v
+ return s
+}
+
+type PutInstancePublicPortsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the instance for which to open ports.
+ //
+ // InstanceName is a required field
+ InstanceName *string `locationName:"instanceName" type:"string" required:"true"`
+
+ // An array of objects to describe the ports to open for the specified instance.
+ //
+ // PortInfos is a required field
+ PortInfos []*PortInfo `locationName:"portInfos" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s PutInstancePublicPortsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutInstancePublicPortsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutInstancePublicPortsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutInstancePublicPortsInput"}
+ if s.InstanceName == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceName"))
+ }
+ if s.PortInfos == nil {
+ invalidParams.Add(request.NewErrParamRequired("PortInfos"))
+ }
+ if s.PortInfos != nil {
+ for i, v := range s.PortInfos {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PortInfos", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetInstanceName sets the InstanceName field's value.
+func (s *PutInstancePublicPortsInput) SetInstanceName(v string) *PutInstancePublicPortsInput {
+ s.InstanceName = &v
+ return s
+}
+
+// SetPortInfos sets the PortInfos field's value.
+func (s *PutInstancePublicPortsInput) SetPortInfos(v []*PortInfo) *PutInstancePublicPortsInput {
+ s.PortInfos = v
+ return s
+}
+
+type PutInstancePublicPortsOutput struct {
+ _ struct{} `type:"structure"`
+
+	// An object that describes the result of the action, such as the status of
+	// the request, the timestamp of the request, and the resources affected by
+	// the request.
+ Operation *Operation `locationName:"operation" type:"structure"`
+}
+
+// String returns the string representation
+func (s PutInstancePublicPortsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutInstancePublicPortsOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperation sets the Operation field's value.
+func (s *PutInstancePublicPortsOutput) SetOperation(v *Operation) *PutInstancePublicPortsOutput {
+ s.Operation = v
+ return s
+}
+
+// Describes the query string parameters that an Amazon Lightsail content delivery
+// network (CDN) distribution bases caching on.
+//
+// For the query strings that you specify, your distribution caches separate
+// versions of the specified content based on the query string values in viewer
+// requests.
+type QueryStringObject struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether the distribution forwards and caches based on query strings.
+ Option *bool `locationName:"option" type:"boolean"`
+
+ // The specific query strings that the distribution forwards to the origin.
+ //
+ // Your distribution will cache content based on the specified query strings.
+ //
+ // If the option parameter is true, then your distribution forwards all query
+ // strings, regardless of what you specify using the queryStringsAllowList parameter.
+ QueryStringsAllowList []*string `locationName:"queryStringsAllowList" type:"list"`
+}
+
+// String returns the string representation
+func (s QueryStringObject) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s QueryStringObject) GoString() string {
+ return s.String()
+}
+
+// SetOption sets the Option field's value.
+func (s *QueryStringObject) SetOption(v bool) *QueryStringObject {
+ s.Option = &v
+ return s
+}
+
+// SetQueryStringsAllowList sets the QueryStringsAllowList field's value.
+func (s *QueryStringObject) SetQueryStringsAllowList(v []*string) *QueryStringObject {
+ s.QueryStringsAllowList = v
+ return s
+}
+
+type RebootInstanceInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the instance to reboot.
+ //
+ // InstanceName is a required field
+ InstanceName *string `locationName:"instanceName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RebootInstanceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RebootInstanceInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RebootInstanceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RebootInstanceInput"}
+ if s.InstanceName == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetInstanceName sets the InstanceName field's value.
+func (s *RebootInstanceInput) SetInstanceName(v string) *RebootInstanceInput {
+ s.InstanceName = &v
+ return s
+}
+
+type RebootInstanceOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s RebootInstanceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RebootInstanceOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *RebootInstanceOutput) SetOperations(v []*Operation) *RebootInstanceOutput {
+ s.Operations = v
+ return s
+}
+
+type RebootRelationalDatabaseInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of your database to reboot.
+ //
+ // RelationalDatabaseName is a required field
+ RelationalDatabaseName *string `locationName:"relationalDatabaseName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RebootRelationalDatabaseInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RebootRelationalDatabaseInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RebootRelationalDatabaseInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RebootRelationalDatabaseInput"}
+ if s.RelationalDatabaseName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetRelationalDatabaseName sets the RelationalDatabaseName field's value.
+func (s *RebootRelationalDatabaseInput) SetRelationalDatabaseName(v string) *RebootRelationalDatabaseInput {
+ s.RelationalDatabaseName = &v
+ return s
+}
+
+type RebootRelationalDatabaseOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s RebootRelationalDatabaseOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RebootRelationalDatabaseOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *RebootRelationalDatabaseOutput) SetOperations(v []*Operation) *RebootRelationalDatabaseOutput {
+ s.Operations = v
+ return s
+}
+
+// Describes the AWS Region.
+type Region struct {
+ _ struct{} `type:"structure"`
+
+ // The Availability Zones. Follows the format us-east-2a (case-sensitive).
+ AvailabilityZones []*AvailabilityZone `locationName:"availabilityZones" type:"list"`
+
+ // The continent code (e.g., NA, meaning North America).
+ ContinentCode *string `locationName:"continentCode" type:"string"`
+
+ // The description of the AWS Region (e.g., This region is recommended to serve
+ // users in the eastern United States and eastern Canada).
+ Description *string `locationName:"description" type:"string"`
+
+ // The display name (e.g., Ohio).
+ DisplayName *string `locationName:"displayName" type:"string"`
+
+ // The region name (e.g., us-east-2).
+ Name *string `locationName:"name" type:"string" enum:"RegionName"`
+
+ // The Availability Zones for databases. Follows the format us-east-2a (case-sensitive).
+ RelationalDatabaseAvailabilityZones []*AvailabilityZone `locationName:"relationalDatabaseAvailabilityZones" type:"list"`
+}
+
+// String returns the string representation
+func (s Region) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Region) GoString() string {
+ return s.String()
+}
+
+// SetAvailabilityZones sets the AvailabilityZones field's value.
+func (s *Region) SetAvailabilityZones(v []*AvailabilityZone) *Region {
+ s.AvailabilityZones = v
+ return s
+}
+
+// SetContinentCode sets the ContinentCode field's value.
+func (s *Region) SetContinentCode(v string) *Region {
+ s.ContinentCode = &v
+ return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *Region) SetDescription(v string) *Region {
+ s.Description = &v
+ return s
+}
+
+// SetDisplayName sets the DisplayName field's value.
+func (s *Region) SetDisplayName(v string) *Region {
+ s.DisplayName = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *Region) SetName(v string) *Region {
+ s.Name = &v
+ return s
+}
+
+// SetRelationalDatabaseAvailabilityZones sets the RelationalDatabaseAvailabilityZones field's value.
+func (s *Region) SetRelationalDatabaseAvailabilityZones(v []*AvailabilityZone) *Region {
+ s.RelationalDatabaseAvailabilityZones = v
+ return s
+}
+
+type RegisterContainerImageInput struct {
+ _ struct{} `type:"structure"`
+
+ // The digest of the container image to be registered.
+ //
+ // Digest is a required field
+ Digest *string `locationName:"digest" type:"string" required:"true"`
+
+ // The label for the container image when it's registered to the container service.
+ //
+ // Use a descriptive label that you can use to track the different versions
+ // of your registered container images.
+ //
+ // Use the GetContainerImages action to return the container images registered
+	// to a Lightsail container service. The label is the <imagelabel> portion
+	// of the following image name example:
+	//
+	//    * :container-service-1.<imagelabel>.1
+ //
+ // If the name of your container service is mycontainerservice, and the label
+ // that you specify is mystaticwebsite, then the name of the registered container
+ // image will be :mycontainerservice.mystaticwebsite.1.
+ //
+ // The number at the end of these image name examples represents the version
+ // of the registered container image. If you push and register another container
+ // image to the same Lightsail container service, with the same label, then
+ // the version number for the new registered container image will be 2. If you
+ // push and register another container image, the version number will be 3,
+ // and so on.
+ //
+ // Label is a required field
+ Label *string `locationName:"label" min:"1" type:"string" required:"true"`
+
+ // The name of the container service for which to register a container image.
+ //
+ // ServiceName is a required field
+ ServiceName *string `locationName:"serviceName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RegisterContainerImageInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterContainerImageInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RegisterContainerImageInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RegisterContainerImageInput"}
+ if s.Digest == nil {
+ invalidParams.Add(request.NewErrParamRequired("Digest"))
+ }
+ if s.Label == nil {
+ invalidParams.Add(request.NewErrParamRequired("Label"))
+ }
+ if s.Label != nil && len(*s.Label) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Label", 1))
+ }
+ if s.ServiceName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ServiceName"))
+ }
+ if s.ServiceName != nil && len(*s.ServiceName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ServiceName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDigest sets the Digest field's value.
+func (s *RegisterContainerImageInput) SetDigest(v string) *RegisterContainerImageInput {
+ s.Digest = &v
+ return s
+}
+
+// SetLabel sets the Label field's value.
+func (s *RegisterContainerImageInput) SetLabel(v string) *RegisterContainerImageInput {
+ s.Label = &v
+ return s
+}
+
+// SetServiceName sets the ServiceName field's value.
+func (s *RegisterContainerImageInput) SetServiceName(v string) *RegisterContainerImageInput {
+ s.ServiceName = &v
+ return s
+}
+
+type RegisterContainerImageOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Describes a container image that is registered to an Amazon Lightsail container
+ // service.
+ ContainerImage *ContainerImage `locationName:"containerImage" type:"structure"`
+}
+
+// String returns the string representation
+func (s RegisterContainerImageOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterContainerImageOutput) GoString() string {
+ return s.String()
+}
+
+// SetContainerImage sets the ContainerImage field's value.
+func (s *RegisterContainerImageOutput) SetContainerImage(v *ContainerImage) *RegisterContainerImageOutput {
+ s.ContainerImage = v
+ return s
+}
+
+// Describes a database.
+type RelationalDatabase struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the database.
+ Arn *string `locationName:"arn" type:"string"`
+
+ // A Boolean value indicating whether automated backup retention is enabled
+ // for the database.
+ BackupRetentionEnabled *bool `locationName:"backupRetentionEnabled" type:"boolean"`
+
+ // The certificate associated with the database.
+ CaCertificateIdentifier *string `locationName:"caCertificateIdentifier" type:"string"`
+
+ // The timestamp when the database was created. Formatted in Unix time.
+ CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`
+
+ // The database software (for example, MySQL).
+ Engine *string `locationName:"engine" type:"string"`
+
+ // The database engine version (for example, 5.7.23).
+ EngineVersion *string `locationName:"engineVersion" type:"string"`
+
+ // Describes the hardware of the database.
+ Hardware *RelationalDatabaseHardware `locationName:"hardware" type:"structure"`
+
+ // The latest point in time to which the database can be restored. Formatted
+ // in Unix time.
+ LatestRestorableTime *time.Time `locationName:"latestRestorableTime" type:"timestamp"`
+
+ // The Region name and Availability Zone where the database is located.
+ Location *ResourceLocation `locationName:"location" type:"structure"`
+
+ // The name of the master database created when the Lightsail database resource
+ // is created.
+ MasterDatabaseName *string `locationName:"masterDatabaseName" type:"string"`
+
+ // The master endpoint for the database.
+ MasterEndpoint *RelationalDatabaseEndpoint `locationName:"masterEndpoint" type:"structure"`
+
+ // The master user name of the database.
+ MasterUsername *string `locationName:"masterUsername" type:"string"`
+
+ // The unique name of the database resource in Lightsail.
+ Name *string `locationName:"name" type:"string"`
+
+ // The status of parameter updates for the database.
+ ParameterApplyStatus *string `locationName:"parameterApplyStatus" type:"string"`
+
+ // Describes the pending maintenance actions for the database.
+ PendingMaintenanceActions []*PendingMaintenanceAction `locationName:"pendingMaintenanceActions" type:"list"`
+
+ // Describes pending database value modifications.
+ PendingModifiedValues *PendingModifiedRelationalDatabaseValues `locationName:"pendingModifiedValues" type:"structure"`
+
+ // The daily time range during which automated backups are created for the database
+ // (for example, 16:00-16:30).
+ PreferredBackupWindow *string `locationName:"preferredBackupWindow" type:"string"`
+
+ // The weekly time range during which system maintenance can occur on the database.
+ //
+ // In the format ddd:hh24:mi-ddd:hh24:mi. For example, Tue:17:00-Tue:17:30.
+ PreferredMaintenanceWindow *string `locationName:"preferredMaintenanceWindow" type:"string"`
+
+ // A Boolean value indicating whether the database is publicly accessible.
+ PubliclyAccessible *bool `locationName:"publiclyAccessible" type:"boolean"`
+
+ // The blueprint ID for the database. A blueprint describes the major engine
+ // version of a database.
+ RelationalDatabaseBlueprintId *string `locationName:"relationalDatabaseBlueprintId" type:"string"`
+
+ // The bundle ID for the database. A bundle describes the performance specifications
+ // for your database.
+ RelationalDatabaseBundleId *string `locationName:"relationalDatabaseBundleId" type:"string"`
+
+ // The Lightsail resource type for the database (for example, RelationalDatabase).
+ ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"`
+
+ // Describes the secondary Availability Zone of a high availability database.
+ //
+ // The secondary database is used for failover support of a high availability
+ // database.
+ SecondaryAvailabilityZone *string `locationName:"secondaryAvailabilityZone" type:"string"`
+
+ // Describes the current state of the database.
+ State *string `locationName:"state" type:"string"`
+
+ // The support code for the database. Include this code in your email to support
+ // when you have questions about a database in Lightsail. This code enables
+ // our support team to look up your Lightsail information more easily.
+ SupportCode *string `locationName:"supportCode" type:"string"`
+
+ // The tag keys and optional values for the resource. For more information about
+ // tags in Lightsail, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-tags).
+ Tags []*Tag `locationName:"tags" type:"list"`
+}
+
+// String returns the string representation
+func (s RelationalDatabase) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RelationalDatabase) GoString() string {
+ return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *RelationalDatabase) SetArn(v string) *RelationalDatabase {
+ s.Arn = &v
+ return s
+}
+
+// SetBackupRetentionEnabled sets the BackupRetentionEnabled field's value.
+func (s *RelationalDatabase) SetBackupRetentionEnabled(v bool) *RelationalDatabase {
+ s.BackupRetentionEnabled = &v
+ return s
+}
+
+// SetCaCertificateIdentifier sets the CaCertificateIdentifier field's value.
+func (s *RelationalDatabase) SetCaCertificateIdentifier(v string) *RelationalDatabase {
+ s.CaCertificateIdentifier = &v
+ return s
+}
+
+// SetCreatedAt sets the CreatedAt field's value.
+func (s *RelationalDatabase) SetCreatedAt(v time.Time) *RelationalDatabase {
+ s.CreatedAt = &v
+ return s
+}
+
+// SetEngine sets the Engine field's value.
+func (s *RelationalDatabase) SetEngine(v string) *RelationalDatabase {
+ s.Engine = &v
+ return s
+}
+
+// SetEngineVersion sets the EngineVersion field's value.
+func (s *RelationalDatabase) SetEngineVersion(v string) *RelationalDatabase {
+ s.EngineVersion = &v
+ return s
+}
+
+// SetHardware sets the Hardware field's value.
+func (s *RelationalDatabase) SetHardware(v *RelationalDatabaseHardware) *RelationalDatabase {
+ s.Hardware = v
+ return s
+}
+
+// SetLatestRestorableTime sets the LatestRestorableTime field's value.
+func (s *RelationalDatabase) SetLatestRestorableTime(v time.Time) *RelationalDatabase {
+ s.LatestRestorableTime = &v
+ return s
+}
+
+// SetLocation sets the Location field's value.
+func (s *RelationalDatabase) SetLocation(v *ResourceLocation) *RelationalDatabase {
+ s.Location = v
+ return s
+}
+
+// SetMasterDatabaseName sets the MasterDatabaseName field's value.
+func (s *RelationalDatabase) SetMasterDatabaseName(v string) *RelationalDatabase {
+ s.MasterDatabaseName = &v
+ return s
+}
+
+// SetMasterEndpoint sets the MasterEndpoint field's value.
+func (s *RelationalDatabase) SetMasterEndpoint(v *RelationalDatabaseEndpoint) *RelationalDatabase {
+ s.MasterEndpoint = v
+ return s
+}
+
+// SetMasterUsername sets the MasterUsername field's value.
+func (s *RelationalDatabase) SetMasterUsername(v string) *RelationalDatabase {
+ s.MasterUsername = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *RelationalDatabase) SetName(v string) *RelationalDatabase {
+ s.Name = &v
+ return s
+}
+
+// SetParameterApplyStatus sets the ParameterApplyStatus field's value.
+func (s *RelationalDatabase) SetParameterApplyStatus(v string) *RelationalDatabase {
+ s.ParameterApplyStatus = &v
+ return s
+}
+
+// SetPendingMaintenanceActions sets the PendingMaintenanceActions field's value.
+func (s *RelationalDatabase) SetPendingMaintenanceActions(v []*PendingMaintenanceAction) *RelationalDatabase {
+ s.PendingMaintenanceActions = v
+ return s
+}
+
+// SetPendingModifiedValues sets the PendingModifiedValues field's value.
+func (s *RelationalDatabase) SetPendingModifiedValues(v *PendingModifiedRelationalDatabaseValues) *RelationalDatabase {
+ s.PendingModifiedValues = v
+ return s
+}
+
+// SetPreferredBackupWindow sets the PreferredBackupWindow field's value.
+func (s *RelationalDatabase) SetPreferredBackupWindow(v string) *RelationalDatabase {
+ s.PreferredBackupWindow = &v
+ return s
+}
+
+// SetPreferredMaintenanceWindow sets the PreferredMaintenanceWindow field's value.
+func (s *RelationalDatabase) SetPreferredMaintenanceWindow(v string) *RelationalDatabase {
+ s.PreferredMaintenanceWindow = &v
+ return s
+}
+
+// SetPubliclyAccessible sets the PubliclyAccessible field's value.
+func (s *RelationalDatabase) SetPubliclyAccessible(v bool) *RelationalDatabase {
+ s.PubliclyAccessible = &v
+ return s
+}
+
+// SetRelationalDatabaseBlueprintId sets the RelationalDatabaseBlueprintId field's value.
+func (s *RelationalDatabase) SetRelationalDatabaseBlueprintId(v string) *RelationalDatabase {
+ s.RelationalDatabaseBlueprintId = &v
+ return s
+}
+
+// SetRelationalDatabaseBundleId sets the RelationalDatabaseBundleId field's value.
+func (s *RelationalDatabase) SetRelationalDatabaseBundleId(v string) *RelationalDatabase {
+ s.RelationalDatabaseBundleId = &v
+ return s
+}
+
+// SetResourceType sets the ResourceType field's value.
+func (s *RelationalDatabase) SetResourceType(v string) *RelationalDatabase {
+ s.ResourceType = &v
+ return s
+}
+
+// SetSecondaryAvailabilityZone sets the SecondaryAvailabilityZone field's value.
+func (s *RelationalDatabase) SetSecondaryAvailabilityZone(v string) *RelationalDatabase {
+ s.SecondaryAvailabilityZone = &v
+ return s
+}
+
+// SetState sets the State field's value.
+func (s *RelationalDatabase) SetState(v string) *RelationalDatabase {
+ s.State = &v
+ return s
+}
+
+// SetSupportCode sets the SupportCode field's value.
+func (s *RelationalDatabase) SetSupportCode(v string) *RelationalDatabase {
+ s.SupportCode = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *RelationalDatabase) SetTags(v []*Tag) *RelationalDatabase {
+ s.Tags = v
+ return s
+}
+
+// Describes a database image, or blueprint. A blueprint describes the major
+// engine version of a database.
+type RelationalDatabaseBlueprint struct {
+	_ struct{} `type:"structure"`
+
+	// The ID for the database blueprint.
+	BlueprintId *string `locationName:"blueprintId" type:"string"`
+
+	// The database software of the database blueprint (for example, MySQL).
+	Engine *string `locationName:"engine" type:"string" enum:"RelationalDatabaseEngine"`
+
+	// The description of the database engine for the database blueprint.
+	EngineDescription *string `locationName:"engineDescription" type:"string"`
+
+	// The database engine version for the database blueprint (for example, 5.7.23).
+	EngineVersion *string `locationName:"engineVersion" type:"string"`
+
+	// The description of the database engine version for the database blueprint.
+	EngineVersionDescription *string `locationName:"engineVersionDescription" type:"string"`
+
+	// A Boolean value indicating whether the engine version is the default for
+	// the database blueprint.
+	IsEngineDefault *bool `locationName:"isEngineDefault" type:"boolean"`
+}
+
+// String returns the string representation.
+func (s RelationalDatabaseBlueprint) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s RelationalDatabaseBlueprint) GoString() string {
+	return s.String()
+}
+
+// SetBlueprintId sets the BlueprintId field's value.
+func (s *RelationalDatabaseBlueprint) SetBlueprintId(v string) *RelationalDatabaseBlueprint {
+	s.BlueprintId = &v
+	return s
+}
+
+// SetEngine sets the Engine field's value.
+func (s *RelationalDatabaseBlueprint) SetEngine(v string) *RelationalDatabaseBlueprint {
+	s.Engine = &v
+	return s
+}
+
+// SetEngineDescription sets the EngineDescription field's value.
+func (s *RelationalDatabaseBlueprint) SetEngineDescription(v string) *RelationalDatabaseBlueprint {
+	s.EngineDescription = &v
+	return s
+}
+
+// SetEngineVersion sets the EngineVersion field's value.
+func (s *RelationalDatabaseBlueprint) SetEngineVersion(v string) *RelationalDatabaseBlueprint {
+	s.EngineVersion = &v
+	return s
+}
+
+// SetEngineVersionDescription sets the EngineVersionDescription field's value.
+func (s *RelationalDatabaseBlueprint) SetEngineVersionDescription(v string) *RelationalDatabaseBlueprint {
+	s.EngineVersionDescription = &v
+	return s
+}
+
+// SetIsEngineDefault sets the IsEngineDefault field's value.
+func (s *RelationalDatabaseBlueprint) SetIsEngineDefault(v bool) *RelationalDatabaseBlueprint {
+	s.IsEngineDefault = &v
+	return s
+}
+
+// Describes a database bundle. A bundle describes the performance specifications
+// of the database.
+type RelationalDatabaseBundle struct {
+	_ struct{} `type:"structure"`
+
+	// The ID for the database bundle.
+	BundleId *string `locationName:"bundleId" type:"string"`
+
+	// The number of virtual CPUs (vCPUs) for the database bundle.
+	CpuCount *int64 `locationName:"cpuCount" type:"integer"`
+
+	// The size of the disk for the database bundle.
+	DiskSizeInGb *int64 `locationName:"diskSizeInGb" type:"integer"`
+
+	// A Boolean value indicating whether the database bundle is active.
+	IsActive *bool `locationName:"isActive" type:"boolean"`
+
+	// A Boolean value indicating whether the database bundle is encrypted.
+	IsEncrypted *bool `locationName:"isEncrypted" type:"boolean"`
+
+	// The name for the database bundle.
+	Name *string `locationName:"name" type:"string"`
+
+	// The cost of the database bundle in US currency.
+	Price *float64 `locationName:"price" type:"float"`
+
+	// The amount of RAM in GB (for example, 2.0) for the database bundle.
+	RamSizeInGb *float64 `locationName:"ramSizeInGb" type:"float"`
+
+	// The data transfer rate per month in GB for the database bundle.
+	TransferPerMonthInGb *int64 `locationName:"transferPerMonthInGb" type:"integer"`
+}
+
+// String returns the string representation.
+func (s RelationalDatabaseBundle) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s RelationalDatabaseBundle) GoString() string {
+	return s.String()
+}
+
+// SetBundleId sets the BundleId field's value.
+func (s *RelationalDatabaseBundle) SetBundleId(v string) *RelationalDatabaseBundle {
+	s.BundleId = &v
+	return s
+}
+
+// SetCpuCount sets the CpuCount field's value.
+func (s *RelationalDatabaseBundle) SetCpuCount(v int64) *RelationalDatabaseBundle {
+	s.CpuCount = &v
+	return s
+}
+
+// SetDiskSizeInGb sets the DiskSizeInGb field's value.
+func (s *RelationalDatabaseBundle) SetDiskSizeInGb(v int64) *RelationalDatabaseBundle {
+	s.DiskSizeInGb = &v
+	return s
+}
+
+// SetIsActive sets the IsActive field's value.
+func (s *RelationalDatabaseBundle) SetIsActive(v bool) *RelationalDatabaseBundle {
+	s.IsActive = &v
+	return s
+}
+
+// SetIsEncrypted sets the IsEncrypted field's value.
+func (s *RelationalDatabaseBundle) SetIsEncrypted(v bool) *RelationalDatabaseBundle {
+	s.IsEncrypted = &v
+	return s
+}
+
+// SetName sets the Name field's value.
+func (s *RelationalDatabaseBundle) SetName(v string) *RelationalDatabaseBundle {
+	s.Name = &v
+	return s
+}
+
+// SetPrice sets the Price field's value.
+func (s *RelationalDatabaseBundle) SetPrice(v float64) *RelationalDatabaseBundle {
+	s.Price = &v
+	return s
+}
+
+// SetRamSizeInGb sets the RamSizeInGb field's value.
+func (s *RelationalDatabaseBundle) SetRamSizeInGb(v float64) *RelationalDatabaseBundle {
+	s.RamSizeInGb = &v
+	return s
+}
+
+// SetTransferPerMonthInGb sets the TransferPerMonthInGb field's value.
+func (s *RelationalDatabaseBundle) SetTransferPerMonthInGb(v int64) *RelationalDatabaseBundle {
+	s.TransferPerMonthInGb = &v
+	return s
+}
+
+// Describes an endpoint for a database.
+type RelationalDatabaseEndpoint struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies the DNS address of the database.
+	Address *string `locationName:"address" type:"string"`
+
+	// Specifies the port that the database is listening on.
+	Port *int64 `locationName:"port" type:"integer"`
+}
+
+// String returns the string representation.
+func (s RelationalDatabaseEndpoint) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s RelationalDatabaseEndpoint) GoString() string {
+	return s.String()
+}
+
+// SetAddress sets the Address field's value.
+func (s *RelationalDatabaseEndpoint) SetAddress(v string) *RelationalDatabaseEndpoint {
+	s.Address = &v
+	return s
+}
+
+// SetPort sets the Port field's value.
+func (s *RelationalDatabaseEndpoint) SetPort(v int64) *RelationalDatabaseEndpoint {
+	s.Port = &v
+	return s
+}
+
+// Describes an event for a database.
+type RelationalDatabaseEvent struct {
+	_ struct{} `type:"structure"`
+
+	// The timestamp when the database event was created.
+	CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`
+
+	// The category that the database event belongs to.
+	EventCategories []*string `locationName:"eventCategories" type:"list"`
+
+	// The message of the database event.
+	Message *string `locationName:"message" type:"string"`
+
+	// The database that the database event relates to.
+	Resource *string `locationName:"resource" type:"string"`
+}
+
+// String returns the string representation.
+func (s RelationalDatabaseEvent) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s RelationalDatabaseEvent) GoString() string {
+	return s.String()
+}
+
+// SetCreatedAt sets the CreatedAt field's value.
+func (s *RelationalDatabaseEvent) SetCreatedAt(v time.Time) *RelationalDatabaseEvent {
+	s.CreatedAt = &v
+	return s
+}
+
+// SetEventCategories sets the EventCategories field's value.
+func (s *RelationalDatabaseEvent) SetEventCategories(v []*string) *RelationalDatabaseEvent {
+	s.EventCategories = v
+	return s
+}
+
+// SetMessage sets the Message field's value.
+func (s *RelationalDatabaseEvent) SetMessage(v string) *RelationalDatabaseEvent {
+	s.Message = &v
+	return s
+}
+
+// SetResource sets the Resource field's value.
+func (s *RelationalDatabaseEvent) SetResource(v string) *RelationalDatabaseEvent {
+	s.Resource = &v
+	return s
+}
+
+// Describes the hardware of a database.
+type RelationalDatabaseHardware struct {
+	_ struct{} `type:"structure"`
+
+	// The number of vCPUs for the database.
+	CpuCount *int64 `locationName:"cpuCount" type:"integer"`
+
+	// The size of the disk for the database.
+	DiskSizeInGb *int64 `locationName:"diskSizeInGb" type:"integer"`
+
+	// The amount of RAM in GB for the database.
+	RamSizeInGb *float64 `locationName:"ramSizeInGb" type:"float"`
+}
+
+// String returns the string representation.
+func (s RelationalDatabaseHardware) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s RelationalDatabaseHardware) GoString() string {
+	return s.String()
+}
+
+// SetCpuCount sets the CpuCount field's value.
+func (s *RelationalDatabaseHardware) SetCpuCount(v int64) *RelationalDatabaseHardware {
+	s.CpuCount = &v
+	return s
+}
+
+// SetDiskSizeInGb sets the DiskSizeInGb field's value.
+func (s *RelationalDatabaseHardware) SetDiskSizeInGb(v int64) *RelationalDatabaseHardware {
+	s.DiskSizeInGb = &v
+	return s
+}
+
+// SetRamSizeInGb sets the RamSizeInGb field's value.
+func (s *RelationalDatabaseHardware) SetRamSizeInGb(v float64) *RelationalDatabaseHardware {
+	s.RamSizeInGb = &v
+	return s
+}
+
+// Describes the parameters of a database.
+type RelationalDatabaseParameter struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies the valid range of values for the parameter.
+	AllowedValues *string `locationName:"allowedValues" type:"string"`
+
+	// Indicates when parameter updates are applied.
+	//
+	// Can be immediate or pending-reboot.
+	ApplyMethod *string `locationName:"applyMethod" type:"string"`
+
+	// Specifies the engine-specific parameter type.
+	ApplyType *string `locationName:"applyType" type:"string"`
+
+	// Specifies the valid data type for the parameter.
+	DataType *string `locationName:"dataType" type:"string"`
+
+	// Provides a description of the parameter.
+	Description *string `locationName:"description" type:"string"`
+
+	// A Boolean value indicating whether the parameter can be modified.
+	IsModifiable *bool `locationName:"isModifiable" type:"boolean"`
+
+	// Specifies the name of the parameter.
+	ParameterName *string `locationName:"parameterName" type:"string"`
+
+	// Specifies the value of the parameter.
+	ParameterValue *string `locationName:"parameterValue" type:"string"`
+}
+
+// String returns the string representation.
+func (s RelationalDatabaseParameter) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s RelationalDatabaseParameter) GoString() string {
+	return s.String()
+}
+
+// SetAllowedValues sets the AllowedValues field's value.
+func (s *RelationalDatabaseParameter) SetAllowedValues(v string) *RelationalDatabaseParameter {
+	s.AllowedValues = &v
+	return s
+}
+
+// SetApplyMethod sets the ApplyMethod field's value.
+func (s *RelationalDatabaseParameter) SetApplyMethod(v string) *RelationalDatabaseParameter {
+	s.ApplyMethod = &v
+	return s
+}
+
+// SetApplyType sets the ApplyType field's value.
+func (s *RelationalDatabaseParameter) SetApplyType(v string) *RelationalDatabaseParameter {
+	s.ApplyType = &v
+	return s
+}
+
+// SetDataType sets the DataType field's value.
+func (s *RelationalDatabaseParameter) SetDataType(v string) *RelationalDatabaseParameter {
+	s.DataType = &v
+	return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *RelationalDatabaseParameter) SetDescription(v string) *RelationalDatabaseParameter {
+	s.Description = &v
+	return s
+}
+
+// SetIsModifiable sets the IsModifiable field's value.
+func (s *RelationalDatabaseParameter) SetIsModifiable(v bool) *RelationalDatabaseParameter {
+	s.IsModifiable = &v
+	return s
+}
+
+// SetParameterName sets the ParameterName field's value.
+func (s *RelationalDatabaseParameter) SetParameterName(v string) *RelationalDatabaseParameter {
+	s.ParameterName = &v
+	return s
+}
+
+// SetParameterValue sets the ParameterValue field's value.
+func (s *RelationalDatabaseParameter) SetParameterValue(v string) *RelationalDatabaseParameter {
+	s.ParameterValue = &v
+	return s
+}
+
+// Describes a database snapshot.
+type RelationalDatabaseSnapshot struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of the database snapshot.
+	Arn *string `locationName:"arn" type:"string"`
+
+	// The timestamp when the database snapshot was created.
+	CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`
+
+	// The software of the database snapshot (for example, MySQL).
+	Engine *string `locationName:"engine" type:"string"`
+
+	// The database engine version for the database snapshot (for example, 5.7.23).
+	EngineVersion *string `locationName:"engineVersion" type:"string"`
+
+	// The Amazon Resource Name (ARN) of the database from which the database snapshot
+	// was created.
+	FromRelationalDatabaseArn *string `locationName:"fromRelationalDatabaseArn" type:"string"`
+
+	// The blueprint ID of the database from which the database snapshot was created.
+	// A blueprint describes the major engine version of a database.
+	FromRelationalDatabaseBlueprintId *string `locationName:"fromRelationalDatabaseBlueprintId" type:"string"`
+
+	// The bundle ID of the database from which the database snapshot was created.
+	FromRelationalDatabaseBundleId *string `locationName:"fromRelationalDatabaseBundleId" type:"string"`
+
+	// The name of the source database from which the database snapshot was created.
+	FromRelationalDatabaseName *string `locationName:"fromRelationalDatabaseName" type:"string"`
+
+	// The Region name and Availability Zone where the database snapshot is located.
+	Location *ResourceLocation `locationName:"location" type:"structure"`
+
+	// The name of the database snapshot.
+	Name *string `locationName:"name" type:"string"`
+
+	// The Lightsail resource type.
+	ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"`
+
+	// The size of the disk in GB (for example, 32) for the database snapshot.
+	SizeInGb *int64 `locationName:"sizeInGb" type:"integer"`
+
+	// The state of the database snapshot.
+	State *string `locationName:"state" type:"string"`
+
+	// The support code for the database snapshot. Include this code in your email
+	// to support when you have questions about a database snapshot in Lightsail.
+	// This code enables our support team to look up your Lightsail information
+	// more easily.
+	SupportCode *string `locationName:"supportCode" type:"string"`
+
+	// The tag keys and optional values for the resource. For more information about
+	// tags in Lightsail, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-tags).
+	Tags []*Tag `locationName:"tags" type:"list"`
+}
+
+// String returns the string representation.
+func (s RelationalDatabaseSnapshot) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s RelationalDatabaseSnapshot) GoString() string {
+	return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *RelationalDatabaseSnapshot) SetArn(v string) *RelationalDatabaseSnapshot {
+	s.Arn = &v
+	return s
+}
+
+// SetCreatedAt sets the CreatedAt field's value.
+func (s *RelationalDatabaseSnapshot) SetCreatedAt(v time.Time) *RelationalDatabaseSnapshot {
+	s.CreatedAt = &v
+	return s
+}
+
+// SetEngine sets the Engine field's value.
+func (s *RelationalDatabaseSnapshot) SetEngine(v string) *RelationalDatabaseSnapshot {
+	s.Engine = &v
+	return s
+}
+
+// SetEngineVersion sets the EngineVersion field's value.
+func (s *RelationalDatabaseSnapshot) SetEngineVersion(v string) *RelationalDatabaseSnapshot {
+	s.EngineVersion = &v
+	return s
+}
+
+// SetFromRelationalDatabaseArn sets the FromRelationalDatabaseArn field's value.
+func (s *RelationalDatabaseSnapshot) SetFromRelationalDatabaseArn(v string) *RelationalDatabaseSnapshot {
+	s.FromRelationalDatabaseArn = &v
+	return s
+}
+
+// SetFromRelationalDatabaseBlueprintId sets the FromRelationalDatabaseBlueprintId field's value.
+func (s *RelationalDatabaseSnapshot) SetFromRelationalDatabaseBlueprintId(v string) *RelationalDatabaseSnapshot {
+	s.FromRelationalDatabaseBlueprintId = &v
+	return s
+}
+
+// SetFromRelationalDatabaseBundleId sets the FromRelationalDatabaseBundleId field's value.
+func (s *RelationalDatabaseSnapshot) SetFromRelationalDatabaseBundleId(v string) *RelationalDatabaseSnapshot {
+	s.FromRelationalDatabaseBundleId = &v
+	return s
+}
+
+// SetFromRelationalDatabaseName sets the FromRelationalDatabaseName field's value.
+func (s *RelationalDatabaseSnapshot) SetFromRelationalDatabaseName(v string) *RelationalDatabaseSnapshot {
+	s.FromRelationalDatabaseName = &v
+	return s
+}
+
+// SetLocation sets the Location field's value.
+func (s *RelationalDatabaseSnapshot) SetLocation(v *ResourceLocation) *RelationalDatabaseSnapshot {
+	s.Location = v
+	return s
+}
+
+// SetName sets the Name field's value.
+func (s *RelationalDatabaseSnapshot) SetName(v string) *RelationalDatabaseSnapshot {
+	s.Name = &v
+	return s
+}
+
+// SetResourceType sets the ResourceType field's value.
+func (s *RelationalDatabaseSnapshot) SetResourceType(v string) *RelationalDatabaseSnapshot {
+	s.ResourceType = &v
+	return s
+}
+
+// SetSizeInGb sets the SizeInGb field's value.
+func (s *RelationalDatabaseSnapshot) SetSizeInGb(v int64) *RelationalDatabaseSnapshot {
+	s.SizeInGb = &v
+	return s
+}
+
+// SetState sets the State field's value.
+func (s *RelationalDatabaseSnapshot) SetState(v string) *RelationalDatabaseSnapshot {
+	s.State = &v
+	return s
+}
+
+// SetSupportCode sets the SupportCode field's value.
+func (s *RelationalDatabaseSnapshot) SetSupportCode(v string) *RelationalDatabaseSnapshot {
+	s.SupportCode = &v
+	return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *RelationalDatabaseSnapshot) SetTags(v []*Tag) *RelationalDatabaseSnapshot {
+	s.Tags = v
+	return s
+}
+
+// ReleaseStaticIpInput contains the request parameters for the ReleaseStaticIp
+// operation.
+type ReleaseStaticIpInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the static IP to delete.
+	//
+	// StaticIpName is a required field
+	StaticIpName *string `locationName:"staticIpName" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+func (s ReleaseStaticIpInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s ReleaseStaticIpInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReleaseStaticIpInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ReleaseStaticIpInput"}
+	if s.StaticIpName == nil {
+		invalidParams.Add(request.NewErrParamRequired("StaticIpName"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetStaticIpName sets the StaticIpName field's value.
+func (s *ReleaseStaticIpInput) SetStaticIpName(v string) *ReleaseStaticIpInput {
+	s.StaticIpName = &v
+	return s
+}
+
+// ReleaseStaticIpOutput contains the response from the ReleaseStaticIp operation.
+type ReleaseStaticIpOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An array of objects that describe the result of the action, such as the status
+	// of the request, the timestamp of the request, and the resources affected
+	// by the request.
+	Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation.
+func (s ReleaseStaticIpOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s ReleaseStaticIpOutput) GoString() string {
+	return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *ReleaseStaticIpOutput) SetOperations(v []*Operation) *ReleaseStaticIpOutput {
+	s.Operations = v
+	return s
+}
+
+// Describes the status of a SSL/TLS certificate renewal managed by Amazon Lightsail.
+type RenewalSummary struct {
+	_ struct{} `type:"structure"`
+
+	// An array of objects that describe the domain validation records of the certificate.
+	DomainValidationRecords []*DomainValidationRecord `locationName:"domainValidationRecords" type:"list"`
+
+	// The renewal status of the certificate.
+	//
+	// The following renewal status are possible:
+	//
+	//    * PendingAutoRenewal - Lightsail is attempting to automatically validate
+	//    the domain names of the certificate. No further action is required.
+	//
+	//    * PendingValidation - Lightsail couldn't automatically validate one or
+	//    more domain names of the certificate. You must take action to validate
+	//    these domain names or the certificate won't be renewed. Check to make
+	//    sure your certificate's domain validation records exist in your domain's
+	//    DNS, and that your certificate remains in use.
+	//
+	//    * Success - All domain names in the certificate are validated, and Lightsail
+	//    renewed the certificate. No further action is required.
+	//
+	//    * Failed - One or more domain names were not validated before the certificate
+	//    expired, and Lightsail did not renew the certificate. You can request
+	//    a new certificate using the CreateCertificate action.
+	RenewalStatus *string `locationName:"renewalStatus" type:"string" enum:"RenewalStatus"`
+
+	// The reason for the renewal status of the certificate.
+	RenewalStatusReason *string `locationName:"renewalStatusReason" type:"string"`
+
+	// The timestamp when the certificate was last updated.
+	UpdatedAt *time.Time `locationName:"updatedAt" type:"timestamp"`
+}
+
+// String returns the string representation.
+func (s RenewalSummary) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s RenewalSummary) GoString() string {
+	return s.String()
+}
+
+// SetDomainValidationRecords sets the DomainValidationRecords field's value.
+func (s *RenewalSummary) SetDomainValidationRecords(v []*DomainValidationRecord) *RenewalSummary {
+	s.DomainValidationRecords = v
+	return s
+}
+
+// SetRenewalStatus sets the RenewalStatus field's value.
+func (s *RenewalSummary) SetRenewalStatus(v string) *RenewalSummary {
+	s.RenewalStatus = &v
+	return s
+}
+
+// SetRenewalStatusReason sets the RenewalStatusReason field's value.
+func (s *RenewalSummary) SetRenewalStatusReason(v string) *RenewalSummary {
+	s.RenewalStatusReason = &v
+	return s
+}
+
+// SetUpdatedAt sets the UpdatedAt field's value.
+func (s *RenewalSummary) SetUpdatedAt(v time.Time) *RenewalSummary {
+	s.UpdatedAt = &v
+	return s
+}
+
+// ResetDistributionCacheInput contains the request parameters for the
+// ResetDistributionCache operation.
+type ResetDistributionCacheInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the distribution for which to reset cache.
+	//
+	// Use the GetDistributions action to get a list of distribution names that
+	// you can specify.
+	DistributionName *string `locationName:"distributionName" type:"string"`
+}
+
+// String returns the string representation.
+func (s ResetDistributionCacheInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s ResetDistributionCacheInput) GoString() string {
+	return s.String()
+}
+
+// SetDistributionName sets the DistributionName field's value.
+func (s *ResetDistributionCacheInput) SetDistributionName(v string) *ResetDistributionCacheInput {
+	s.DistributionName = &v
+	return s
+}
+
+// ResetDistributionCacheOutput contains the response from the
+// ResetDistributionCache operation.
+type ResetDistributionCacheOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The timestamp of the reset cache request (e.g., 1479734909.17) in Unix time
+	// format.
+	CreateTime *time.Time `locationName:"createTime" type:"timestamp"`
+
+	// An array of objects that describe the result of the action, such as the status
+	// of the request, the timestamp of the request, and the resources affected
+	// by the request.
+	Operation *Operation `locationName:"operation" type:"structure"`
+
+	// The status of the reset cache request.
+	Status *string `locationName:"status" type:"string"`
+}
+
+// String returns the string representation.
+func (s ResetDistributionCacheOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s ResetDistributionCacheOutput) GoString() string {
+	return s.String()
+}
+
+// SetCreateTime sets the CreateTime field's value.
+func (s *ResetDistributionCacheOutput) SetCreateTime(v time.Time) *ResetDistributionCacheOutput {
+	s.CreateTime = &v
+	return s
+}
+
+// SetOperation sets the Operation field's value.
+func (s *ResetDistributionCacheOutput) SetOperation(v *Operation) *ResetDistributionCacheOutput {
+	s.Operation = v
+	return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *ResetDistributionCacheOutput) SetStatus(v string) *ResetDistributionCacheOutput {
+	s.Status = &v
+	return s
+}
+
+// Describes the resource location.
+type ResourceLocation struct {
+	_ struct{} `type:"structure"`
+
+	// The Availability Zone. Follows the format us-east-2a (case-sensitive).
+	AvailabilityZone *string `locationName:"availabilityZone" type:"string"`
+
+	// The AWS Region name.
+	RegionName *string `locationName:"regionName" type:"string" enum:"RegionName"`
+}
+
+// String returns the string representation.
+func (s ResourceLocation) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s ResourceLocation) GoString() string {
+	return s.String()
+}
+
+// SetAvailabilityZone sets the AvailabilityZone field's value.
+func (s *ResourceLocation) SetAvailabilityZone(v string) *ResourceLocation {
+	s.AvailabilityZone = &v
+	return s
+}
+
+// SetRegionName sets the RegionName field's value.
+func (s *ResourceLocation) SetRegionName(v string) *ResourceLocation {
+	s.RegionName = &v
+	return s
+}
+
+// Describes an Amazon Lightsail instance that has access to a Lightsail bucket.
+type ResourceReceivingAccess struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the Lightsail instance.
+	Name *string `locationName:"name" type:"string"`
+
+	// The Lightsail resource type (for example, Instance).
+	ResourceType *string `locationName:"resourceType" type:"string"`
+}
+
+// String returns the string representation.
+func (s ResourceReceivingAccess) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s ResourceReceivingAccess) GoString() string {
+	return s.String()
+}
+
+// SetName sets the Name field's value.
+func (s *ResourceReceivingAccess) SetName(v string) *ResourceReceivingAccess {
+	s.Name = &v
+	return s
+}
+
+// SetResourceType sets the ResourceType field's value.
+func (s *ResourceReceivingAccess) SetResourceType(v string) *ResourceReceivingAccess {
+	s.ResourceType = &v
+	return s
+}
+
+// Describes the domain name system (DNS) records to add to your domain's DNS
+// to validate it for an Amazon Lightsail certificate.
+type ResourceRecord struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the record.
+	Name *string `locationName:"name" type:"string"`
+
+	// The DNS record type.
+	Type *string `locationName:"type" type:"string"`
+
+	// The value for the DNS record.
+	Value *string `locationName:"value" type:"string"`
+}
+
+// String returns the string representation.
+func (s ResourceRecord) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s ResourceRecord) GoString() string {
+	return s.String()
+}
+
+// SetName sets the Name field's value.
+func (s *ResourceRecord) SetName(v string) *ResourceRecord {
+	s.Name = &v
+	return s
+}
+
+// SetType sets the Type field's value.
+func (s *ResourceRecord) SetType(v string) *ResourceRecord {
+	s.Type = &v
+	return s
+}
+
+// SetValue sets the Value field's value.
+func (s *ResourceRecord) SetValue(v string) *ResourceRecord {
+	s.Value = &v
+	return s
+}
+
+// SendContactMethodVerificationInput contains the request parameters for the
+// SendContactMethodVerification operation.
+type SendContactMethodVerificationInput struct {
+	_ struct{} `type:"structure"`
+
+	// The protocol to verify, such as Email or SMS (text messaging).
+	//
+	// Protocol is a required field
+	Protocol *string `locationName:"protocol" type:"string" required:"true" enum:"ContactMethodVerificationProtocol"`
+}
+
+// String returns the string representation.
+func (s SendContactMethodVerificationInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s SendContactMethodVerificationInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SendContactMethodVerificationInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "SendContactMethodVerificationInput"}
+	if s.Protocol == nil {
+		invalidParams.Add(request.NewErrParamRequired("Protocol"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetProtocol sets the Protocol field's value.
+func (s *SendContactMethodVerificationInput) SetProtocol(v string) *SendContactMethodVerificationInput {
+	s.Protocol = &v
+	return s
+}
+
+// SendContactMethodVerificationOutput contains the response from the
+// SendContactMethodVerification operation.
+type SendContactMethodVerificationOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An array of objects that describe the result of the action, such as the status
+	// of the request, the timestamp of the request, and the resources affected
+	// by the request.
+	Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation.
+func (s SendContactMethodVerificationOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s SendContactMethodVerificationOutput) GoString() string {
+	return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *SendContactMethodVerificationOutput) SetOperations(v []*Operation) *SendContactMethodVerificationOutput {
+	s.Operations = v
+	return s
+}
+
+// A general service exception.
+//
+// ServiceException satisfies the awserr.Error and awserr.RequestFailure
+// interfaces so callers can inspect the code, message, HTTP status, and
+// request ID of a failed API call.
+type ServiceException struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	// The service-provided error code (wire name "code").
+	Code_ *string `locationName:"code" type:"string"`
+
+	// A link to documentation about the error, if provided by the service.
+	Docs *string `locationName:"docs" type:"string"`
+
+	// The service-provided error message (wire name "message").
+	Message_ *string `locationName:"message" type:"string"`
+
+	// A tip on how to resolve the error, if provided by the service.
+	Tip *string `locationName:"tip" type:"string"`
+}
+
+// String returns the string representation.
+func (s ServiceException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s ServiceException) GoString() string {
+	return s.String()
+}
+
+// newErrorServiceException returns a ServiceException carrying the response
+// metadata of the failed request.
+func newErrorServiceException(v protocol.ResponseMetadata) error {
+	return &ServiceException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *ServiceException) Code() string {
+	return "ServiceException"
+}
+
+// Message returns the exception's message.
+func (s *ServiceException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *ServiceException) OrigErr() error {
+	return nil
+}
+
+// Error returns the code, message, and full struct dump as a single string.
+func (s *ServiceException) Error() string {
+	return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *ServiceException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *ServiceException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// SetIpAddressTypeInput contains the request parameters for the SetIpAddressType
+// operation.
+type SetIpAddressTypeInput struct {
+	_ struct{} `type:"structure"`
+
+	// The IP address type to set for the specified resource.
+	//
+	// The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6.
+	//
+	// IpAddressType is a required field
+	IpAddressType *string `locationName:"ipAddressType" type:"string" required:"true" enum:"IpAddressType"`
+
+	// The name of the resource for which to set the IP address type.
+	//
+	// ResourceName is a required field
+	ResourceName *string `locationName:"resourceName" type:"string" required:"true"`
+
+	// The resource type.
+	//
+	// The possible values are Distribution, Instance, and LoadBalancer.
+	//
+	// Distribution-related APIs are available only in the N. Virginia (us-east-1)
+	// AWS Region. Set your AWS Region configuration to us-east-1 to create, view,
+	// or edit distributions.
+	//
+	// ResourceType is a required field
+	ResourceType *string `locationName:"resourceType" type:"string" required:"true" enum:"ResourceType"`
+}
+
+// String returns the string representation.
+func (s SetIpAddressTypeInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s SetIpAddressTypeInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SetIpAddressTypeInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "SetIpAddressTypeInput"}
+	if s.IpAddressType == nil {
+		invalidParams.Add(request.NewErrParamRequired("IpAddressType"))
+	}
+	if s.ResourceName == nil {
+		invalidParams.Add(request.NewErrParamRequired("ResourceName"))
+	}
+	if s.ResourceType == nil {
+		invalidParams.Add(request.NewErrParamRequired("ResourceType"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetIpAddressType sets the IpAddressType field's value.
+func (s *SetIpAddressTypeInput) SetIpAddressType(v string) *SetIpAddressTypeInput {
+	s.IpAddressType = &v
+	return s
+}
+
+// SetResourceName sets the ResourceName field's value.
+func (s *SetIpAddressTypeInput) SetResourceName(v string) *SetIpAddressTypeInput {
+	s.ResourceName = &v
+	return s
+}
+
+// SetResourceType sets the ResourceType field's value.
+func (s *SetIpAddressTypeInput) SetResourceType(v string) *SetIpAddressTypeInput {
+	s.ResourceType = &v
+	return s
+}
+
+// SetIpAddressTypeOutput contains the response from the SetIpAddressType
+// operation.
+type SetIpAddressTypeOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An array of objects that describe the result of the action, such as the status
+	// of the request, the timestamp of the request, and the resources affected
+	// by the request.
+	Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation.
+func (s SetIpAddressTypeOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s SetIpAddressTypeOutput) GoString() string {
+	return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *SetIpAddressTypeOutput) SetOperations(v []*Operation) *SetIpAddressTypeOutput {
+	s.Operations = v
+	return s
+}
+
+type SetResourceAccessForBucketInput struct {
+ _ struct{} `type:"structure"`
+
+ // The access setting.
+ //
+ // The following access settings are available:
+ //
+ // * allow - Allows access to the bucket and its objects.
+ //
+ // * deny - Denies access to the bucket and its objects. Use this setting
+ // to remove access for a resource previously set to allow.
+ //
+ // Access is a required field
+ Access *string `locationName:"access" type:"string" required:"true" enum:"ResourceBucketAccess"`
+
+ // The name of the bucket for which to set access to another Lightsail resource.
+ //
+ // BucketName is a required field
+ BucketName *string `locationName:"bucketName" min:"3" type:"string" required:"true"`
+
+ // The name of the Lightsail instance for which to set bucket access. The instance
+ // must be in a running or stopped state.
+ //
+ // ResourceName is a required field
+ ResourceName *string `locationName:"resourceName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s SetResourceAccessForBucketInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetResourceAccessForBucketInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SetResourceAccessForBucketInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "SetResourceAccessForBucketInput"}
+ if s.Access == nil {
+ invalidParams.Add(request.NewErrParamRequired("Access"))
+ }
+ if s.BucketName == nil {
+ invalidParams.Add(request.NewErrParamRequired("BucketName"))
+ }
+ if s.BucketName != nil && len(*s.BucketName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("BucketName", 3))
+ }
+ if s.ResourceName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ResourceName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAccess sets the Access field's value.
+func (s *SetResourceAccessForBucketInput) SetAccess(v string) *SetResourceAccessForBucketInput {
+ s.Access = &v
+ return s
+}
+
+// SetBucketName sets the BucketName field's value.
+func (s *SetResourceAccessForBucketInput) SetBucketName(v string) *SetResourceAccessForBucketInput {
+ s.BucketName = &v
+ return s
+}
+
+// SetResourceName sets the ResourceName field's value.
+func (s *SetResourceAccessForBucketInput) SetResourceName(v string) *SetResourceAccessForBucketInput {
+ s.ResourceName = &v
+ return s
+}
+
+type SetResourceAccessForBucketOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s SetResourceAccessForBucketOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetResourceAccessForBucketOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *SetResourceAccessForBucketOutput) SetOperations(v []*Operation) *SetResourceAccessForBucketOutput {
+ s.Operations = v
+ return s
+}
+
+type StartInstanceInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the instance (a virtual private server) to start.
+ //
+ // InstanceName is a required field
+ InstanceName *string `locationName:"instanceName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s StartInstanceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StartInstanceInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *StartInstanceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "StartInstanceInput"}
+ if s.InstanceName == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetInstanceName sets the InstanceName field's value.
+func (s *StartInstanceInput) SetInstanceName(v string) *StartInstanceInput {
+ s.InstanceName = &v
+ return s
+}
+
+type StartInstanceOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s StartInstanceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StartInstanceOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *StartInstanceOutput) SetOperations(v []*Operation) *StartInstanceOutput {
+ s.Operations = v
+ return s
+}
+
+type StartRelationalDatabaseInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of your database to start.
+ //
+ // RelationalDatabaseName is a required field
+ RelationalDatabaseName *string `locationName:"relationalDatabaseName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s StartRelationalDatabaseInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StartRelationalDatabaseInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *StartRelationalDatabaseInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "StartRelationalDatabaseInput"}
+ if s.RelationalDatabaseName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetRelationalDatabaseName sets the RelationalDatabaseName field's value.
+func (s *StartRelationalDatabaseInput) SetRelationalDatabaseName(v string) *StartRelationalDatabaseInput {
+ s.RelationalDatabaseName = &v
+ return s
+}
+
+type StartRelationalDatabaseOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s StartRelationalDatabaseOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StartRelationalDatabaseOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *StartRelationalDatabaseOutput) SetOperations(v []*Operation) *StartRelationalDatabaseOutput {
+ s.Operations = v
+ return s
+}
+
+// Describes a static IP.
+type StaticIp struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the static IP (e.g., arn:aws:lightsail:us-east-2:123456789101:StaticIp/9cbb4a9e-f8e3-4dfe-b57e-12345EXAMPLE).
+ Arn *string `locationName:"arn" type:"string"`
+
+ // The instance where the static IP is attached (e.g., Amazon_Linux-1GB-Ohio-1).
+ AttachedTo *string `locationName:"attachedTo" type:"string"`
+
+ // The timestamp when the static IP was created (e.g., 1479735304.222).
+ CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`
+
+ // The static IP address.
+ IpAddress *string `locationName:"ipAddress" type:"string"`
+
+ // A Boolean value indicating whether the static IP is attached.
+ IsAttached *bool `locationName:"isAttached" type:"boolean"`
+
+ // The region and Availability Zone where the static IP was created.
+ Location *ResourceLocation `locationName:"location" type:"structure"`
+
+ // The name of the static IP (e.g., StaticIP-Ohio-EXAMPLE).
+ Name *string `locationName:"name" type:"string"`
+
+ // The resource type (usually StaticIp).
+ ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"`
+
+ // The support code. Include this code in your email to support when you have
+ // questions about an instance or another resource in Lightsail. This code enables
+ // our support team to look up your Lightsail information more easily.
+ SupportCode *string `locationName:"supportCode" type:"string"`
+}
+
+// String returns the string representation
+func (s StaticIp) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StaticIp) GoString() string {
+ return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *StaticIp) SetArn(v string) *StaticIp {
+ s.Arn = &v
+ return s
+}
+
+// SetAttachedTo sets the AttachedTo field's value.
+func (s *StaticIp) SetAttachedTo(v string) *StaticIp {
+ s.AttachedTo = &v
+ return s
+}
+
+// SetCreatedAt sets the CreatedAt field's value.
+func (s *StaticIp) SetCreatedAt(v time.Time) *StaticIp {
+ s.CreatedAt = &v
+ return s
+}
+
+// SetIpAddress sets the IpAddress field's value.
+func (s *StaticIp) SetIpAddress(v string) *StaticIp {
+ s.IpAddress = &v
+ return s
+}
+
+// SetIsAttached sets the IsAttached field's value.
+func (s *StaticIp) SetIsAttached(v bool) *StaticIp {
+ s.IsAttached = &v
+ return s
+}
+
+// SetLocation sets the Location field's value.
+func (s *StaticIp) SetLocation(v *ResourceLocation) *StaticIp {
+ s.Location = v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *StaticIp) SetName(v string) *StaticIp {
+ s.Name = &v
+ return s
+}
+
+// SetResourceType sets the ResourceType field's value.
+func (s *StaticIp) SetResourceType(v string) *StaticIp {
+ s.ResourceType = &v
+ return s
+}
+
+// SetSupportCode sets the SupportCode field's value.
+func (s *StaticIp) SetSupportCode(v string) *StaticIp {
+ s.SupportCode = &v
+ return s
+}
+
+type StopInstanceInput struct {
+ _ struct{} `type:"structure"`
+
+ // When set to True, forces a Lightsail instance that is stuck in a stopping
+ // state to stop.
+ //
+ // Only use the force parameter if your instance is stuck in the stopping state.
+ // In any other state, your instance should stop normally without adding this
+ // parameter to your API request.
+ Force *bool `locationName:"force" type:"boolean"`
+
+ // The name of the instance (a virtual private server) to stop.
+ //
+ // InstanceName is a required field
+ InstanceName *string `locationName:"instanceName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s StopInstanceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StopInstanceInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *StopInstanceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "StopInstanceInput"}
+ if s.InstanceName == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetForce sets the Force field's value.
+func (s *StopInstanceInput) SetForce(v bool) *StopInstanceInput {
+ s.Force = &v
+ return s
+}
+
+// SetInstanceName sets the InstanceName field's value.
+func (s *StopInstanceInput) SetInstanceName(v string) *StopInstanceInput {
+ s.InstanceName = &v
+ return s
+}
+
+type StopInstanceOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s StopInstanceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StopInstanceOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *StopInstanceOutput) SetOperations(v []*Operation) *StopInstanceOutput {
+ s.Operations = v
+ return s
+}
+
+type StopRelationalDatabaseInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of your database to stop.
+ //
+ // RelationalDatabaseName is a required field
+ RelationalDatabaseName *string `locationName:"relationalDatabaseName" type:"string" required:"true"`
+
+ // The name of your new database snapshot to be created before stopping your
+ // database.
+ RelationalDatabaseSnapshotName *string `locationName:"relationalDatabaseSnapshotName" type:"string"`
+}
+
+// String returns the string representation
+func (s StopRelationalDatabaseInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StopRelationalDatabaseInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *StopRelationalDatabaseInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "StopRelationalDatabaseInput"}
+ if s.RelationalDatabaseName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetRelationalDatabaseName sets the RelationalDatabaseName field's value.
+func (s *StopRelationalDatabaseInput) SetRelationalDatabaseName(v string) *StopRelationalDatabaseInput {
+ s.RelationalDatabaseName = &v
+ return s
+}
+
+// SetRelationalDatabaseSnapshotName sets the RelationalDatabaseSnapshotName field's value.
+func (s *StopRelationalDatabaseInput) SetRelationalDatabaseSnapshotName(v string) *StopRelationalDatabaseInput {
+ s.RelationalDatabaseSnapshotName = &v
+ return s
+}
+
+type StopRelationalDatabaseOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s StopRelationalDatabaseOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StopRelationalDatabaseOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *StopRelationalDatabaseOutput) SetOperations(v []*Operation) *StopRelationalDatabaseOutput {
+ s.Operations = v
+ return s
+}
+
+// Describes a tag key and optional value assigned to an Amazon Lightsail resource.
+//
+// For more information about tags in Lightsail, see the Amazon Lightsail Developer
+// Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-tags).
+type Tag struct {
+ _ struct{} `type:"structure"`
+
+ // The key of the tag.
+ //
+ // Constraints: Tag keys accept a maximum of 128 letters, numbers, spaces in
+ // UTF-8, or the following characters: + - = . _ : / @
+ Key *string `locationName:"key" type:"string"`
+
+ // The value of the tag.
+ //
+ // Constraints: Tag values accept a maximum of 256 letters, numbers, spaces
+ // in UTF-8, or the following characters: + - = . _ : / @
+ Value *string `locationName:"value" type:"string"`
+}
+
+// String returns the string representation
+func (s Tag) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Tag) GoString() string {
+ return s.String()
+}
+
+// SetKey sets the Key field's value.
+func (s *Tag) SetKey(v string) *Tag {
+ s.Key = &v
+ return s
+}
+
+// SetValue sets the Value field's value.
+func (s *Tag) SetValue(v string) *Tag {
+ s.Value = &v
+ return s
+}
+
+type TagResourceInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the resource to which you want to add a
+ // tag.
+ ResourceArn *string `locationName:"resourceArn" type:"string"`
+
+ // The name of the resource to which you are adding tags.
+ //
+ // ResourceName is a required field
+ ResourceName *string `locationName:"resourceName" type:"string" required:"true"`
+
+ // The tag key and optional value.
+ //
+ // Tags is a required field
+ Tags []*Tag `locationName:"tags" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s TagResourceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TagResourceInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *TagResourceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"}
+ if s.ResourceName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ResourceName"))
+ }
+ if s.Tags == nil {
+ invalidParams.Add(request.NewErrParamRequired("Tags"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetResourceArn sets the ResourceArn field's value.
+func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput {
+ s.ResourceArn = &v
+ return s
+}
+
+// SetResourceName sets the ResourceName field's value.
+func (s *TagResourceInput) SetResourceName(v string) *TagResourceInput {
+ s.ResourceName = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput {
+ s.Tags = v
+ return s
+}
+
+type TagResourceOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s TagResourceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TagResourceOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *TagResourceOutput) SetOperations(v []*Operation) *TagResourceOutput {
+ s.Operations = v
+ return s
+}
+
+type TestAlarmInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the alarm to test.
+ //
+ // AlarmName is a required field
+ AlarmName *string `locationName:"alarmName" type:"string" required:"true"`
+
+ // The alarm state to test.
+ //
+ // An alarm has the following possible states that can be tested:
+ //
+ // * ALARM - The metric is outside of the defined threshold.
+ //
+ // * INSUFFICIENT_DATA - The alarm has just started, the metric is not available,
+ // or not enough data is available for the metric to determine the alarm
+ // state.
+ //
+ // * OK - The metric is within the defined threshold.
+ //
+ // State is a required field
+ State *string `locationName:"state" type:"string" required:"true" enum:"AlarmState"`
+}
+
+// String returns the string representation
+func (s TestAlarmInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TestAlarmInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *TestAlarmInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "TestAlarmInput"}
+ if s.AlarmName == nil {
+ invalidParams.Add(request.NewErrParamRequired("AlarmName"))
+ }
+ if s.State == nil {
+ invalidParams.Add(request.NewErrParamRequired("State"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAlarmName sets the AlarmName field's value.
+func (s *TestAlarmInput) SetAlarmName(v string) *TestAlarmInput {
+ s.AlarmName = &v
+ return s
+}
+
+// SetState sets the State field's value.
+func (s *TestAlarmInput) SetState(v string) *TestAlarmInput {
+ s.State = &v
+ return s
+}
+
+type TestAlarmOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s TestAlarmOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TestAlarmOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *TestAlarmOutput) SetOperations(v []*Operation) *TestAlarmOutput {
+ s.Operations = v
+ return s
+}
+
+// Lightsail throws this exception when the user has not been authenticated.
+type UnauthenticatedException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Code_ *string `locationName:"code" type:"string"`
+
+ Docs *string `locationName:"docs" type:"string"`
+
+ Message_ *string `locationName:"message" type:"string"`
+
+ Tip *string `locationName:"tip" type:"string"`
+}
+
+// String returns the string representation
+func (s UnauthenticatedException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UnauthenticatedException) GoString() string {
+ return s.String()
+}
+
+func newErrorUnauthenticatedException(v protocol.ResponseMetadata) error {
+ return &UnauthenticatedException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *UnauthenticatedException) Code() string {
+ return "UnauthenticatedException"
+}
+
+// Message returns the exception's message.
+func (s *UnauthenticatedException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *UnauthenticatedException) OrigErr() error {
+ return nil
+}
+
+func (s *UnauthenticatedException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *UnauthenticatedException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *UnauthenticatedException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+type UnpeerVpcInput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s UnpeerVpcInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UnpeerVpcInput) GoString() string {
+ return s.String()
+}
+
+type UnpeerVpcOutput struct {
+ _ struct{} `type:"structure"`
+
+	// An object that describes the result of the action, such as the status
+	// of the request, the timestamp of the request, and the resources affected
+	// by the request.
+ Operation *Operation `locationName:"operation" type:"structure"`
+}
+
+// String returns the string representation
+func (s UnpeerVpcOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UnpeerVpcOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperation sets the Operation field's value.
+func (s *UnpeerVpcOutput) SetOperation(v *Operation) *UnpeerVpcOutput {
+ s.Operation = v
+ return s
+}
+
+type UntagResourceInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the resource from which you want to remove
+ // a tag.
+ ResourceArn *string `locationName:"resourceArn" type:"string"`
+
+ // The name of the resource from which you are removing a tag.
+ //
+ // ResourceName is a required field
+ ResourceName *string `locationName:"resourceName" type:"string" required:"true"`
+
+ // The tag keys to delete from the specified resource.
+ //
+ // TagKeys is a required field
+ TagKeys []*string `locationName:"tagKeys" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s UntagResourceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UntagResourceInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UntagResourceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"}
+ if s.ResourceName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ResourceName"))
+ }
+ if s.TagKeys == nil {
+ invalidParams.Add(request.NewErrParamRequired("TagKeys"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetResourceArn sets the ResourceArn field's value.
+func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput {
+ s.ResourceArn = &v
+ return s
+}
+
+// SetResourceName sets the ResourceName field's value.
+func (s *UntagResourceInput) SetResourceName(v string) *UntagResourceInput {
+ s.ResourceName = &v
+ return s
+}
+
+// SetTagKeys sets the TagKeys field's value.
+func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput {
+ s.TagKeys = v
+ return s
+}
+
+type UntagResourceOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s UntagResourceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UntagResourceOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *UntagResourceOutput) SetOperations(v []*Operation) *UntagResourceOutput {
+ s.Operations = v
+ return s
+}
+
+type UpdateBucketBundleInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the bucket for which to update the bundle.
+ //
+ // BucketName is a required field
+ BucketName *string `locationName:"bucketName" min:"3" type:"string" required:"true"`
+
+ // The ID of the new bundle to apply to the bucket.
+ //
+ // Use the GetBucketBundles action to get a list of bundle IDs that you can
+ // specify.
+ //
+ // BundleId is a required field
+ BundleId *string `locationName:"bundleId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateBucketBundleInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateBucketBundleInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateBucketBundleInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateBucketBundleInput"}
+ if s.BucketName == nil {
+ invalidParams.Add(request.NewErrParamRequired("BucketName"))
+ }
+ if s.BucketName != nil && len(*s.BucketName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("BucketName", 3))
+ }
+ if s.BundleId == nil {
+ invalidParams.Add(request.NewErrParamRequired("BundleId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucketName sets the BucketName field's value.
+func (s *UpdateBucketBundleInput) SetBucketName(v string) *UpdateBucketBundleInput {
+ s.BucketName = &v
+ return s
+}
+
+// SetBundleId sets the BundleId field's value.
+func (s *UpdateBucketBundleInput) SetBundleId(v string) *UpdateBucketBundleInput {
+ s.BundleId = &v
+ return s
+}
+
+type UpdateBucketBundleOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s UpdateBucketBundleOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateBucketBundleOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *UpdateBucketBundleOutput) SetOperations(v []*Operation) *UpdateBucketBundleOutput {
+ s.Operations = v
+ return s
+}
+
+type UpdateBucketInput struct {
+ _ struct{} `type:"structure"`
+
+ // An object that sets the public accessibility of objects in the specified
+ // bucket.
+ AccessRules *AccessRules `locationName:"accessRules" type:"structure"`
+
+ // The name of the bucket to update.
+ //
+ // BucketName is a required field
+ BucketName *string `locationName:"bucketName" min:"3" type:"string" required:"true"`
+
+ // An array of strings to specify the AWS account IDs that can access the bucket.
+ //
+ // You can give a maximum of 10 AWS accounts access to a bucket.
+ ReadonlyAccessAccounts []*string `locationName:"readonlyAccessAccounts" type:"list"`
+
+ // Specifies whether to enable or suspend versioning of objects in the bucket.
+ //
+ // The following options can be specified:
+ //
+ // * Enabled - Enables versioning of objects in the specified bucket.
+ //
+ // * Suspended - Suspends versioning of objects in the specified bucket.
+ // Existing object versions are retained.
+ Versioning *string `locationName:"versioning" type:"string"`
+}
+
+// String returns the string representation
+func (s UpdateBucketInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateBucketInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateBucketInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateBucketInput"}
+ if s.BucketName == nil {
+ invalidParams.Add(request.NewErrParamRequired("BucketName"))
+ }
+ if s.BucketName != nil && len(*s.BucketName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("BucketName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAccessRules sets the AccessRules field's value.
+func (s *UpdateBucketInput) SetAccessRules(v *AccessRules) *UpdateBucketInput {
+ s.AccessRules = v
+ return s
+}
+
+// SetBucketName sets the BucketName field's value.
+func (s *UpdateBucketInput) SetBucketName(v string) *UpdateBucketInput {
+ s.BucketName = &v
+ return s
+}
+
+// SetReadonlyAccessAccounts sets the ReadonlyAccessAccounts field's value.
+func (s *UpdateBucketInput) SetReadonlyAccessAccounts(v []*string) *UpdateBucketInput {
+ s.ReadonlyAccessAccounts = v
+ return s
+}
+
+// SetVersioning sets the Versioning field's value.
+func (s *UpdateBucketInput) SetVersioning(v string) *UpdateBucketInput {
+ s.Versioning = &v
+ return s
+}
+
+type UpdateBucketOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An object that describes the bucket that is updated.
+ Bucket *Bucket `locationName:"bucket" type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s UpdateBucketOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateBucketOutput) GoString() string {
+ return s.String()
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *UpdateBucketOutput) SetBucket(v *Bucket) *UpdateBucketOutput {
+ s.Bucket = v
+ return s
+}
+
+// SetOperations sets the Operations field's value.
+func (s *UpdateBucketOutput) SetOperations(v []*Operation) *UpdateBucketOutput {
+ s.Operations = v
+ return s
+}
+
+type UpdateContainerServiceInput struct {
+ _ struct{} `type:"structure"`
+
+ // A Boolean value to indicate whether the container service is disabled.
+ IsDisabled *bool `locationName:"isDisabled" type:"boolean"`
+
+ // The power for the container service.
+ //
+ // The power specifies the amount of memory, vCPUs, and base monthly cost of
+ // each node of the container service. The power and scale of a container service
+ // makes up its configured capacity. To determine the monthly price of your
+ // container service, multiply the base price of the power with the scale (the
+ // number of nodes) of the service.
+ //
+ // Use the GetContainerServicePowers action to view the specifications of each
+ // power option.
+ Power *string `locationName:"power" type:"string" enum:"ContainerServicePowerName"`
+
+ // The public domain names to use with the container service, such as example.com
+ // and www.example.com.
+ //
+ // You can specify up to four public domain names for a container service. The
+ // domain names that you specify are used when you create a deployment with
+ // a container configured as the public endpoint of your container service.
+ //
+ // If you don't specify public domain names, then you can use the default domain
+ // of the container service.
+ //
+ // You must create and validate an SSL/TLS certificate before you can use public
+ // domain names with your container service. Use the CreateCertificate action
+ // to create a certificate for the public domain names you want to use with
+ // your container service.
+ //
+ // You can specify public domain names using a string to array map as shown
+ // in the example later on this page.
+ PublicDomainNames map[string][]*string `locationName:"publicDomainNames" type:"map"`
+
+ // The scale for the container service.
+ //
+ // The scale specifies the allocated compute nodes of the container service.
+ // The power and scale of a container service makes up its configured capacity.
+ // To determine the monthly price of your container service, multiply the base
+ // price of the power with the scale (the number of nodes) of the service.
+ Scale *int64 `locationName:"scale" min:"1" type:"integer"`
+
+ // The name of the container service to update.
+ //
+ // ServiceName is a required field
+ ServiceName *string `locationName:"serviceName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateContainerServiceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateContainerServiceInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateContainerServiceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateContainerServiceInput"}
+ if s.Scale != nil && *s.Scale < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("Scale", 1))
+ }
+ if s.ServiceName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ServiceName"))
+ }
+ if s.ServiceName != nil && len(*s.ServiceName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ServiceName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetIsDisabled sets the IsDisabled field's value.
+func (s *UpdateContainerServiceInput) SetIsDisabled(v bool) *UpdateContainerServiceInput {
+ s.IsDisabled = &v
+ return s
+}
+
+// SetPower sets the Power field's value.
+func (s *UpdateContainerServiceInput) SetPower(v string) *UpdateContainerServiceInput {
+ s.Power = &v
+ return s
+}
+
+// SetPublicDomainNames sets the PublicDomainNames field's value.
+func (s *UpdateContainerServiceInput) SetPublicDomainNames(v map[string][]*string) *UpdateContainerServiceInput {
+ s.PublicDomainNames = v
+ return s
+}
+
+// SetScale sets the Scale field's value.
+func (s *UpdateContainerServiceInput) SetScale(v int64) *UpdateContainerServiceInput {
+ s.Scale = &v
+ return s
+}
+
+// SetServiceName sets the ServiceName field's value.
+func (s *UpdateContainerServiceInput) SetServiceName(v string) *UpdateContainerServiceInput {
+ s.ServiceName = &v
+ return s
+}
+
+type UpdateContainerServiceOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An object that describes a container service.
+ ContainerService *ContainerService `locationName:"containerService" type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateContainerServiceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateContainerServiceOutput) GoString() string {
+ return s.String()
+}
+
+// SetContainerService sets the ContainerService field's value.
+func (s *UpdateContainerServiceOutput) SetContainerService(v *ContainerService) *UpdateContainerServiceOutput {
+ s.ContainerService = v
+ return s
+}
+
+type UpdateDistributionBundleInput struct {
+ _ struct{} `type:"structure"`
+
+ // The bundle ID of the new bundle to apply to your distribution.
+ //
+ // Use the GetDistributionBundles action to get a list of distribution bundle
+ // IDs that you can specify.
+ BundleId *string `locationName:"bundleId" type:"string"`
+
+ // The name of the distribution for which to update the bundle.
+ //
+ // Use the GetDistributions action to get a list of distribution names that
+ // you can specify.
+ DistributionName *string `locationName:"distributionName" type:"string"`
+}
+
+// String returns the string representation
+func (s UpdateDistributionBundleInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateDistributionBundleInput) GoString() string {
+ return s.String()
+}
+
+// SetBundleId sets the BundleId field's value.
+func (s *UpdateDistributionBundleInput) SetBundleId(v string) *UpdateDistributionBundleInput {
+ s.BundleId = &v
+ return s
+}
+
+// SetDistributionName sets the DistributionName field's value.
+func (s *UpdateDistributionBundleInput) SetDistributionName(v string) *UpdateDistributionBundleInput {
+ s.DistributionName = &v
+ return s
+}
+
+type UpdateDistributionBundleOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Describes the API operation.
+ Operation *Operation `locationName:"operation" type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateDistributionBundleOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateDistributionBundleOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperation sets the Operation field's value.
+func (s *UpdateDistributionBundleOutput) SetOperation(v *Operation) *UpdateDistributionBundleOutput {
+ s.Operation = v
+ return s
+}
+
+type UpdateDistributionInput struct {
+ _ struct{} `type:"structure"`
+
+ // An object that describes the cache behavior settings for the distribution.
+ //
+ // The cacheBehaviorSettings specified in your UpdateDistributionRequest will
+ // replace your distribution's existing settings.
+ CacheBehaviorSettings *CacheSettings `locationName:"cacheBehaviorSettings" type:"structure"`
+
+ // An array of objects that describe the per-path cache behavior for the distribution.
+ CacheBehaviors []*CacheBehaviorPerPath `locationName:"cacheBehaviors" type:"list"`
+
+ // An object that describes the default cache behavior for the distribution.
+ DefaultCacheBehavior *CacheBehavior `locationName:"defaultCacheBehavior" type:"structure"`
+
+ // The name of the distribution to update.
+ //
+ // Use the GetDistributions action to get a list of distribution names that
+ // you can specify.
+ //
+ // DistributionName is a required field
+ DistributionName *string `locationName:"distributionName" type:"string" required:"true"`
+
+ // Indicates whether to enable the distribution.
+ IsEnabled *bool `locationName:"isEnabled" type:"boolean"`
+
+ // An object that describes the origin resource for the distribution, such as
+ // a Lightsail instance or load balancer.
+ //
+ // The distribution pulls, caches, and serves content from the origin.
+ Origin *InputOrigin `locationName:"origin" type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateDistributionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateDistributionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateDistributionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateDistributionInput"}
+ if s.DistributionName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DistributionName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetCacheBehaviorSettings sets the CacheBehaviorSettings field's value.
+func (s *UpdateDistributionInput) SetCacheBehaviorSettings(v *CacheSettings) *UpdateDistributionInput {
+ s.CacheBehaviorSettings = v
+ return s
+}
+
+// SetCacheBehaviors sets the CacheBehaviors field's value.
+func (s *UpdateDistributionInput) SetCacheBehaviors(v []*CacheBehaviorPerPath) *UpdateDistributionInput {
+ s.CacheBehaviors = v
+ return s
+}
+
+// SetDefaultCacheBehavior sets the DefaultCacheBehavior field's value.
+func (s *UpdateDistributionInput) SetDefaultCacheBehavior(v *CacheBehavior) *UpdateDistributionInput {
+ s.DefaultCacheBehavior = v
+ return s
+}
+
+// SetDistributionName sets the DistributionName field's value.
+func (s *UpdateDistributionInput) SetDistributionName(v string) *UpdateDistributionInput {
+ s.DistributionName = &v
+ return s
+}
+
+// SetIsEnabled sets the IsEnabled field's value.
+func (s *UpdateDistributionInput) SetIsEnabled(v bool) *UpdateDistributionInput {
+ s.IsEnabled = &v
+ return s
+}
+
+// SetOrigin sets the Origin field's value.
+func (s *UpdateDistributionInput) SetOrigin(v *InputOrigin) *UpdateDistributionInput {
+ s.Origin = v
+ return s
+}
+
+type UpdateDistributionOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operation *Operation `locationName:"operation" type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateDistributionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateDistributionOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperation sets the Operation field's value.
+func (s *UpdateDistributionOutput) SetOperation(v *Operation) *UpdateDistributionOutput {
+ s.Operation = v
+ return s
+}
+
+type UpdateDomainEntryInput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of key-value pairs containing information about the domain entry.
+ //
+ // DomainEntry is a required field
+ DomainEntry *DomainEntry `locationName:"domainEntry" type:"structure" required:"true"`
+
+ // The name of the domain recordset to update.
+ //
+ // DomainName is a required field
+ DomainName *string `locationName:"domainName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateDomainEntryInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateDomainEntryInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateDomainEntryInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateDomainEntryInput"}
+ if s.DomainEntry == nil {
+ invalidParams.Add(request.NewErrParamRequired("DomainEntry"))
+ }
+ if s.DomainName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DomainName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDomainEntry sets the DomainEntry field's value.
+func (s *UpdateDomainEntryInput) SetDomainEntry(v *DomainEntry) *UpdateDomainEntryInput {
+ s.DomainEntry = v
+ return s
+}
+
+// SetDomainName sets the DomainName field's value.
+func (s *UpdateDomainEntryInput) SetDomainName(v string) *UpdateDomainEntryInput {
+ s.DomainName = &v
+ return s
+}
+
+type UpdateDomainEntryOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s UpdateDomainEntryOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateDomainEntryOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *UpdateDomainEntryOutput) SetOperations(v []*Operation) *UpdateDomainEntryOutput {
+ s.Operations = v
+ return s
+}
+
+type UpdateLoadBalancerAttributeInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the attribute you want to update. Valid values are below.
+ //
+ // AttributeName is a required field
+ AttributeName *string `locationName:"attributeName" type:"string" required:"true" enum:"LoadBalancerAttributeName"`
+
+ // The value that you want to specify for the attribute name.
+ //
+ // AttributeValue is a required field
+ AttributeValue *string `locationName:"attributeValue" min:"1" type:"string" required:"true"`
+
+	// The name of the load balancer that you want to modify (e.g., my-load-balancer).
+ //
+ // LoadBalancerName is a required field
+ LoadBalancerName *string `locationName:"loadBalancerName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateLoadBalancerAttributeInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateLoadBalancerAttributeInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+// It mirrors the struct-tag constraints: AttributeName, AttributeValue and
+// LoadBalancerName are required, and AttributeValue must be non-empty
+// (min length 1, measured in bytes per the generated SDK convention).
+func (s *UpdateLoadBalancerAttributeInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "UpdateLoadBalancerAttributeInput"}
+	if s.AttributeName == nil {
+		invalidParams.Add(request.NewErrParamRequired("AttributeName"))
+	}
+	if s.AttributeValue == nil {
+		invalidParams.Add(request.NewErrParamRequired("AttributeValue"))
+	}
+	if s.AttributeValue != nil && len(*s.AttributeValue) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("AttributeValue", 1))
+	}
+	if s.LoadBalancerName == nil {
+		invalidParams.Add(request.NewErrParamRequired("LoadBalancerName"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetAttributeName sets the AttributeName field's value.
+func (s *UpdateLoadBalancerAttributeInput) SetAttributeName(v string) *UpdateLoadBalancerAttributeInput {
+ s.AttributeName = &v
+ return s
+}
+
+// SetAttributeValue sets the AttributeValue field's value.
+func (s *UpdateLoadBalancerAttributeInput) SetAttributeValue(v string) *UpdateLoadBalancerAttributeInput {
+ s.AttributeValue = &v
+ return s
+}
+
+// SetLoadBalancerName sets the LoadBalancerName field's value.
+func (s *UpdateLoadBalancerAttributeInput) SetLoadBalancerName(v string) *UpdateLoadBalancerAttributeInput {
+ s.LoadBalancerName = &v
+ return s
+}
+
+type UpdateLoadBalancerAttributeOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s UpdateLoadBalancerAttributeOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateLoadBalancerAttributeOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *UpdateLoadBalancerAttributeOutput) SetOperations(v []*Operation) *UpdateLoadBalancerAttributeOutput {
+ s.Operations = v
+ return s
+}
+
+type UpdateRelationalDatabaseInput struct {
+ _ struct{} `type:"structure"`
+
+ // When true, applies changes immediately. When false, applies changes during
+ // the preferred maintenance window. Some changes may cause an outage.
+ //
+ // Default: false
+ ApplyImmediately *bool `locationName:"applyImmediately" type:"boolean"`
+
+ // Indicates the certificate that needs to be associated with the database.
+ CaCertificateIdentifier *string `locationName:"caCertificateIdentifier" type:"string"`
+
+ // When true, disables automated backup retention for your database.
+ //
+ // Disabling backup retention deletes all automated database backups. Before
+ // disabling this, you may want to create a snapshot of your database using
+ // the create relational database snapshot operation.
+ //
+ // Updates are applied during the next maintenance window because this can result
+ // in an outage.
+ DisableBackupRetention *bool `locationName:"disableBackupRetention" type:"boolean"`
+
+ // When true, enables automated backup retention for your database.
+ //
+ // Updates are applied during the next maintenance window because this can result
+ // in an outage.
+ EnableBackupRetention *bool `locationName:"enableBackupRetention" type:"boolean"`
+
+ // The password for the master user. The password can include any printable
+ // ASCII character except "/", """, or "@".
+ //
+ // MySQL
+ //
+ // Constraints: Must contain from 8 to 41 characters.
+ //
+ // PostgreSQL
+ //
+ // Constraints: Must contain from 8 to 128 characters.
+ MasterUserPassword *string `locationName:"masterUserPassword" type:"string" sensitive:"true"`
+
+ // The daily time range during which automated backups are created for your
+ // database if automated backups are enabled.
+ //
+ // Constraints:
+ //
+ // * Must be in the hh24:mi-hh24:mi format. Example: 16:00-16:30
+ //
+ // * Specified in Coordinated Universal Time (UTC).
+ //
+ // * Must not conflict with the preferred maintenance window.
+ //
+ // * Must be at least 30 minutes.
+ PreferredBackupWindow *string `locationName:"preferredBackupWindow" type:"string"`
+
+ // The weekly time range during which system maintenance can occur on your database.
+ //
+ // The default is a 30-minute window selected at random from an 8-hour block
+ // of time for each AWS Region, occurring on a random day of the week.
+ //
+ // Constraints:
+ //
+ // * Must be in the ddd:hh24:mi-ddd:hh24:mi format.
+ //
+ // * Valid days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.
+ //
+ // * Must be at least 30 minutes.
+ //
+ // * Specified in Coordinated Universal Time (UTC).
+ //
+ // * Example: Tue:17:00-Tue:17:30
+ PreferredMaintenanceWindow *string `locationName:"preferredMaintenanceWindow" type:"string"`
+
+ // Specifies the accessibility options for your database. A value of true specifies
+ // a database that is available to resources outside of your Lightsail account.
+ // A value of false specifies a database that is available only to your Lightsail
+ // resources in the same region as your database.
+ PubliclyAccessible *bool `locationName:"publiclyAccessible" type:"boolean"`
+
+ // The name of your Lightsail database resource to update.
+ //
+ // RelationalDatabaseName is a required field
+ RelationalDatabaseName *string `locationName:"relationalDatabaseName" type:"string" required:"true"`
+
+ // When true, the master user password is changed to a new strong password generated
+ // by Lightsail.
+ //
+ // Use the get relational database master user password operation to get the
+ // new password.
+ RotateMasterUserPassword *bool `locationName:"rotateMasterUserPassword" type:"boolean"`
+}
+
+// String returns the string representation
+func (s UpdateRelationalDatabaseInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateRelationalDatabaseInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateRelationalDatabaseInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateRelationalDatabaseInput"}
+ if s.RelationalDatabaseName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetApplyImmediately sets the ApplyImmediately field's value.
+func (s *UpdateRelationalDatabaseInput) SetApplyImmediately(v bool) *UpdateRelationalDatabaseInput {
+ s.ApplyImmediately = &v
+ return s
+}
+
+// SetCaCertificateIdentifier sets the CaCertificateIdentifier field's value.
+func (s *UpdateRelationalDatabaseInput) SetCaCertificateIdentifier(v string) *UpdateRelationalDatabaseInput {
+ s.CaCertificateIdentifier = &v
+ return s
+}
+
+// SetDisableBackupRetention sets the DisableBackupRetention field's value.
+func (s *UpdateRelationalDatabaseInput) SetDisableBackupRetention(v bool) *UpdateRelationalDatabaseInput {
+ s.DisableBackupRetention = &v
+ return s
+}
+
+// SetEnableBackupRetention sets the EnableBackupRetention field's value.
+func (s *UpdateRelationalDatabaseInput) SetEnableBackupRetention(v bool) *UpdateRelationalDatabaseInput {
+ s.EnableBackupRetention = &v
+ return s
+}
+
+// SetMasterUserPassword sets the MasterUserPassword field's value.
+func (s *UpdateRelationalDatabaseInput) SetMasterUserPassword(v string) *UpdateRelationalDatabaseInput {
+ s.MasterUserPassword = &v
+ return s
+}
+
+// SetPreferredBackupWindow sets the PreferredBackupWindow field's value.
+func (s *UpdateRelationalDatabaseInput) SetPreferredBackupWindow(v string) *UpdateRelationalDatabaseInput {
+ s.PreferredBackupWindow = &v
+ return s
+}
+
+// SetPreferredMaintenanceWindow sets the PreferredMaintenanceWindow field's value.
+func (s *UpdateRelationalDatabaseInput) SetPreferredMaintenanceWindow(v string) *UpdateRelationalDatabaseInput {
+ s.PreferredMaintenanceWindow = &v
+ return s
+}
+
+// SetPubliclyAccessible sets the PubliclyAccessible field's value.
+func (s *UpdateRelationalDatabaseInput) SetPubliclyAccessible(v bool) *UpdateRelationalDatabaseInput {
+ s.PubliclyAccessible = &v
+ return s
+}
+
+// SetRelationalDatabaseName sets the RelationalDatabaseName field's value.
+func (s *UpdateRelationalDatabaseInput) SetRelationalDatabaseName(v string) *UpdateRelationalDatabaseInput {
+ s.RelationalDatabaseName = &v
+ return s
+}
+
+// SetRotateMasterUserPassword sets the RotateMasterUserPassword field's value.
+func (s *UpdateRelationalDatabaseInput) SetRotateMasterUserPassword(v bool) *UpdateRelationalDatabaseInput {
+ s.RotateMasterUserPassword = &v
+ return s
+}
+
+type UpdateRelationalDatabaseOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s UpdateRelationalDatabaseOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateRelationalDatabaseOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *UpdateRelationalDatabaseOutput) SetOperations(v []*Operation) *UpdateRelationalDatabaseOutput {
+ s.Operations = v
+ return s
+}
+
+type UpdateRelationalDatabaseParametersInput struct {
+ _ struct{} `type:"structure"`
+
+ // The database parameters to update.
+ //
+ // Parameters is a required field
+ Parameters []*RelationalDatabaseParameter `locationName:"parameters" type:"list" required:"true"`
+
+ // The name of your database for which to update parameters.
+ //
+ // RelationalDatabaseName is a required field
+ RelationalDatabaseName *string `locationName:"relationalDatabaseName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateRelationalDatabaseParametersInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateRelationalDatabaseParametersInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateRelationalDatabaseParametersInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateRelationalDatabaseParametersInput"}
+ if s.Parameters == nil {
+ invalidParams.Add(request.NewErrParamRequired("Parameters"))
+ }
+ if s.RelationalDatabaseName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetParameters sets the Parameters field's value.
+func (s *UpdateRelationalDatabaseParametersInput) SetParameters(v []*RelationalDatabaseParameter) *UpdateRelationalDatabaseParametersInput {
+ s.Parameters = v
+ return s
+}
+
+// SetRelationalDatabaseName sets the RelationalDatabaseName field's value.
+func (s *UpdateRelationalDatabaseParametersInput) SetRelationalDatabaseName(v string) *UpdateRelationalDatabaseParametersInput {
+ s.RelationalDatabaseName = &v
+ return s
+}
+
+type UpdateRelationalDatabaseParametersOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that describe the result of the action, such as the status
+ // of the request, the timestamp of the request, and the resources affected
+ // by the request.
+ Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s UpdateRelationalDatabaseParametersOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateRelationalDatabaseParametersOutput) GoString() string {
+ return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *UpdateRelationalDatabaseParametersOutput) SetOperations(v []*Operation) *UpdateRelationalDatabaseParametersOutput {
+ s.Operations = v
+ return s
+}
+
+const (
+	// AccessDirectionInbound is a AccessDirection enum value
+	AccessDirectionInbound = "inbound"
+
+	// AccessDirectionOutbound is a AccessDirection enum value
+	AccessDirectionOutbound = "outbound"
+)
+
+// AccessDirection_Values returns every element of the AccessDirection enum.
+func AccessDirection_Values() []string {
+	vals := []string{
+		AccessDirectionInbound,
+		AccessDirectionOutbound,
+	}
+	return vals
+}
+
+const (
+	// AccessTypePublic is a AccessType enum value
+	AccessTypePublic = "public"
+
+	// AccessTypePrivate is a AccessType enum value
+	AccessTypePrivate = "private"
+)
+
+// AccessType_Values returns every element of the AccessType enum.
+func AccessType_Values() []string {
+	vals := []string{
+		AccessTypePublic,
+		AccessTypePrivate,
+	}
+	return vals
+}
+
+const (
+	// AddOnTypeAutoSnapshot is a AddOnType enum value
+	AddOnTypeAutoSnapshot = "AutoSnapshot"
+)
+
+// AddOnType_Values returns every element of the AddOnType enum.
+func AddOnType_Values() []string {
+	return []string{AddOnTypeAutoSnapshot}
+}
+
+const (
+	// AlarmStateOk is a AlarmState enum value
+	AlarmStateOk = "OK"
+
+	// AlarmStateAlarm is a AlarmState enum value
+	AlarmStateAlarm = "ALARM"
+
+	// AlarmStateInsufficientData is a AlarmState enum value
+	AlarmStateInsufficientData = "INSUFFICIENT_DATA"
+)
+
+// AlarmState_Values returns every element of the AlarmState enum.
+func AlarmState_Values() []string {
+	vals := []string{
+		AlarmStateOk,
+		AlarmStateAlarm,
+		AlarmStateInsufficientData,
+	}
+	return vals
+}
+
+const (
+	// AutoSnapshotStatusSuccess is a AutoSnapshotStatus enum value
+	AutoSnapshotStatusSuccess = "Success"
+
+	// AutoSnapshotStatusFailed is a AutoSnapshotStatus enum value
+	AutoSnapshotStatusFailed = "Failed"
+
+	// AutoSnapshotStatusInProgress is a AutoSnapshotStatus enum value
+	AutoSnapshotStatusInProgress = "InProgress"
+
+	// AutoSnapshotStatusNotFound is a AutoSnapshotStatus enum value
+	AutoSnapshotStatusNotFound = "NotFound"
+)
+
+// AutoSnapshotStatus_Values returns every element of the AutoSnapshotStatus enum.
+func AutoSnapshotStatus_Values() []string {
+	vals := []string{
+		AutoSnapshotStatusSuccess,
+		AutoSnapshotStatusFailed,
+		AutoSnapshotStatusInProgress,
+		AutoSnapshotStatusNotFound,
+	}
+	return vals
+}
+
+const (
+	// BehaviorEnumDontCache is a BehaviorEnum enum value
+	BehaviorEnumDontCache = "dont-cache"
+
+	// BehaviorEnumCache is a BehaviorEnum enum value
+	BehaviorEnumCache = "cache"
+)
+
+// BehaviorEnum_Values returns every element of the BehaviorEnum enum.
+func BehaviorEnum_Values() []string {
+	vals := []string{
+		BehaviorEnumDontCache,
+		BehaviorEnumCache,
+	}
+	return vals
+}
+
+const (
+	// BlueprintTypeOs is a BlueprintType enum value
+	BlueprintTypeOs = "os"
+
+	// BlueprintTypeApp is a BlueprintType enum value
+	BlueprintTypeApp = "app"
+)
+
+// BlueprintType_Values returns every element of the BlueprintType enum.
+func BlueprintType_Values() []string {
+	vals := []string{
+		BlueprintTypeOs,
+		BlueprintTypeApp,
+	}
+	return vals
+}
+
+const (
+	// BucketMetricNameBucketSizeBytes is a BucketMetricName enum value
+	BucketMetricNameBucketSizeBytes = "BucketSizeBytes"
+
+	// BucketMetricNameNumberOfObjects is a BucketMetricName enum value
+	BucketMetricNameNumberOfObjects = "NumberOfObjects"
+)
+
+// BucketMetricName_Values returns every element of the BucketMetricName enum.
+func BucketMetricName_Values() []string {
+	vals := []string{
+		BucketMetricNameBucketSizeBytes,
+		BucketMetricNameNumberOfObjects,
+	}
+	return vals
+}
+
+const (
+	// CertificateStatusPendingValidation is a CertificateStatus enum value
+	CertificateStatusPendingValidation = "PENDING_VALIDATION"
+
+	// CertificateStatusIssued is a CertificateStatus enum value
+	CertificateStatusIssued = "ISSUED"
+
+	// CertificateStatusInactive is a CertificateStatus enum value
+	CertificateStatusInactive = "INACTIVE"
+
+	// CertificateStatusExpired is a CertificateStatus enum value
+	CertificateStatusExpired = "EXPIRED"
+
+	// CertificateStatusValidationTimedOut is a CertificateStatus enum value
+	CertificateStatusValidationTimedOut = "VALIDATION_TIMED_OUT"
+
+	// CertificateStatusRevoked is a CertificateStatus enum value
+	CertificateStatusRevoked = "REVOKED"
+
+	// CertificateStatusFailed is a CertificateStatus enum value
+	CertificateStatusFailed = "FAILED"
+)
+
+// CertificateStatus_Values returns every element of the CertificateStatus enum.
+func CertificateStatus_Values() []string {
+	vals := []string{
+		CertificateStatusPendingValidation,
+		CertificateStatusIssued,
+		CertificateStatusInactive,
+		CertificateStatusExpired,
+		CertificateStatusValidationTimedOut,
+		CertificateStatusRevoked,
+		CertificateStatusFailed,
+	}
+	return vals
+}
+
+const (
+	// CloudFormationStackRecordSourceTypeExportSnapshotRecord is a CloudFormationStackRecordSourceType enum value
+	CloudFormationStackRecordSourceTypeExportSnapshotRecord = "ExportSnapshotRecord"
+)
+
+// CloudFormationStackRecordSourceType_Values returns every element of the
+// CloudFormationStackRecordSourceType enum.
+func CloudFormationStackRecordSourceType_Values() []string {
+	return []string{CloudFormationStackRecordSourceTypeExportSnapshotRecord}
+}
+
+const (
+ // ComparisonOperatorGreaterThanOrEqualToThreshold is a ComparisonOperator enum value
+ ComparisonOperatorGreaterThanOrEqualToThreshold = "GreaterThanOrEqualToThreshold"
+
+ // ComparisonOperatorGreaterThanThreshold is a ComparisonOperator enum value
+ ComparisonOperatorGreaterThanThreshold = "GreaterThanThreshold"
+
+ // ComparisonOperatorLessThanThreshold is a ComparisonOperator enum value
+ ComparisonOperatorLessThanThreshold = "LessThanThreshold"
+
+ // ComparisonOperatorLessThanOrEqualToThreshold is a ComparisonOperator enum value
+ ComparisonOperatorLessThanOrEqualToThreshold = "LessThanOrEqualToThreshold"
+)
+
+// ComparisonOperator_Values returns all elements of the ComparisonOperator enum
+func ComparisonOperator_Values() []string {
+ return []string{
+ ComparisonOperatorGreaterThanOrEqualToThreshold,
+ ComparisonOperatorGreaterThanThreshold,
+ ComparisonOperatorLessThanThreshold,
+ ComparisonOperatorLessThanOrEqualToThreshold,
+ }
+}
+
+const (
+ // ContactMethodStatusPendingVerification is a ContactMethodStatus enum value
+ ContactMethodStatusPendingVerification = "PendingVerification"
+
+ // ContactMethodStatusValid is a ContactMethodStatus enum value
+ ContactMethodStatusValid = "Valid"
+
+ // ContactMethodStatusInvalid is a ContactMethodStatus enum value
+ ContactMethodStatusInvalid = "Invalid"
+)
+
+// ContactMethodStatus_Values returns all elements of the ContactMethodStatus enum
+func ContactMethodStatus_Values() []string {
+ return []string{
+ ContactMethodStatusPendingVerification,
+ ContactMethodStatusValid,
+ ContactMethodStatusInvalid,
+ }
+}
+
+const (
+ // ContactMethodVerificationProtocolEmail is a ContactMethodVerificationProtocol enum value
+ ContactMethodVerificationProtocolEmail = "Email"
+)
+
+// ContactMethodVerificationProtocol_Values returns all elements of the ContactMethodVerificationProtocol enum
+func ContactMethodVerificationProtocol_Values() []string {
+ return []string{
+ ContactMethodVerificationProtocolEmail,
+ }
+}
+
+const (
+ // ContactProtocolEmail is a ContactProtocol enum value
+ ContactProtocolEmail = "Email"
+
+ // ContactProtocolSms is a ContactProtocol enum value
+ ContactProtocolSms = "SMS"
+)
+
+// ContactProtocol_Values returns all elements of the ContactProtocol enum
+func ContactProtocol_Values() []string {
+ return []string{
+ ContactProtocolEmail,
+ ContactProtocolSms,
+ }
+}
+
+const (
+ // ContainerServiceDeploymentStateActivating is a ContainerServiceDeploymentState enum value
+ ContainerServiceDeploymentStateActivating = "ACTIVATING"
+
+ // ContainerServiceDeploymentStateActive is a ContainerServiceDeploymentState enum value
+ ContainerServiceDeploymentStateActive = "ACTIVE"
+
+ // ContainerServiceDeploymentStateInactive is a ContainerServiceDeploymentState enum value
+ ContainerServiceDeploymentStateInactive = "INACTIVE"
+
+ // ContainerServiceDeploymentStateFailed is a ContainerServiceDeploymentState enum value
+ ContainerServiceDeploymentStateFailed = "FAILED"
+)
+
+// ContainerServiceDeploymentState_Values returns all elements of the ContainerServiceDeploymentState enum
+func ContainerServiceDeploymentState_Values() []string {
+ return []string{
+ ContainerServiceDeploymentStateActivating,
+ ContainerServiceDeploymentStateActive,
+ ContainerServiceDeploymentStateInactive,
+ ContainerServiceDeploymentStateFailed,
+ }
+}
+
+const (
+ // ContainerServiceMetricNameCpuutilization is a ContainerServiceMetricName enum value
+ ContainerServiceMetricNameCpuutilization = "CPUUtilization"
+
+ // ContainerServiceMetricNameMemoryUtilization is a ContainerServiceMetricName enum value
+ ContainerServiceMetricNameMemoryUtilization = "MemoryUtilization"
+)
+
+// ContainerServiceMetricName_Values returns all elements of the ContainerServiceMetricName enum
+func ContainerServiceMetricName_Values() []string {
+ return []string{
+ ContainerServiceMetricNameCpuutilization,
+ ContainerServiceMetricNameMemoryUtilization,
+ }
+}
+
+const (
+ // ContainerServicePowerNameNano is a ContainerServicePowerName enum value
+ ContainerServicePowerNameNano = "nano"
+
+ // ContainerServicePowerNameMicro is a ContainerServicePowerName enum value
+ ContainerServicePowerNameMicro = "micro"
+
+ // ContainerServicePowerNameSmall is a ContainerServicePowerName enum value
+ ContainerServicePowerNameSmall = "small"
+
+ // ContainerServicePowerNameMedium is a ContainerServicePowerName enum value
+ ContainerServicePowerNameMedium = "medium"
+
+ // ContainerServicePowerNameLarge is a ContainerServicePowerName enum value
+ ContainerServicePowerNameLarge = "large"
+
+ // ContainerServicePowerNameXlarge is a ContainerServicePowerName enum value
+ ContainerServicePowerNameXlarge = "xlarge"
+)
+
+// ContainerServicePowerName_Values returns all elements of the ContainerServicePowerName enum
+func ContainerServicePowerName_Values() []string {
+ return []string{
+ ContainerServicePowerNameNano,
+ ContainerServicePowerNameMicro,
+ ContainerServicePowerNameSmall,
+ ContainerServicePowerNameMedium,
+ ContainerServicePowerNameLarge,
+ ContainerServicePowerNameXlarge,
+ }
+}
+
+const (
+ // ContainerServiceProtocolHttp is a ContainerServiceProtocol enum value
+ ContainerServiceProtocolHttp = "HTTP"
+
+ // ContainerServiceProtocolHttps is a ContainerServiceProtocol enum value
+ ContainerServiceProtocolHttps = "HTTPS"
+
+ // ContainerServiceProtocolTcp is a ContainerServiceProtocol enum value
+ ContainerServiceProtocolTcp = "TCP"
+
+ // ContainerServiceProtocolUdp is a ContainerServiceProtocol enum value
+ ContainerServiceProtocolUdp = "UDP"
+)
+
+// ContainerServiceProtocol_Values returns all elements of the ContainerServiceProtocol enum
+func ContainerServiceProtocol_Values() []string {
+ return []string{
+ ContainerServiceProtocolHttp,
+ ContainerServiceProtocolHttps,
+ ContainerServiceProtocolTcp,
+ ContainerServiceProtocolUdp,
+ }
+}
+
+const (
+ // ContainerServiceStatePending is a ContainerServiceState enum value
+ ContainerServiceStatePending = "PENDING"
+
+ // ContainerServiceStateReady is a ContainerServiceState enum value
+ ContainerServiceStateReady = "READY"
+
+ // ContainerServiceStateRunning is a ContainerServiceState enum value
+ ContainerServiceStateRunning = "RUNNING"
+
+ // ContainerServiceStateUpdating is a ContainerServiceState enum value
+ ContainerServiceStateUpdating = "UPDATING"
+
+ // ContainerServiceStateDeleting is a ContainerServiceState enum value
+ ContainerServiceStateDeleting = "DELETING"
+
+ // ContainerServiceStateDisabled is a ContainerServiceState enum value
+ ContainerServiceStateDisabled = "DISABLED"
+
+ // ContainerServiceStateDeploying is a ContainerServiceState enum value
+ ContainerServiceStateDeploying = "DEPLOYING"
+)
+
+// ContainerServiceState_Values returns all elements of the ContainerServiceState enum
+func ContainerServiceState_Values() []string {
+ return []string{
+ ContainerServiceStatePending,
+ ContainerServiceStateReady,
+ ContainerServiceStateRunning,
+ ContainerServiceStateUpdating,
+ ContainerServiceStateDeleting,
+ ContainerServiceStateDisabled,
+ ContainerServiceStateDeploying,
+ }
+}
+
+const (
+ // ContainerServiceStateDetailCodeCreatingSystemResources is a ContainerServiceStateDetailCode enum value
+ ContainerServiceStateDetailCodeCreatingSystemResources = "CREATING_SYSTEM_RESOURCES"
+
+ // ContainerServiceStateDetailCodeCreatingNetworkInfrastructure is a ContainerServiceStateDetailCode enum value
+ ContainerServiceStateDetailCodeCreatingNetworkInfrastructure = "CREATING_NETWORK_INFRASTRUCTURE"
+
+ // ContainerServiceStateDetailCodeProvisioningCertificate is a ContainerServiceStateDetailCode enum value
+ ContainerServiceStateDetailCodeProvisioningCertificate = "PROVISIONING_CERTIFICATE"
+
+ // ContainerServiceStateDetailCodeProvisioningService is a ContainerServiceStateDetailCode enum value
+ ContainerServiceStateDetailCodeProvisioningService = "PROVISIONING_SERVICE"
+
+ // ContainerServiceStateDetailCodeCreatingDeployment is a ContainerServiceStateDetailCode enum value
+ ContainerServiceStateDetailCodeCreatingDeployment = "CREATING_DEPLOYMENT"
+
+ // ContainerServiceStateDetailCodeEvaluatingHealthCheck is a ContainerServiceStateDetailCode enum value
+ ContainerServiceStateDetailCodeEvaluatingHealthCheck = "EVALUATING_HEALTH_CHECK"
+
+ // ContainerServiceStateDetailCodeActivatingDeployment is a ContainerServiceStateDetailCode enum value
+ ContainerServiceStateDetailCodeActivatingDeployment = "ACTIVATING_DEPLOYMENT"
+
+ // ContainerServiceStateDetailCodeCertificateLimitExceeded is a ContainerServiceStateDetailCode enum value
+ ContainerServiceStateDetailCodeCertificateLimitExceeded = "CERTIFICATE_LIMIT_EXCEEDED"
+
+ // ContainerServiceStateDetailCodeUnknownError is a ContainerServiceStateDetailCode enum value
+ ContainerServiceStateDetailCodeUnknownError = "UNKNOWN_ERROR"
+)
+
+// ContainerServiceStateDetailCode_Values returns all elements of the ContainerServiceStateDetailCode enum
+func ContainerServiceStateDetailCode_Values() []string {
+ return []string{
+ ContainerServiceStateDetailCodeCreatingSystemResources,
+ ContainerServiceStateDetailCodeCreatingNetworkInfrastructure,
+ ContainerServiceStateDetailCodeProvisioningCertificate,
+ ContainerServiceStateDetailCodeProvisioningService,
+ ContainerServiceStateDetailCodeCreatingDeployment,
+ ContainerServiceStateDetailCodeEvaluatingHealthCheck,
+ ContainerServiceStateDetailCodeActivatingDeployment,
+ ContainerServiceStateDetailCodeCertificateLimitExceeded,
+ ContainerServiceStateDetailCodeUnknownError,
+ }
+}
+
+const (
+ // DiskSnapshotStatePending is a DiskSnapshotState enum value
+ DiskSnapshotStatePending = "pending"
+
+ // DiskSnapshotStateCompleted is a DiskSnapshotState enum value
+ DiskSnapshotStateCompleted = "completed"
+
+ // DiskSnapshotStateError is a DiskSnapshotState enum value
+ DiskSnapshotStateError = "error"
+
+ // DiskSnapshotStateUnknown is a DiskSnapshotState enum value
+ DiskSnapshotStateUnknown = "unknown"
+)
+
+// DiskSnapshotState_Values returns all elements of the DiskSnapshotState enum
+func DiskSnapshotState_Values() []string {
+ return []string{
+ DiskSnapshotStatePending,
+ DiskSnapshotStateCompleted,
+ DiskSnapshotStateError,
+ DiskSnapshotStateUnknown,
+ }
+}
+
+const (
+ // DiskStatePending is a DiskState enum value
+ DiskStatePending = "pending"
+
+ // DiskStateError is a DiskState enum value
+ DiskStateError = "error"
+
+ // DiskStateAvailable is a DiskState enum value
+ DiskStateAvailable = "available"
+
+ // DiskStateInUse is a DiskState enum value
+ DiskStateInUse = "in-use"
+
+ // DiskStateUnknown is a DiskState enum value
+ DiskStateUnknown = "unknown"
+)
+
+// DiskState_Values returns all elements of the DiskState enum
+func DiskState_Values() []string {
+ return []string{
+ DiskStatePending,
+ DiskStateError,
+ DiskStateAvailable,
+ DiskStateInUse,
+ DiskStateUnknown,
+ }
+}
+
+const (
+ // DistributionMetricNameRequests is a DistributionMetricName enum value
+ DistributionMetricNameRequests = "Requests"
+
+ // DistributionMetricNameBytesDownloaded is a DistributionMetricName enum value
+ DistributionMetricNameBytesDownloaded = "BytesDownloaded"
+
+ // DistributionMetricNameBytesUploaded is a DistributionMetricName enum value
+ DistributionMetricNameBytesUploaded = "BytesUploaded"
+
+ // DistributionMetricNameTotalErrorRate is a DistributionMetricName enum value
+ DistributionMetricNameTotalErrorRate = "TotalErrorRate"
+
+ // DistributionMetricNameHttp4xxErrorRate is a DistributionMetricName enum value
+ DistributionMetricNameHttp4xxErrorRate = "Http4xxErrorRate"
+
+ // DistributionMetricNameHttp5xxErrorRate is a DistributionMetricName enum value
+ DistributionMetricNameHttp5xxErrorRate = "Http5xxErrorRate"
+)
+
+// DistributionMetricName_Values returns all elements of the DistributionMetricName enum
+func DistributionMetricName_Values() []string {
+ return []string{
+ DistributionMetricNameRequests,
+ DistributionMetricNameBytesDownloaded,
+ DistributionMetricNameBytesUploaded,
+ DistributionMetricNameTotalErrorRate,
+ DistributionMetricNameHttp4xxErrorRate,
+ DistributionMetricNameHttp5xxErrorRate,
+ }
+}
+
+const (
+ // ExportSnapshotRecordSourceTypeInstanceSnapshot is a ExportSnapshotRecordSourceType enum value
+ ExportSnapshotRecordSourceTypeInstanceSnapshot = "InstanceSnapshot"
+
+ // ExportSnapshotRecordSourceTypeDiskSnapshot is a ExportSnapshotRecordSourceType enum value
+ ExportSnapshotRecordSourceTypeDiskSnapshot = "DiskSnapshot"
+)
+
+// ExportSnapshotRecordSourceType_Values returns all elements of the ExportSnapshotRecordSourceType enum
+func ExportSnapshotRecordSourceType_Values() []string {
+ return []string{
+ ExportSnapshotRecordSourceTypeInstanceSnapshot,
+ ExportSnapshotRecordSourceTypeDiskSnapshot,
+ }
+}
+
+const (
+ // ForwardValuesNone is a ForwardValues enum value
+ ForwardValuesNone = "none"
+
+ // ForwardValuesAllowList is a ForwardValues enum value
+ ForwardValuesAllowList = "allow-list"
+
+ // ForwardValuesAll is a ForwardValues enum value
+ ForwardValuesAll = "all"
+)
+
+// ForwardValues_Values returns all elements of the ForwardValues enum
+func ForwardValues_Values() []string {
+ return []string{
+ ForwardValuesNone,
+ ForwardValuesAllowList,
+ ForwardValuesAll,
+ }
+}
+
+const (
+ // HeaderEnumAccept is a HeaderEnum enum value
+ HeaderEnumAccept = "Accept"
+
+ // HeaderEnumAcceptCharset is a HeaderEnum enum value
+ HeaderEnumAcceptCharset = "Accept-Charset"
+
+ // HeaderEnumAcceptDatetime is a HeaderEnum enum value
+ HeaderEnumAcceptDatetime = "Accept-Datetime"
+
+ // HeaderEnumAcceptEncoding is a HeaderEnum enum value
+ HeaderEnumAcceptEncoding = "Accept-Encoding"
+
+ // HeaderEnumAcceptLanguage is a HeaderEnum enum value
+ HeaderEnumAcceptLanguage = "Accept-Language"
+
+ // HeaderEnumAuthorization is a HeaderEnum enum value
+ HeaderEnumAuthorization = "Authorization"
+
+ // HeaderEnumCloudFrontForwardedProto is a HeaderEnum enum value
+ HeaderEnumCloudFrontForwardedProto = "CloudFront-Forwarded-Proto"
+
+ // HeaderEnumCloudFrontIsDesktopViewer is a HeaderEnum enum value
+ HeaderEnumCloudFrontIsDesktopViewer = "CloudFront-Is-Desktop-Viewer"
+
+ // HeaderEnumCloudFrontIsMobileViewer is a HeaderEnum enum value
+ HeaderEnumCloudFrontIsMobileViewer = "CloudFront-Is-Mobile-Viewer"
+
+ // HeaderEnumCloudFrontIsSmartTvViewer is a HeaderEnum enum value
+ HeaderEnumCloudFrontIsSmartTvViewer = "CloudFront-Is-SmartTV-Viewer"
+
+ // HeaderEnumCloudFrontIsTabletViewer is a HeaderEnum enum value
+ HeaderEnumCloudFrontIsTabletViewer = "CloudFront-Is-Tablet-Viewer"
+
+ // HeaderEnumCloudFrontViewerCountry is a HeaderEnum enum value
+ HeaderEnumCloudFrontViewerCountry = "CloudFront-Viewer-Country"
+
+ // HeaderEnumHost is a HeaderEnum enum value
+ HeaderEnumHost = "Host"
+
+ // HeaderEnumOrigin is a HeaderEnum enum value
+ HeaderEnumOrigin = "Origin"
+
+ // HeaderEnumReferer is a HeaderEnum enum value
+ HeaderEnumReferer = "Referer"
+)
+
+// HeaderEnum_Values returns all elements of the HeaderEnum enum
+func HeaderEnum_Values() []string {
+ return []string{
+ HeaderEnumAccept,
+ HeaderEnumAcceptCharset,
+ HeaderEnumAcceptDatetime,
+ HeaderEnumAcceptEncoding,
+ HeaderEnumAcceptLanguage,
+ HeaderEnumAuthorization,
+ HeaderEnumCloudFrontForwardedProto,
+ HeaderEnumCloudFrontIsDesktopViewer,
+ HeaderEnumCloudFrontIsMobileViewer,
+ HeaderEnumCloudFrontIsSmartTvViewer,
+ HeaderEnumCloudFrontIsTabletViewer,
+ HeaderEnumCloudFrontViewerCountry,
+ HeaderEnumHost,
+ HeaderEnumOrigin,
+ HeaderEnumReferer,
+ }
+}
+
+const (
+ // InstanceAccessProtocolSsh is a InstanceAccessProtocol enum value
+ InstanceAccessProtocolSsh = "ssh"
+
+ // InstanceAccessProtocolRdp is a InstanceAccessProtocol enum value
+ InstanceAccessProtocolRdp = "rdp"
+)
+
+// InstanceAccessProtocol_Values returns all elements of the InstanceAccessProtocol enum
+func InstanceAccessProtocol_Values() []string {
+ return []string{
+ InstanceAccessProtocolSsh,
+ InstanceAccessProtocolRdp,
+ }
+}
+
+const (
+ // InstanceHealthReasonLbRegistrationInProgress is a InstanceHealthReason enum value
+ InstanceHealthReasonLbRegistrationInProgress = "Lb.RegistrationInProgress"
+
+ // InstanceHealthReasonLbInitialHealthChecking is a InstanceHealthReason enum value
+ InstanceHealthReasonLbInitialHealthChecking = "Lb.InitialHealthChecking"
+
+ // InstanceHealthReasonLbInternalError is a InstanceHealthReason enum value
+ InstanceHealthReasonLbInternalError = "Lb.InternalError"
+
+ // InstanceHealthReasonInstanceResponseCodeMismatch is a InstanceHealthReason enum value
+ InstanceHealthReasonInstanceResponseCodeMismatch = "Instance.ResponseCodeMismatch"
+
+ // InstanceHealthReasonInstanceTimeout is a InstanceHealthReason enum value
+ InstanceHealthReasonInstanceTimeout = "Instance.Timeout"
+
+ // InstanceHealthReasonInstanceFailedHealthChecks is a InstanceHealthReason enum value
+ InstanceHealthReasonInstanceFailedHealthChecks = "Instance.FailedHealthChecks"
+
+ // InstanceHealthReasonInstanceNotRegistered is a InstanceHealthReason enum value
+ InstanceHealthReasonInstanceNotRegistered = "Instance.NotRegistered"
+
+ // InstanceHealthReasonInstanceNotInUse is a InstanceHealthReason enum value
+ InstanceHealthReasonInstanceNotInUse = "Instance.NotInUse"
+
+ // InstanceHealthReasonInstanceDeregistrationInProgress is a InstanceHealthReason enum value
+ InstanceHealthReasonInstanceDeregistrationInProgress = "Instance.DeregistrationInProgress"
+
+ // InstanceHealthReasonInstanceInvalidState is a InstanceHealthReason enum value
+ InstanceHealthReasonInstanceInvalidState = "Instance.InvalidState"
+
+ // InstanceHealthReasonInstanceIpUnusable is a InstanceHealthReason enum value
+ InstanceHealthReasonInstanceIpUnusable = "Instance.IpUnusable"
+)
+
+// InstanceHealthReason_Values returns all elements of the InstanceHealthReason enum
+func InstanceHealthReason_Values() []string {
+ return []string{
+ InstanceHealthReasonLbRegistrationInProgress,
+ InstanceHealthReasonLbInitialHealthChecking,
+ InstanceHealthReasonLbInternalError,
+ InstanceHealthReasonInstanceResponseCodeMismatch,
+ InstanceHealthReasonInstanceTimeout,
+ InstanceHealthReasonInstanceFailedHealthChecks,
+ InstanceHealthReasonInstanceNotRegistered,
+ InstanceHealthReasonInstanceNotInUse,
+ InstanceHealthReasonInstanceDeregistrationInProgress,
+ InstanceHealthReasonInstanceInvalidState,
+ InstanceHealthReasonInstanceIpUnusable,
+ }
+}
+
+const (
+ // InstanceHealthStateInitial is a InstanceHealthState enum value
+ InstanceHealthStateInitial = "initial"
+
+ // InstanceHealthStateHealthy is a InstanceHealthState enum value
+ InstanceHealthStateHealthy = "healthy"
+
+ // InstanceHealthStateUnhealthy is a InstanceHealthState enum value
+ InstanceHealthStateUnhealthy = "unhealthy"
+
+ // InstanceHealthStateUnused is a InstanceHealthState enum value
+ InstanceHealthStateUnused = "unused"
+
+ // InstanceHealthStateDraining is a InstanceHealthState enum value
+ InstanceHealthStateDraining = "draining"
+
+ // InstanceHealthStateUnavailable is a InstanceHealthState enum value
+ InstanceHealthStateUnavailable = "unavailable"
+)
+
+// InstanceHealthState_Values returns all elements of the InstanceHealthState enum
+func InstanceHealthState_Values() []string {
+ return []string{
+ InstanceHealthStateInitial,
+ InstanceHealthStateHealthy,
+ InstanceHealthStateUnhealthy,
+ InstanceHealthStateUnused,
+ InstanceHealthStateDraining,
+ InstanceHealthStateUnavailable,
+ }
+}
+
+const (
+ // InstanceMetricNameCpuutilization is a InstanceMetricName enum value
+ InstanceMetricNameCpuutilization = "CPUUtilization"
+
+ // InstanceMetricNameNetworkIn is a InstanceMetricName enum value
+ InstanceMetricNameNetworkIn = "NetworkIn"
+
+ // InstanceMetricNameNetworkOut is a InstanceMetricName enum value
+ InstanceMetricNameNetworkOut = "NetworkOut"
+
+ // InstanceMetricNameStatusCheckFailed is a InstanceMetricName enum value
+ InstanceMetricNameStatusCheckFailed = "StatusCheckFailed"
+
+ // InstanceMetricNameStatusCheckFailedInstance is a InstanceMetricName enum value
+ InstanceMetricNameStatusCheckFailedInstance = "StatusCheckFailed_Instance"
+
+ // InstanceMetricNameStatusCheckFailedSystem is a InstanceMetricName enum value
+ InstanceMetricNameStatusCheckFailedSystem = "StatusCheckFailed_System"
+
+ // InstanceMetricNameBurstCapacityTime is a InstanceMetricName enum value
+ InstanceMetricNameBurstCapacityTime = "BurstCapacityTime"
+
+ // InstanceMetricNameBurstCapacityPercentage is a InstanceMetricName enum value
+ InstanceMetricNameBurstCapacityPercentage = "BurstCapacityPercentage"
+)
+
+// InstanceMetricName_Values returns all elements of the InstanceMetricName enum
+func InstanceMetricName_Values() []string {
+ return []string{
+ InstanceMetricNameCpuutilization,
+ InstanceMetricNameNetworkIn,
+ InstanceMetricNameNetworkOut,
+ InstanceMetricNameStatusCheckFailed,
+ InstanceMetricNameStatusCheckFailedInstance,
+ InstanceMetricNameStatusCheckFailedSystem,
+ InstanceMetricNameBurstCapacityTime,
+ InstanceMetricNameBurstCapacityPercentage,
+ }
+}
+
+const (
+ // InstancePlatformLinuxUnix is a InstancePlatform enum value
+ InstancePlatformLinuxUnix = "LINUX_UNIX"
+
+ // InstancePlatformWindows is a InstancePlatform enum value
+ InstancePlatformWindows = "WINDOWS"
+)
+
+// InstancePlatform_Values returns all elements of the InstancePlatform enum
+func InstancePlatform_Values() []string {
+ return []string{
+ InstancePlatformLinuxUnix,
+ InstancePlatformWindows,
+ }
+}
+
+const (
+ // InstanceSnapshotStatePending is a InstanceSnapshotState enum value
+ InstanceSnapshotStatePending = "pending"
+
+ // InstanceSnapshotStateError is a InstanceSnapshotState enum value
+ InstanceSnapshotStateError = "error"
+
+ // InstanceSnapshotStateAvailable is a InstanceSnapshotState enum value
+ InstanceSnapshotStateAvailable = "available"
+)
+
+// InstanceSnapshotState_Values returns all elements of the InstanceSnapshotState enum
+func InstanceSnapshotState_Values() []string {
+ return []string{
+ InstanceSnapshotStatePending,
+ InstanceSnapshotStateError,
+ InstanceSnapshotStateAvailable,
+ }
+}
+
+const (
+ // IpAddressTypeDualstack is a IpAddressType enum value
+ IpAddressTypeDualstack = "dualstack"
+
+ // IpAddressTypeIpv4 is a IpAddressType enum value
+ IpAddressTypeIpv4 = "ipv4"
+)
+
+// IpAddressType_Values returns all elements of the IpAddressType enum
+func IpAddressType_Values() []string {
+ return []string{
+ IpAddressTypeDualstack,
+ IpAddressTypeIpv4,
+ }
+}
+
+const (
+ // LoadBalancerAttributeNameHealthCheckPath is a LoadBalancerAttributeName enum value
+ LoadBalancerAttributeNameHealthCheckPath = "HealthCheckPath"
+
+ // LoadBalancerAttributeNameSessionStickinessEnabled is a LoadBalancerAttributeName enum value
+ LoadBalancerAttributeNameSessionStickinessEnabled = "SessionStickinessEnabled"
+
+ // LoadBalancerAttributeNameSessionStickinessLbCookieDurationSeconds is a LoadBalancerAttributeName enum value
+ LoadBalancerAttributeNameSessionStickinessLbCookieDurationSeconds = "SessionStickiness_LB_CookieDurationSeconds"
+)
+
+// LoadBalancerAttributeName_Values returns all elements of the LoadBalancerAttributeName enum
+func LoadBalancerAttributeName_Values() []string {
+ return []string{
+ LoadBalancerAttributeNameHealthCheckPath,
+ LoadBalancerAttributeNameSessionStickinessEnabled,
+ LoadBalancerAttributeNameSessionStickinessLbCookieDurationSeconds,
+ }
+}
+
+const (
+ // LoadBalancerMetricNameClientTlsnegotiationErrorCount is a LoadBalancerMetricName enum value
+ LoadBalancerMetricNameClientTlsnegotiationErrorCount = "ClientTLSNegotiationErrorCount"
+
+ // LoadBalancerMetricNameHealthyHostCount is a LoadBalancerMetricName enum value
+ LoadBalancerMetricNameHealthyHostCount = "HealthyHostCount"
+
+ // LoadBalancerMetricNameUnhealthyHostCount is a LoadBalancerMetricName enum value
+ LoadBalancerMetricNameUnhealthyHostCount = "UnhealthyHostCount"
+
+ // LoadBalancerMetricNameHttpcodeLb4xxCount is a LoadBalancerMetricName enum value
+ LoadBalancerMetricNameHttpcodeLb4xxCount = "HTTPCode_LB_4XX_Count"
+
+ // LoadBalancerMetricNameHttpcodeLb5xxCount is a LoadBalancerMetricName enum value
+ LoadBalancerMetricNameHttpcodeLb5xxCount = "HTTPCode_LB_5XX_Count"
+
+ // LoadBalancerMetricNameHttpcodeInstance2xxCount is a LoadBalancerMetricName enum value
+ LoadBalancerMetricNameHttpcodeInstance2xxCount = "HTTPCode_Instance_2XX_Count"
+
+ // LoadBalancerMetricNameHttpcodeInstance3xxCount is a LoadBalancerMetricName enum value
+ LoadBalancerMetricNameHttpcodeInstance3xxCount = "HTTPCode_Instance_3XX_Count"
+
+ // LoadBalancerMetricNameHttpcodeInstance4xxCount is a LoadBalancerMetricName enum value
+ LoadBalancerMetricNameHttpcodeInstance4xxCount = "HTTPCode_Instance_4XX_Count"
+
+ // LoadBalancerMetricNameHttpcodeInstance5xxCount is a LoadBalancerMetricName enum value
+ LoadBalancerMetricNameHttpcodeInstance5xxCount = "HTTPCode_Instance_5XX_Count"
+
+ // LoadBalancerMetricNameInstanceResponseTime is a LoadBalancerMetricName enum value
+ LoadBalancerMetricNameInstanceResponseTime = "InstanceResponseTime"
+
+ // LoadBalancerMetricNameRejectedConnectionCount is a LoadBalancerMetricName enum value
+ LoadBalancerMetricNameRejectedConnectionCount = "RejectedConnectionCount"
+
+ // LoadBalancerMetricNameRequestCount is a LoadBalancerMetricName enum value
+ LoadBalancerMetricNameRequestCount = "RequestCount"
+)
+
+// LoadBalancerMetricName_Values returns all elements of the LoadBalancerMetricName enum
+func LoadBalancerMetricName_Values() []string {
+ return []string{
+ LoadBalancerMetricNameClientTlsnegotiationErrorCount,
+ LoadBalancerMetricNameHealthyHostCount,
+ LoadBalancerMetricNameUnhealthyHostCount,
+ LoadBalancerMetricNameHttpcodeLb4xxCount,
+ LoadBalancerMetricNameHttpcodeLb5xxCount,
+ LoadBalancerMetricNameHttpcodeInstance2xxCount,
+ LoadBalancerMetricNameHttpcodeInstance3xxCount,
+ LoadBalancerMetricNameHttpcodeInstance4xxCount,
+ LoadBalancerMetricNameHttpcodeInstance5xxCount,
+ LoadBalancerMetricNameInstanceResponseTime,
+ LoadBalancerMetricNameRejectedConnectionCount,
+ LoadBalancerMetricNameRequestCount,
+ }
+}
+
+const (
+ // LoadBalancerProtocolHttpHttps is a LoadBalancerProtocol enum value
+ LoadBalancerProtocolHttpHttps = "HTTP_HTTPS"
+
+ // LoadBalancerProtocolHttp is a LoadBalancerProtocol enum value
+ LoadBalancerProtocolHttp = "HTTP"
+)
+
+// LoadBalancerProtocol_Values returns all elements of the LoadBalancerProtocol enum
+func LoadBalancerProtocol_Values() []string {
+ return []string{
+ LoadBalancerProtocolHttpHttps,
+ LoadBalancerProtocolHttp,
+ }
+}
+
+const (
+ // LoadBalancerStateActive is a LoadBalancerState enum value
+ LoadBalancerStateActive = "active"
+
+ // LoadBalancerStateProvisioning is a LoadBalancerState enum value
+ LoadBalancerStateProvisioning = "provisioning"
+
+ // LoadBalancerStateActiveImpaired is a LoadBalancerState enum value
+ LoadBalancerStateActiveImpaired = "active_impaired"
+
+ // LoadBalancerStateFailed is a LoadBalancerState enum value
+ LoadBalancerStateFailed = "failed"
+
+ // LoadBalancerStateUnknown is a LoadBalancerState enum value
+ LoadBalancerStateUnknown = "unknown"
+)
+
+// LoadBalancerState_Values returns all elements of the LoadBalancerState enum
+func LoadBalancerState_Values() []string {
+ return []string{
+ LoadBalancerStateActive,
+ LoadBalancerStateProvisioning,
+ LoadBalancerStateActiveImpaired,
+ LoadBalancerStateFailed,
+ LoadBalancerStateUnknown,
+ }
+}
+
+const (
+ // LoadBalancerTlsCertificateDomainStatusPendingValidation is a LoadBalancerTlsCertificateDomainStatus enum value
+ LoadBalancerTlsCertificateDomainStatusPendingValidation = "PENDING_VALIDATION"
+
+ // LoadBalancerTlsCertificateDomainStatusFailed is a LoadBalancerTlsCertificateDomainStatus enum value
+ LoadBalancerTlsCertificateDomainStatusFailed = "FAILED"
+
+ // LoadBalancerTlsCertificateDomainStatusSuccess is a LoadBalancerTlsCertificateDomainStatus enum value
+ LoadBalancerTlsCertificateDomainStatusSuccess = "SUCCESS"
+)
+
+// LoadBalancerTlsCertificateDomainStatus_Values returns all elements of the LoadBalancerTlsCertificateDomainStatus enum
+func LoadBalancerTlsCertificateDomainStatus_Values() []string {
+ return []string{
+ LoadBalancerTlsCertificateDomainStatusPendingValidation,
+ LoadBalancerTlsCertificateDomainStatusFailed,
+ LoadBalancerTlsCertificateDomainStatusSuccess,
+ }
+}
+
+const (
+ // LoadBalancerTlsCertificateFailureReasonNoAvailableContacts is a LoadBalancerTlsCertificateFailureReason enum value
+ LoadBalancerTlsCertificateFailureReasonNoAvailableContacts = "NO_AVAILABLE_CONTACTS"
+
+ // LoadBalancerTlsCertificateFailureReasonAdditionalVerificationRequired is a LoadBalancerTlsCertificateFailureReason enum value
+ LoadBalancerTlsCertificateFailureReasonAdditionalVerificationRequired = "ADDITIONAL_VERIFICATION_REQUIRED"
+
+ // LoadBalancerTlsCertificateFailureReasonDomainNotAllowed is a LoadBalancerTlsCertificateFailureReason enum value
+ LoadBalancerTlsCertificateFailureReasonDomainNotAllowed = "DOMAIN_NOT_ALLOWED"
+
+ // LoadBalancerTlsCertificateFailureReasonInvalidPublicDomain is a LoadBalancerTlsCertificateFailureReason enum value
+ LoadBalancerTlsCertificateFailureReasonInvalidPublicDomain = "INVALID_PUBLIC_DOMAIN"
+
+ // LoadBalancerTlsCertificateFailureReasonOther is a LoadBalancerTlsCertificateFailureReason enum value
+ LoadBalancerTlsCertificateFailureReasonOther = "OTHER"
+)
+
+// LoadBalancerTlsCertificateFailureReason_Values returns all elements of the LoadBalancerTlsCertificateFailureReason enum
+func LoadBalancerTlsCertificateFailureReason_Values() []string {
+ return []string{
+ LoadBalancerTlsCertificateFailureReasonNoAvailableContacts,
+ LoadBalancerTlsCertificateFailureReasonAdditionalVerificationRequired,
+ LoadBalancerTlsCertificateFailureReasonDomainNotAllowed,
+ LoadBalancerTlsCertificateFailureReasonInvalidPublicDomain,
+ LoadBalancerTlsCertificateFailureReasonOther,
+ }
+}
+
+const (
+ // LoadBalancerTlsCertificateRenewalStatusPendingAutoRenewal is a LoadBalancerTlsCertificateRenewalStatus enum value
+ LoadBalancerTlsCertificateRenewalStatusPendingAutoRenewal = "PENDING_AUTO_RENEWAL"
+
+ // LoadBalancerTlsCertificateRenewalStatusPendingValidation is a LoadBalancerTlsCertificateRenewalStatus enum value
+ LoadBalancerTlsCertificateRenewalStatusPendingValidation = "PENDING_VALIDATION"
+
+ // LoadBalancerTlsCertificateRenewalStatusSuccess is a LoadBalancerTlsCertificateRenewalStatus enum value
+ LoadBalancerTlsCertificateRenewalStatusSuccess = "SUCCESS"
+
+ // LoadBalancerTlsCertificateRenewalStatusFailed is a LoadBalancerTlsCertificateRenewalStatus enum value
+ LoadBalancerTlsCertificateRenewalStatusFailed = "FAILED"
+)
+
+// LoadBalancerTlsCertificateRenewalStatus_Values returns all elements of the LoadBalancerTlsCertificateRenewalStatus enum
+func LoadBalancerTlsCertificateRenewalStatus_Values() []string {
+ return []string{
+ LoadBalancerTlsCertificateRenewalStatusPendingAutoRenewal,
+ LoadBalancerTlsCertificateRenewalStatusPendingValidation,
+ LoadBalancerTlsCertificateRenewalStatusSuccess,
+ LoadBalancerTlsCertificateRenewalStatusFailed,
+ }
+}
+
// Values of the LoadBalancerTlsCertificateRevocationReason enum.
const (
	// LoadBalancerTlsCertificateRevocationReasonUnspecified is a possible LoadBalancerTlsCertificateRevocationReason value.
	LoadBalancerTlsCertificateRevocationReasonUnspecified = "UNSPECIFIED"
	// LoadBalancerTlsCertificateRevocationReasonKeyCompromise is a possible LoadBalancerTlsCertificateRevocationReason value.
	LoadBalancerTlsCertificateRevocationReasonKeyCompromise = "KEY_COMPROMISE"
	// LoadBalancerTlsCertificateRevocationReasonCaCompromise is a possible LoadBalancerTlsCertificateRevocationReason value.
	LoadBalancerTlsCertificateRevocationReasonCaCompromise = "CA_COMPROMISE"
	// LoadBalancerTlsCertificateRevocationReasonAffiliationChanged is a possible LoadBalancerTlsCertificateRevocationReason value.
	LoadBalancerTlsCertificateRevocationReasonAffiliationChanged = "AFFILIATION_CHANGED"
	// LoadBalancerTlsCertificateRevocationReasonSuperceded is a possible LoadBalancerTlsCertificateRevocationReason value.
	// NOTE: "SUPERCEDED" spelling is fixed by the service API; do not "correct" it.
	LoadBalancerTlsCertificateRevocationReasonSuperceded = "SUPERCEDED"
	// LoadBalancerTlsCertificateRevocationReasonCessationOfOperation is a possible LoadBalancerTlsCertificateRevocationReason value.
	LoadBalancerTlsCertificateRevocationReasonCessationOfOperation = "CESSATION_OF_OPERATION"
	// LoadBalancerTlsCertificateRevocationReasonCertificateHold is a possible LoadBalancerTlsCertificateRevocationReason value.
	LoadBalancerTlsCertificateRevocationReasonCertificateHold = "CERTIFICATE_HOLD"
	// LoadBalancerTlsCertificateRevocationReasonRemoveFromCrl is a possible LoadBalancerTlsCertificateRevocationReason value.
	LoadBalancerTlsCertificateRevocationReasonRemoveFromCrl = "REMOVE_FROM_CRL"
	// LoadBalancerTlsCertificateRevocationReasonPrivilegeWithdrawn is a possible LoadBalancerTlsCertificateRevocationReason value.
	LoadBalancerTlsCertificateRevocationReasonPrivilegeWithdrawn = "PRIVILEGE_WITHDRAWN"
	// LoadBalancerTlsCertificateRevocationReasonAACompromise is a possible LoadBalancerTlsCertificateRevocationReason value.
	LoadBalancerTlsCertificateRevocationReasonAACompromise = "A_A_COMPROMISE"
)

// LoadBalancerTlsCertificateRevocationReason_Values lists every value defined
// for the LoadBalancerTlsCertificateRevocationReason enum, in declaration order.
func LoadBalancerTlsCertificateRevocationReason_Values() []string {
	return []string{
		LoadBalancerTlsCertificateRevocationReasonUnspecified, LoadBalancerTlsCertificateRevocationReasonKeyCompromise,
		LoadBalancerTlsCertificateRevocationReasonCaCompromise, LoadBalancerTlsCertificateRevocationReasonAffiliationChanged,
		LoadBalancerTlsCertificateRevocationReasonSuperceded, LoadBalancerTlsCertificateRevocationReasonCessationOfOperation,
		LoadBalancerTlsCertificateRevocationReasonCertificateHold, LoadBalancerTlsCertificateRevocationReasonRemoveFromCrl,
		LoadBalancerTlsCertificateRevocationReasonPrivilegeWithdrawn, LoadBalancerTlsCertificateRevocationReasonAACompromise,
	}
}
+
// Values of the LoadBalancerTlsCertificateStatus enum.
const (
	// LoadBalancerTlsCertificateStatusPendingValidation is a possible LoadBalancerTlsCertificateStatus value.
	LoadBalancerTlsCertificateStatusPendingValidation = "PENDING_VALIDATION"
	// LoadBalancerTlsCertificateStatusIssued is a possible LoadBalancerTlsCertificateStatus value.
	LoadBalancerTlsCertificateStatusIssued = "ISSUED"
	// LoadBalancerTlsCertificateStatusInactive is a possible LoadBalancerTlsCertificateStatus value.
	LoadBalancerTlsCertificateStatusInactive = "INACTIVE"
	// LoadBalancerTlsCertificateStatusExpired is a possible LoadBalancerTlsCertificateStatus value.
	LoadBalancerTlsCertificateStatusExpired = "EXPIRED"
	// LoadBalancerTlsCertificateStatusValidationTimedOut is a possible LoadBalancerTlsCertificateStatus value.
	LoadBalancerTlsCertificateStatusValidationTimedOut = "VALIDATION_TIMED_OUT"
	// LoadBalancerTlsCertificateStatusRevoked is a possible LoadBalancerTlsCertificateStatus value.
	LoadBalancerTlsCertificateStatusRevoked = "REVOKED"
	// LoadBalancerTlsCertificateStatusFailed is a possible LoadBalancerTlsCertificateStatus value.
	LoadBalancerTlsCertificateStatusFailed = "FAILED"
	// LoadBalancerTlsCertificateStatusUnknown is a possible LoadBalancerTlsCertificateStatus value.
	LoadBalancerTlsCertificateStatusUnknown = "UNKNOWN"
)

// LoadBalancerTlsCertificateStatus_Values lists every value defined for the
// LoadBalancerTlsCertificateStatus enum, in declaration order.
func LoadBalancerTlsCertificateStatus_Values() []string {
	return []string{
		LoadBalancerTlsCertificateStatusPendingValidation, LoadBalancerTlsCertificateStatusIssued,
		LoadBalancerTlsCertificateStatusInactive, LoadBalancerTlsCertificateStatusExpired,
		LoadBalancerTlsCertificateStatusValidationTimedOut, LoadBalancerTlsCertificateStatusRevoked,
		LoadBalancerTlsCertificateStatusFailed, LoadBalancerTlsCertificateStatusUnknown,
	}
}
+
// Values of the MetricName enum (instance, load-balancer, and database metrics).
const (
	// MetricNameCpuutilization is a possible MetricName value.
	MetricNameCpuutilization = "CPUUtilization"
	// MetricNameNetworkIn is a possible MetricName value.
	MetricNameNetworkIn = "NetworkIn"
	// MetricNameNetworkOut is a possible MetricName value.
	MetricNameNetworkOut = "NetworkOut"
	// MetricNameStatusCheckFailed is a possible MetricName value.
	MetricNameStatusCheckFailed = "StatusCheckFailed"
	// MetricNameStatusCheckFailedInstance is a possible MetricName value.
	MetricNameStatusCheckFailedInstance = "StatusCheckFailed_Instance"
	// MetricNameStatusCheckFailedSystem is a possible MetricName value.
	MetricNameStatusCheckFailedSystem = "StatusCheckFailed_System"
	// MetricNameClientTlsnegotiationErrorCount is a possible MetricName value.
	MetricNameClientTlsnegotiationErrorCount = "ClientTLSNegotiationErrorCount"
	// MetricNameHealthyHostCount is a possible MetricName value.
	MetricNameHealthyHostCount = "HealthyHostCount"
	// MetricNameUnhealthyHostCount is a possible MetricName value.
	MetricNameUnhealthyHostCount = "UnhealthyHostCount"
	// MetricNameHttpcodeLb4xxCount is a possible MetricName value.
	MetricNameHttpcodeLb4xxCount = "HTTPCode_LB_4XX_Count"
	// MetricNameHttpcodeLb5xxCount is a possible MetricName value.
	MetricNameHttpcodeLb5xxCount = "HTTPCode_LB_5XX_Count"
	// MetricNameHttpcodeInstance2xxCount is a possible MetricName value.
	MetricNameHttpcodeInstance2xxCount = "HTTPCode_Instance_2XX_Count"
	// MetricNameHttpcodeInstance3xxCount is a possible MetricName value.
	MetricNameHttpcodeInstance3xxCount = "HTTPCode_Instance_3XX_Count"
	// MetricNameHttpcodeInstance4xxCount is a possible MetricName value.
	MetricNameHttpcodeInstance4xxCount = "HTTPCode_Instance_4XX_Count"
	// MetricNameHttpcodeInstance5xxCount is a possible MetricName value.
	MetricNameHttpcodeInstance5xxCount = "HTTPCode_Instance_5XX_Count"
	// MetricNameInstanceResponseTime is a possible MetricName value.
	MetricNameInstanceResponseTime = "InstanceResponseTime"
	// MetricNameRejectedConnectionCount is a possible MetricName value.
	MetricNameRejectedConnectionCount = "RejectedConnectionCount"
	// MetricNameRequestCount is a possible MetricName value.
	MetricNameRequestCount = "RequestCount"
	// MetricNameDatabaseConnections is a possible MetricName value.
	MetricNameDatabaseConnections = "DatabaseConnections"
	// MetricNameDiskQueueDepth is a possible MetricName value.
	MetricNameDiskQueueDepth = "DiskQueueDepth"
	// MetricNameFreeStorageSpace is a possible MetricName value.
	MetricNameFreeStorageSpace = "FreeStorageSpace"
	// MetricNameNetworkReceiveThroughput is a possible MetricName value.
	MetricNameNetworkReceiveThroughput = "NetworkReceiveThroughput"
	// MetricNameNetworkTransmitThroughput is a possible MetricName value.
	MetricNameNetworkTransmitThroughput = "NetworkTransmitThroughput"
	// MetricNameBurstCapacityTime is a possible MetricName value.
	MetricNameBurstCapacityTime = "BurstCapacityTime"
	// MetricNameBurstCapacityPercentage is a possible MetricName value.
	MetricNameBurstCapacityPercentage = "BurstCapacityPercentage"
)

// MetricName_Values lists every value defined for the MetricName enum,
// in declaration order.
func MetricName_Values() []string {
	return []string{
		MetricNameCpuutilization, MetricNameNetworkIn, MetricNameNetworkOut,
		MetricNameStatusCheckFailed, MetricNameStatusCheckFailedInstance, MetricNameStatusCheckFailedSystem,
		MetricNameClientTlsnegotiationErrorCount, MetricNameHealthyHostCount, MetricNameUnhealthyHostCount,
		MetricNameHttpcodeLb4xxCount, MetricNameHttpcodeLb5xxCount,
		MetricNameHttpcodeInstance2xxCount, MetricNameHttpcodeInstance3xxCount,
		MetricNameHttpcodeInstance4xxCount, MetricNameHttpcodeInstance5xxCount,
		MetricNameInstanceResponseTime, MetricNameRejectedConnectionCount, MetricNameRequestCount,
		MetricNameDatabaseConnections, MetricNameDiskQueueDepth, MetricNameFreeStorageSpace,
		MetricNameNetworkReceiveThroughput, MetricNameNetworkTransmitThroughput,
		MetricNameBurstCapacityTime, MetricNameBurstCapacityPercentage,
	}
}
+
// Values of the MetricStatistic enum.
const (
	// MetricStatisticMinimum is a possible MetricStatistic value.
	MetricStatisticMinimum = "Minimum"
	// MetricStatisticMaximum is a possible MetricStatistic value.
	MetricStatisticMaximum = "Maximum"
	// MetricStatisticSum is a possible MetricStatistic value.
	MetricStatisticSum = "Sum"
	// MetricStatisticAverage is a possible MetricStatistic value.
	MetricStatisticAverage = "Average"
	// MetricStatisticSampleCount is a possible MetricStatistic value.
	MetricStatisticSampleCount = "SampleCount"
)

// MetricStatistic_Values lists every value defined for the MetricStatistic
// enum, in declaration order.
func MetricStatistic_Values() []string {
	return []string{
		MetricStatisticMinimum, MetricStatisticMaximum, MetricStatisticSum,
		MetricStatisticAverage, MetricStatisticSampleCount,
	}
}
+
// Values of the MetricUnit enum (the units CloudWatch-style metrics report in).
const (
	// MetricUnitSeconds is a possible MetricUnit value.
	MetricUnitSeconds = "Seconds"
	// MetricUnitMicroseconds is a possible MetricUnit value.
	MetricUnitMicroseconds = "Microseconds"
	// MetricUnitMilliseconds is a possible MetricUnit value.
	MetricUnitMilliseconds = "Milliseconds"
	// MetricUnitBytes is a possible MetricUnit value.
	MetricUnitBytes = "Bytes"
	// MetricUnitKilobytes is a possible MetricUnit value.
	MetricUnitKilobytes = "Kilobytes"
	// MetricUnitMegabytes is a possible MetricUnit value.
	MetricUnitMegabytes = "Megabytes"
	// MetricUnitGigabytes is a possible MetricUnit value.
	MetricUnitGigabytes = "Gigabytes"
	// MetricUnitTerabytes is a possible MetricUnit value.
	MetricUnitTerabytes = "Terabytes"
	// MetricUnitBits is a possible MetricUnit value.
	MetricUnitBits = "Bits"
	// MetricUnitKilobits is a possible MetricUnit value.
	MetricUnitKilobits = "Kilobits"
	// MetricUnitMegabits is a possible MetricUnit value.
	MetricUnitMegabits = "Megabits"
	// MetricUnitGigabits is a possible MetricUnit value.
	MetricUnitGigabits = "Gigabits"
	// MetricUnitTerabits is a possible MetricUnit value.
	MetricUnitTerabits = "Terabits"
	// MetricUnitPercent is a possible MetricUnit value.
	MetricUnitPercent = "Percent"
	// MetricUnitCount is a possible MetricUnit value.
	MetricUnitCount = "Count"
	// MetricUnitBytesSecond is a possible MetricUnit value.
	MetricUnitBytesSecond = "Bytes/Second"
	// MetricUnitKilobytesSecond is a possible MetricUnit value.
	MetricUnitKilobytesSecond = "Kilobytes/Second"
	// MetricUnitMegabytesSecond is a possible MetricUnit value.
	MetricUnitMegabytesSecond = "Megabytes/Second"
	// MetricUnitGigabytesSecond is a possible MetricUnit value.
	MetricUnitGigabytesSecond = "Gigabytes/Second"
	// MetricUnitTerabytesSecond is a possible MetricUnit value.
	MetricUnitTerabytesSecond = "Terabytes/Second"
	// MetricUnitBitsSecond is a possible MetricUnit value.
	MetricUnitBitsSecond = "Bits/Second"
	// MetricUnitKilobitsSecond is a possible MetricUnit value.
	MetricUnitKilobitsSecond = "Kilobits/Second"
	// MetricUnitMegabitsSecond is a possible MetricUnit value.
	MetricUnitMegabitsSecond = "Megabits/Second"
	// MetricUnitGigabitsSecond is a possible MetricUnit value.
	MetricUnitGigabitsSecond = "Gigabits/Second"
	// MetricUnitTerabitsSecond is a possible MetricUnit value.
	MetricUnitTerabitsSecond = "Terabits/Second"
	// MetricUnitCountSecond is a possible MetricUnit value.
	MetricUnitCountSecond = "Count/Second"
	// MetricUnitNone is a possible MetricUnit value.
	MetricUnitNone = "None"
)

// MetricUnit_Values lists every value defined for the MetricUnit enum,
// in declaration order.
func MetricUnit_Values() []string {
	return []string{
		MetricUnitSeconds, MetricUnitMicroseconds, MetricUnitMilliseconds,
		MetricUnitBytes, MetricUnitKilobytes, MetricUnitMegabytes, MetricUnitGigabytes, MetricUnitTerabytes,
		MetricUnitBits, MetricUnitKilobits, MetricUnitMegabits, MetricUnitGigabits, MetricUnitTerabits,
		MetricUnitPercent, MetricUnitCount,
		MetricUnitBytesSecond, MetricUnitKilobytesSecond, MetricUnitMegabytesSecond,
		MetricUnitGigabytesSecond, MetricUnitTerabytesSecond,
		MetricUnitBitsSecond, MetricUnitKilobitsSecond, MetricUnitMegabitsSecond,
		MetricUnitGigabitsSecond, MetricUnitTerabitsSecond,
		MetricUnitCountSecond, MetricUnitNone,
	}
}
+
// Values of the NetworkProtocol enum (note: lowercase wire values).
const (
	// NetworkProtocolTcp is a possible NetworkProtocol value.
	NetworkProtocolTcp = "tcp"
	// NetworkProtocolAll is a possible NetworkProtocol value.
	NetworkProtocolAll = "all"
	// NetworkProtocolUdp is a possible NetworkProtocol value.
	NetworkProtocolUdp = "udp"
	// NetworkProtocolIcmp is a possible NetworkProtocol value.
	NetworkProtocolIcmp = "icmp"
)

// NetworkProtocol_Values lists every value defined for the NetworkProtocol
// enum, in declaration order.
func NetworkProtocol_Values() []string {
	return []string{NetworkProtocolTcp, NetworkProtocolAll, NetworkProtocolUdp, NetworkProtocolIcmp}
}
+
// Values of the OperationStatus enum.
const (
	// OperationStatusNotStarted is a possible OperationStatus value.
	OperationStatusNotStarted = "NotStarted"
	// OperationStatusStarted is a possible OperationStatus value.
	OperationStatusStarted = "Started"
	// OperationStatusFailed is a possible OperationStatus value.
	OperationStatusFailed = "Failed"
	// OperationStatusCompleted is a possible OperationStatus value.
	OperationStatusCompleted = "Completed"
	// OperationStatusSucceeded is a possible OperationStatus value.
	OperationStatusSucceeded = "Succeeded"
)

// OperationStatus_Values lists every value defined for the OperationStatus
// enum, in declaration order.
func OperationStatus_Values() []string {
	return []string{
		OperationStatusNotStarted, OperationStatusStarted, OperationStatusFailed,
		OperationStatusCompleted, OperationStatusSucceeded,
	}
}
+
// Values of the OperationType enum (every asynchronous operation the
// Lightsail API reports).
const (
	// OperationTypeDeleteKnownHostKeys is a possible OperationType value.
	OperationTypeDeleteKnownHostKeys = "DeleteKnownHostKeys"
	// OperationTypeDeleteInstance is a possible OperationType value.
	OperationTypeDeleteInstance = "DeleteInstance"
	// OperationTypeCreateInstance is a possible OperationType value.
	OperationTypeCreateInstance = "CreateInstance"
	// OperationTypeStopInstance is a possible OperationType value.
	OperationTypeStopInstance = "StopInstance"
	// OperationTypeStartInstance is a possible OperationType value.
	OperationTypeStartInstance = "StartInstance"
	// OperationTypeRebootInstance is a possible OperationType value.
	OperationTypeRebootInstance = "RebootInstance"
	// OperationTypeOpenInstancePublicPorts is a possible OperationType value.
	OperationTypeOpenInstancePublicPorts = "OpenInstancePublicPorts"
	// OperationTypePutInstancePublicPorts is a possible OperationType value.
	OperationTypePutInstancePublicPorts = "PutInstancePublicPorts"
	// OperationTypeCloseInstancePublicPorts is a possible OperationType value.
	OperationTypeCloseInstancePublicPorts = "CloseInstancePublicPorts"
	// OperationTypeAllocateStaticIp is a possible OperationType value.
	OperationTypeAllocateStaticIp = "AllocateStaticIp"
	// OperationTypeReleaseStaticIp is a possible OperationType value.
	OperationTypeReleaseStaticIp = "ReleaseStaticIp"
	// OperationTypeAttachStaticIp is a possible OperationType value.
	OperationTypeAttachStaticIp = "AttachStaticIp"
	// OperationTypeDetachStaticIp is a possible OperationType value.
	OperationTypeDetachStaticIp = "DetachStaticIp"
	// OperationTypeUpdateDomainEntry is a possible OperationType value.
	OperationTypeUpdateDomainEntry = "UpdateDomainEntry"
	// OperationTypeDeleteDomainEntry is a possible OperationType value.
	OperationTypeDeleteDomainEntry = "DeleteDomainEntry"
	// OperationTypeCreateDomain is a possible OperationType value.
	OperationTypeCreateDomain = "CreateDomain"
	// OperationTypeDeleteDomain is a possible OperationType value.
	OperationTypeDeleteDomain = "DeleteDomain"
	// OperationTypeCreateInstanceSnapshot is a possible OperationType value.
	OperationTypeCreateInstanceSnapshot = "CreateInstanceSnapshot"
	// OperationTypeDeleteInstanceSnapshot is a possible OperationType value.
	OperationTypeDeleteInstanceSnapshot = "DeleteInstanceSnapshot"
	// OperationTypeCreateInstancesFromSnapshot is a possible OperationType value.
	OperationTypeCreateInstancesFromSnapshot = "CreateInstancesFromSnapshot"
	// OperationTypeCreateLoadBalancer is a possible OperationType value.
	OperationTypeCreateLoadBalancer = "CreateLoadBalancer"
	// OperationTypeDeleteLoadBalancer is a possible OperationType value.
	OperationTypeDeleteLoadBalancer = "DeleteLoadBalancer"
	// OperationTypeAttachInstancesToLoadBalancer is a possible OperationType value.
	OperationTypeAttachInstancesToLoadBalancer = "AttachInstancesToLoadBalancer"
	// OperationTypeDetachInstancesFromLoadBalancer is a possible OperationType value.
	OperationTypeDetachInstancesFromLoadBalancer = "DetachInstancesFromLoadBalancer"
	// OperationTypeUpdateLoadBalancerAttribute is a possible OperationType value.
	OperationTypeUpdateLoadBalancerAttribute = "UpdateLoadBalancerAttribute"
	// OperationTypeCreateLoadBalancerTlsCertificate is a possible OperationType value.
	OperationTypeCreateLoadBalancerTlsCertificate = "CreateLoadBalancerTlsCertificate"
	// OperationTypeDeleteLoadBalancerTlsCertificate is a possible OperationType value.
	OperationTypeDeleteLoadBalancerTlsCertificate = "DeleteLoadBalancerTlsCertificate"
	// OperationTypeAttachLoadBalancerTlsCertificate is a possible OperationType value.
	OperationTypeAttachLoadBalancerTlsCertificate = "AttachLoadBalancerTlsCertificate"
	// OperationTypeCreateDisk is a possible OperationType value.
	OperationTypeCreateDisk = "CreateDisk"
	// OperationTypeDeleteDisk is a possible OperationType value.
	OperationTypeDeleteDisk = "DeleteDisk"
	// OperationTypeAttachDisk is a possible OperationType value.
	OperationTypeAttachDisk = "AttachDisk"
	// OperationTypeDetachDisk is a possible OperationType value.
	OperationTypeDetachDisk = "DetachDisk"
	// OperationTypeCreateDiskSnapshot is a possible OperationType value.
	OperationTypeCreateDiskSnapshot = "CreateDiskSnapshot"
	// OperationTypeDeleteDiskSnapshot is a possible OperationType value.
	OperationTypeDeleteDiskSnapshot = "DeleteDiskSnapshot"
	// OperationTypeCreateDiskFromSnapshot is a possible OperationType value.
	OperationTypeCreateDiskFromSnapshot = "CreateDiskFromSnapshot"
	// OperationTypeCreateRelationalDatabase is a possible OperationType value.
	OperationTypeCreateRelationalDatabase = "CreateRelationalDatabase"
	// OperationTypeUpdateRelationalDatabase is a possible OperationType value.
	OperationTypeUpdateRelationalDatabase = "UpdateRelationalDatabase"
	// OperationTypeDeleteRelationalDatabase is a possible OperationType value.
	OperationTypeDeleteRelationalDatabase = "DeleteRelationalDatabase"
	// OperationTypeCreateRelationalDatabaseFromSnapshot is a possible OperationType value.
	OperationTypeCreateRelationalDatabaseFromSnapshot = "CreateRelationalDatabaseFromSnapshot"
	// OperationTypeCreateRelationalDatabaseSnapshot is a possible OperationType value.
	OperationTypeCreateRelationalDatabaseSnapshot = "CreateRelationalDatabaseSnapshot"
	// OperationTypeDeleteRelationalDatabaseSnapshot is a possible OperationType value.
	OperationTypeDeleteRelationalDatabaseSnapshot = "DeleteRelationalDatabaseSnapshot"
	// OperationTypeUpdateRelationalDatabaseParameters is a possible OperationType value.
	OperationTypeUpdateRelationalDatabaseParameters = "UpdateRelationalDatabaseParameters"
	// OperationTypeStartRelationalDatabase is a possible OperationType value.
	OperationTypeStartRelationalDatabase = "StartRelationalDatabase"
	// OperationTypeRebootRelationalDatabase is a possible OperationType value.
	OperationTypeRebootRelationalDatabase = "RebootRelationalDatabase"
	// OperationTypeStopRelationalDatabase is a possible OperationType value.
	OperationTypeStopRelationalDatabase = "StopRelationalDatabase"
	// OperationTypeEnableAddOn is a possible OperationType value.
	OperationTypeEnableAddOn = "EnableAddOn"
	// OperationTypeDisableAddOn is a possible OperationType value.
	OperationTypeDisableAddOn = "DisableAddOn"
	// OperationTypePutAlarm is a possible OperationType value.
	OperationTypePutAlarm = "PutAlarm"
	// OperationTypeGetAlarms is a possible OperationType value.
	OperationTypeGetAlarms = "GetAlarms"
	// OperationTypeDeleteAlarm is a possible OperationType value.
	OperationTypeDeleteAlarm = "DeleteAlarm"
	// OperationTypeTestAlarm is a possible OperationType value.
	OperationTypeTestAlarm = "TestAlarm"
	// OperationTypeCreateContactMethod is a possible OperationType value.
	OperationTypeCreateContactMethod = "CreateContactMethod"
	// OperationTypeGetContactMethods is a possible OperationType value.
	OperationTypeGetContactMethods = "GetContactMethods"
	// OperationTypeSendContactMethodVerification is a possible OperationType value.
	OperationTypeSendContactMethodVerification = "SendContactMethodVerification"
	// OperationTypeDeleteContactMethod is a possible OperationType value.
	OperationTypeDeleteContactMethod = "DeleteContactMethod"
	// OperationTypeCreateDistribution is a possible OperationType value.
	OperationTypeCreateDistribution = "CreateDistribution"
	// OperationTypeUpdateDistribution is a possible OperationType value.
	OperationTypeUpdateDistribution = "UpdateDistribution"
	// OperationTypeDeleteDistribution is a possible OperationType value.
	OperationTypeDeleteDistribution = "DeleteDistribution"
	// OperationTypeResetDistributionCache is a possible OperationType value.
	OperationTypeResetDistributionCache = "ResetDistributionCache"
	// OperationTypeAttachCertificateToDistribution is a possible OperationType value.
	OperationTypeAttachCertificateToDistribution = "AttachCertificateToDistribution"
	// OperationTypeDetachCertificateFromDistribution is a possible OperationType value.
	OperationTypeDetachCertificateFromDistribution = "DetachCertificateFromDistribution"
	// OperationTypeUpdateDistributionBundle is a possible OperationType value.
	OperationTypeUpdateDistributionBundle = "UpdateDistributionBundle"
	// OperationTypeSetIpAddressType is a possible OperationType value.
	OperationTypeSetIpAddressType = "SetIpAddressType"
	// OperationTypeCreateCertificate is a possible OperationType value.
	OperationTypeCreateCertificate = "CreateCertificate"
	// OperationTypeDeleteCertificate is a possible OperationType value.
	OperationTypeDeleteCertificate = "DeleteCertificate"
	// OperationTypeCreateContainerService is a possible OperationType value.
	OperationTypeCreateContainerService = "CreateContainerService"
	// OperationTypeUpdateContainerService is a possible OperationType value.
	OperationTypeUpdateContainerService = "UpdateContainerService"
	// OperationTypeDeleteContainerService is a possible OperationType value.
	OperationTypeDeleteContainerService = "DeleteContainerService"
	// OperationTypeCreateContainerServiceDeployment is a possible OperationType value.
	OperationTypeCreateContainerServiceDeployment = "CreateContainerServiceDeployment"
	// OperationTypeCreateContainerServiceRegistryLogin is a possible OperationType value.
	OperationTypeCreateContainerServiceRegistryLogin = "CreateContainerServiceRegistryLogin"
	// OperationTypeRegisterContainerImage is a possible OperationType value.
	OperationTypeRegisterContainerImage = "RegisterContainerImage"
	// OperationTypeDeleteContainerImage is a possible OperationType value.
	OperationTypeDeleteContainerImage = "DeleteContainerImage"
	// OperationTypeCreateBucket is a possible OperationType value.
	OperationTypeCreateBucket = "CreateBucket"
	// OperationTypeDeleteBucket is a possible OperationType value.
	OperationTypeDeleteBucket = "DeleteBucket"
	// OperationTypeCreateBucketAccessKey is a possible OperationType value.
	OperationTypeCreateBucketAccessKey = "CreateBucketAccessKey"
	// OperationTypeDeleteBucketAccessKey is a possible OperationType value.
	OperationTypeDeleteBucketAccessKey = "DeleteBucketAccessKey"
	// OperationTypeUpdateBucketBundle is a possible OperationType value.
	OperationTypeUpdateBucketBundle = "UpdateBucketBundle"
	// OperationTypeUpdateBucket is a possible OperationType value.
	OperationTypeUpdateBucket = "UpdateBucket"
	// OperationTypeSetResourceAccessForBucket is a possible OperationType value.
	OperationTypeSetResourceAccessForBucket = "SetResourceAccessForBucket"
)

// OperationType_Values lists every value defined for the OperationType enum,
// in declaration order.
func OperationType_Values() []string {
	return []string{
		OperationTypeDeleteKnownHostKeys, OperationTypeDeleteInstance, OperationTypeCreateInstance,
		OperationTypeStopInstance, OperationTypeStartInstance, OperationTypeRebootInstance,
		OperationTypeOpenInstancePublicPorts, OperationTypePutInstancePublicPorts, OperationTypeCloseInstancePublicPorts,
		OperationTypeAllocateStaticIp, OperationTypeReleaseStaticIp, OperationTypeAttachStaticIp, OperationTypeDetachStaticIp,
		OperationTypeUpdateDomainEntry, OperationTypeDeleteDomainEntry, OperationTypeCreateDomain, OperationTypeDeleteDomain,
		OperationTypeCreateInstanceSnapshot, OperationTypeDeleteInstanceSnapshot, OperationTypeCreateInstancesFromSnapshot,
		OperationTypeCreateLoadBalancer, OperationTypeDeleteLoadBalancer,
		OperationTypeAttachInstancesToLoadBalancer, OperationTypeDetachInstancesFromLoadBalancer,
		OperationTypeUpdateLoadBalancerAttribute,
		OperationTypeCreateLoadBalancerTlsCertificate, OperationTypeDeleteLoadBalancerTlsCertificate,
		OperationTypeAttachLoadBalancerTlsCertificate,
		OperationTypeCreateDisk, OperationTypeDeleteDisk, OperationTypeAttachDisk, OperationTypeDetachDisk,
		OperationTypeCreateDiskSnapshot, OperationTypeDeleteDiskSnapshot, OperationTypeCreateDiskFromSnapshot,
		OperationTypeCreateRelationalDatabase, OperationTypeUpdateRelationalDatabase, OperationTypeDeleteRelationalDatabase,
		OperationTypeCreateRelationalDatabaseFromSnapshot, OperationTypeCreateRelationalDatabaseSnapshot,
		OperationTypeDeleteRelationalDatabaseSnapshot, OperationTypeUpdateRelationalDatabaseParameters,
		OperationTypeStartRelationalDatabase, OperationTypeRebootRelationalDatabase, OperationTypeStopRelationalDatabase,
		OperationTypeEnableAddOn, OperationTypeDisableAddOn,
		OperationTypePutAlarm, OperationTypeGetAlarms, OperationTypeDeleteAlarm, OperationTypeTestAlarm,
		OperationTypeCreateContactMethod, OperationTypeGetContactMethods,
		OperationTypeSendContactMethodVerification, OperationTypeDeleteContactMethod,
		OperationTypeCreateDistribution, OperationTypeUpdateDistribution, OperationTypeDeleteDistribution,
		OperationTypeResetDistributionCache, OperationTypeAttachCertificateToDistribution,
		OperationTypeDetachCertificateFromDistribution, OperationTypeUpdateDistributionBundle,
		OperationTypeSetIpAddressType,
		OperationTypeCreateCertificate, OperationTypeDeleteCertificate,
		OperationTypeCreateContainerService, OperationTypeUpdateContainerService, OperationTypeDeleteContainerService,
		OperationTypeCreateContainerServiceDeployment, OperationTypeCreateContainerServiceRegistryLogin,
		OperationTypeRegisterContainerImage, OperationTypeDeleteContainerImage,
		OperationTypeCreateBucket, OperationTypeDeleteBucket,
		OperationTypeCreateBucketAccessKey, OperationTypeDeleteBucketAccessKey,
		OperationTypeUpdateBucketBundle, OperationTypeUpdateBucket, OperationTypeSetResourceAccessForBucket,
	}
}
+
// Values of the OriginProtocolPolicyEnum enum.
const (
	// OriginProtocolPolicyEnumHttpOnly is a possible OriginProtocolPolicyEnum value.
	OriginProtocolPolicyEnumHttpOnly = "http-only"
	// OriginProtocolPolicyEnumHttpsOnly is a possible OriginProtocolPolicyEnum value.
	OriginProtocolPolicyEnumHttpsOnly = "https-only"
)

// OriginProtocolPolicyEnum_Values lists every value defined for the
// OriginProtocolPolicyEnum enum, in declaration order.
func OriginProtocolPolicyEnum_Values() []string {
	return []string{OriginProtocolPolicyEnumHttpOnly, OriginProtocolPolicyEnumHttpsOnly}
}
+
// Values of the PortAccessType enum.
const (
	// PortAccessTypePublic is a possible PortAccessType value.
	PortAccessTypePublic = "Public"
	// PortAccessTypePrivate is a possible PortAccessType value.
	PortAccessTypePrivate = "Private"
)

// PortAccessType_Values lists every value defined for the PortAccessType
// enum, in declaration order.
func PortAccessType_Values() []string {
	return []string{PortAccessTypePublic, PortAccessTypePrivate}
}
+
// Values of the PortInfoSourceType enum.
const (
	// PortInfoSourceTypeDefault is a possible PortInfoSourceType value.
	PortInfoSourceTypeDefault = "DEFAULT"
	// PortInfoSourceTypeInstance is a possible PortInfoSourceType value.
	PortInfoSourceTypeInstance = "INSTANCE"
	// PortInfoSourceTypeNone is a possible PortInfoSourceType value.
	PortInfoSourceTypeNone = "NONE"
	// PortInfoSourceTypeClosed is a possible PortInfoSourceType value.
	PortInfoSourceTypeClosed = "CLOSED"
)

// PortInfoSourceType_Values lists every value defined for the
// PortInfoSourceType enum, in declaration order.
func PortInfoSourceType_Values() []string {
	return []string{PortInfoSourceTypeDefault, PortInfoSourceTypeInstance, PortInfoSourceTypeNone, PortInfoSourceTypeClosed}
}
+
// Values of the PortState enum (note: lowercase wire values).
const (
	// PortStateOpen is a possible PortState value.
	PortStateOpen = "open"
	// PortStateClosed is a possible PortState value.
	PortStateClosed = "closed"
)

// PortState_Values lists every value defined for the PortState enum,
// in declaration order.
func PortState_Values() []string {
	return []string{PortStateOpen, PortStateClosed}
}
+
// Values of the RecordState enum.
const (
	// RecordStateStarted is a possible RecordState value.
	RecordStateStarted = "Started"
	// RecordStateSucceeded is a possible RecordState value.
	RecordStateSucceeded = "Succeeded"
	// RecordStateFailed is a possible RecordState value.
	RecordStateFailed = "Failed"
)

// RecordState_Values lists every value defined for the RecordState enum,
// in declaration order.
func RecordState_Values() []string {
	return []string{RecordStateStarted, RecordStateSucceeded, RecordStateFailed}
}
+
// Values of the RegionName enum (AWS regions where Lightsail is available).
const (
	// RegionNameUsEast1 is a possible RegionName value.
	RegionNameUsEast1 = "us-east-1"
	// RegionNameUsEast2 is a possible RegionName value.
	RegionNameUsEast2 = "us-east-2"
	// RegionNameUsWest1 is a possible RegionName value.
	RegionNameUsWest1 = "us-west-1"
	// RegionNameUsWest2 is a possible RegionName value.
	RegionNameUsWest2 = "us-west-2"
	// RegionNameEuWest1 is a possible RegionName value.
	RegionNameEuWest1 = "eu-west-1"
	// RegionNameEuWest2 is a possible RegionName value.
	RegionNameEuWest2 = "eu-west-2"
	// RegionNameEuWest3 is a possible RegionName value.
	RegionNameEuWest3 = "eu-west-3"
	// RegionNameEuCentral1 is a possible RegionName value.
	RegionNameEuCentral1 = "eu-central-1"
	// RegionNameCaCentral1 is a possible RegionName value.
	RegionNameCaCentral1 = "ca-central-1"
	// RegionNameApSouth1 is a possible RegionName value.
	RegionNameApSouth1 = "ap-south-1"
	// RegionNameApSoutheast1 is a possible RegionName value.
	RegionNameApSoutheast1 = "ap-southeast-1"
	// RegionNameApSoutheast2 is a possible RegionName value.
	RegionNameApSoutheast2 = "ap-southeast-2"
	// RegionNameApNortheast1 is a possible RegionName value.
	RegionNameApNortheast1 = "ap-northeast-1"
	// RegionNameApNortheast2 is a possible RegionName value.
	RegionNameApNortheast2 = "ap-northeast-2"
	// RegionNameEuNorth1 is a possible RegionName value.
	RegionNameEuNorth1 = "eu-north-1"
)

// RegionName_Values lists every value defined for the RegionName enum,
// in declaration order.
func RegionName_Values() []string {
	return []string{
		RegionNameUsEast1, RegionNameUsEast2, RegionNameUsWest1, RegionNameUsWest2,
		RegionNameEuWest1, RegionNameEuWest2, RegionNameEuWest3, RegionNameEuCentral1,
		RegionNameCaCentral1,
		RegionNameApSouth1, RegionNameApSoutheast1, RegionNameApSoutheast2,
		RegionNameApNortheast1, RegionNameApNortheast2,
		RegionNameEuNorth1,
	}
}
+
// Values of the RelationalDatabaseEngine enum (MySQL is currently the only engine).
const (
	// RelationalDatabaseEngineMysql is a possible RelationalDatabaseEngine value.
	RelationalDatabaseEngineMysql = "mysql"
)

// RelationalDatabaseEngine_Values lists every value defined for the
// RelationalDatabaseEngine enum, in declaration order.
func RelationalDatabaseEngine_Values() []string {
	return []string{RelationalDatabaseEngineMysql}
}
+
// Values of the RelationalDatabaseMetricName enum.
const (
	// RelationalDatabaseMetricNameCpuutilization is a possible RelationalDatabaseMetricName value.
	RelationalDatabaseMetricNameCpuutilization = "CPUUtilization"
	// RelationalDatabaseMetricNameDatabaseConnections is a possible RelationalDatabaseMetricName value.
	RelationalDatabaseMetricNameDatabaseConnections = "DatabaseConnections"
	// RelationalDatabaseMetricNameDiskQueueDepth is a possible RelationalDatabaseMetricName value.
	RelationalDatabaseMetricNameDiskQueueDepth = "DiskQueueDepth"
	// RelationalDatabaseMetricNameFreeStorageSpace is a possible RelationalDatabaseMetricName value.
	RelationalDatabaseMetricNameFreeStorageSpace = "FreeStorageSpace"
	// RelationalDatabaseMetricNameNetworkReceiveThroughput is a possible RelationalDatabaseMetricName value.
	RelationalDatabaseMetricNameNetworkReceiveThroughput = "NetworkReceiveThroughput"
	// RelationalDatabaseMetricNameNetworkTransmitThroughput is a possible RelationalDatabaseMetricName value.
	RelationalDatabaseMetricNameNetworkTransmitThroughput = "NetworkTransmitThroughput"
)

// RelationalDatabaseMetricName_Values lists every value defined for the
// RelationalDatabaseMetricName enum, in declaration order.
func RelationalDatabaseMetricName_Values() []string {
	return []string{
		RelationalDatabaseMetricNameCpuutilization, RelationalDatabaseMetricNameDatabaseConnections,
		RelationalDatabaseMetricNameDiskQueueDepth, RelationalDatabaseMetricNameFreeStorageSpace,
		RelationalDatabaseMetricNameNetworkReceiveThroughput, RelationalDatabaseMetricNameNetworkTransmitThroughput,
	}
}
+
+const (
+ // RelationalDatabasePasswordVersionCurrent is a RelationalDatabasePasswordVersion enum value
+ RelationalDatabasePasswordVersionCurrent = "CURRENT"
+
+ // RelationalDatabasePasswordVersionPrevious is a RelationalDatabasePasswordVersion enum value
+ RelationalDatabasePasswordVersionPrevious = "PREVIOUS"
+
+ // RelationalDatabasePasswordVersionPending is a RelationalDatabasePasswordVersion enum value
+ RelationalDatabasePasswordVersionPending = "PENDING"
+)
+
+// RelationalDatabasePasswordVersion_Values returns all elements of the RelationalDatabasePasswordVersion enum
+func RelationalDatabasePasswordVersion_Values() []string {
+ return []string{
+ RelationalDatabasePasswordVersionCurrent,
+ RelationalDatabasePasswordVersionPrevious,
+ RelationalDatabasePasswordVersionPending,
+ }
+}
+
+const (
+ // RenewalStatusPendingAutoRenewal is a RenewalStatus enum value
+ RenewalStatusPendingAutoRenewal = "PendingAutoRenewal"
+
+ // RenewalStatusPendingValidation is a RenewalStatus enum value
+ RenewalStatusPendingValidation = "PendingValidation"
+
+ // RenewalStatusSuccess is a RenewalStatus enum value
+ RenewalStatusSuccess = "Success"
+
+ // RenewalStatusFailed is a RenewalStatus enum value
+ RenewalStatusFailed = "Failed"
+)
+
+// RenewalStatus_Values returns all elements of the RenewalStatus enum
+func RenewalStatus_Values() []string {
+ return []string{
+ RenewalStatusPendingAutoRenewal,
+ RenewalStatusPendingValidation,
+ RenewalStatusSuccess,
+ RenewalStatusFailed,
+ }
+}
+
+const (
+ // ResourceBucketAccessAllow is a ResourceBucketAccess enum value
+ ResourceBucketAccessAllow = "allow"
+
+ // ResourceBucketAccessDeny is a ResourceBucketAccess enum value
+ ResourceBucketAccessDeny = "deny"
+)
+
+// ResourceBucketAccess_Values returns all elements of the ResourceBucketAccess enum
+func ResourceBucketAccess_Values() []string {
+ return []string{
+ ResourceBucketAccessAllow,
+ ResourceBucketAccessDeny,
+ }
+}
+
+const (
+ // ResourceTypeContainerService is a ResourceType enum value
+ ResourceTypeContainerService = "ContainerService"
+
+ // ResourceTypeInstance is a ResourceType enum value
+ ResourceTypeInstance = "Instance"
+
+ // ResourceTypeStaticIp is a ResourceType enum value
+ ResourceTypeStaticIp = "StaticIp"
+
+ // ResourceTypeKeyPair is a ResourceType enum value
+ ResourceTypeKeyPair = "KeyPair"
+
+ // ResourceTypeInstanceSnapshot is a ResourceType enum value
+ ResourceTypeInstanceSnapshot = "InstanceSnapshot"
+
+ // ResourceTypeDomain is a ResourceType enum value
+ ResourceTypeDomain = "Domain"
+
+ // ResourceTypePeeredVpc is a ResourceType enum value
+ ResourceTypePeeredVpc = "PeeredVpc"
+
+ // ResourceTypeLoadBalancer is a ResourceType enum value
+ ResourceTypeLoadBalancer = "LoadBalancer"
+
+ // ResourceTypeLoadBalancerTlsCertificate is a ResourceType enum value
+ ResourceTypeLoadBalancerTlsCertificate = "LoadBalancerTlsCertificate"
+
+ // ResourceTypeDisk is a ResourceType enum value
+ ResourceTypeDisk = "Disk"
+
+ // ResourceTypeDiskSnapshot is a ResourceType enum value
+ ResourceTypeDiskSnapshot = "DiskSnapshot"
+
+ // ResourceTypeRelationalDatabase is a ResourceType enum value
+ ResourceTypeRelationalDatabase = "RelationalDatabase"
+
+ // ResourceTypeRelationalDatabaseSnapshot is a ResourceType enum value
+ ResourceTypeRelationalDatabaseSnapshot = "RelationalDatabaseSnapshot"
+
+ // ResourceTypeExportSnapshotRecord is a ResourceType enum value
+ ResourceTypeExportSnapshotRecord = "ExportSnapshotRecord"
+
+ // ResourceTypeCloudFormationStackRecord is a ResourceType enum value
+ ResourceTypeCloudFormationStackRecord = "CloudFormationStackRecord"
+
+ // ResourceTypeAlarm is a ResourceType enum value
+ ResourceTypeAlarm = "Alarm"
+
+ // ResourceTypeContactMethod is a ResourceType enum value
+ ResourceTypeContactMethod = "ContactMethod"
+
+ // ResourceTypeDistribution is a ResourceType enum value
+ ResourceTypeDistribution = "Distribution"
+
+ // ResourceTypeCertificate is a ResourceType enum value
+ ResourceTypeCertificate = "Certificate"
+
+ // ResourceTypeBucket is a ResourceType enum value
+ ResourceTypeBucket = "Bucket"
+)
+
+// ResourceType_Values returns all elements of the ResourceType enum
+func ResourceType_Values() []string {
+ return []string{
+ ResourceTypeContainerService,
+ ResourceTypeInstance,
+ ResourceTypeStaticIp,
+ ResourceTypeKeyPair,
+ ResourceTypeInstanceSnapshot,
+ ResourceTypeDomain,
+ ResourceTypePeeredVpc,
+ ResourceTypeLoadBalancer,
+ ResourceTypeLoadBalancerTlsCertificate,
+ ResourceTypeDisk,
+ ResourceTypeDiskSnapshot,
+ ResourceTypeRelationalDatabase,
+ ResourceTypeRelationalDatabaseSnapshot,
+ ResourceTypeExportSnapshotRecord,
+ ResourceTypeCloudFormationStackRecord,
+ ResourceTypeAlarm,
+ ResourceTypeContactMethod,
+ ResourceTypeDistribution,
+ ResourceTypeCertificate,
+ ResourceTypeBucket,
+ }
+}
+
+const (
+ // StatusTypeActive is a StatusType enum value
+ StatusTypeActive = "Active"
+
+ // StatusTypeInactive is a StatusType enum value
+ StatusTypeInactive = "Inactive"
+)
+
+// StatusType_Values returns all elements of the StatusType enum
+func StatusType_Values() []string {
+ return []string{
+ StatusTypeActive,
+ StatusTypeInactive,
+ }
+}
+
+const (
+ // TreatMissingDataBreaching is a TreatMissingData enum value
+ TreatMissingDataBreaching = "breaching"
+
+ // TreatMissingDataNotBreaching is a TreatMissingData enum value
+ TreatMissingDataNotBreaching = "notBreaching"
+
+ // TreatMissingDataIgnore is a TreatMissingData enum value
+ TreatMissingDataIgnore = "ignore"
+
+ // TreatMissingDataMissing is a TreatMissingData enum value
+ TreatMissingDataMissing = "missing"
+)
+
+// TreatMissingData_Values returns all elements of the TreatMissingData enum
+func TreatMissingData_Values() []string {
+ return []string{
+ TreatMissingDataBreaching,
+ TreatMissingDataNotBreaching,
+ TreatMissingDataIgnore,
+ TreatMissingDataMissing,
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/lightsail/doc.go b/vendor/github.com/aws/aws-sdk-go/service/lightsail/doc.go
new file mode 100644
index 000000000..73c35ed0a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/lightsail/doc.go
@@ -0,0 +1,45 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package lightsail provides the client and types for making API
+// requests to Amazon Lightsail.
+//
+// Amazon Lightsail is the easiest way to get started with Amazon Web Services
+// (AWS) for developers who need to build websites or web applications. It includes
+// everything you need to launch your project quickly - instances (virtual private
+// servers), container services, storage buckets, managed databases, SSD-based
+// block storage, static IP addresses, load balancers, content delivery network
+// (CDN) distributions, DNS management of registered domains, and resource snapshots
+// (backups) - for a low, predictable monthly price.
+//
+// You can manage your Lightsail resources using the Lightsail console, Lightsail
+// API, AWS Command Line Interface (AWS CLI), or SDKs. For more information
+// about Lightsail concepts and tasks, see the Amazon Lightsail Developer Guide
+// (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/lightsail-how-to-set-up-access-keys-to-use-sdk-api-cli).
+//
+// This API Reference provides detailed information about the actions, data
+// types, parameters, and errors of the Lightsail service. For more information
+// about the supported AWS Regions, endpoints, and service quotas of the Lightsail
+// service, see Amazon Lightsail Endpoints and Quotas (https://docs.aws.amazon.com/general/latest/gr/lightsail.html)
+// in the AWS General Reference.
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28 for more information on this service.
+//
+// See lightsail package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/lightsail/
+//
+// Using the Client
+//
+// To contact Amazon Lightsail with the SDK use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the Amazon Lightsail client Lightsail for more
+// information on creating client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/lightsail/#New
+package lightsail
diff --git a/vendor/github.com/aws/aws-sdk-go/service/lightsail/errors.go b/vendor/github.com/aws/aws-sdk-go/service/lightsail/errors.go
new file mode 100644
index 000000000..f390e9f56
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/lightsail/errors.go
@@ -0,0 +1,69 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package lightsail
+
+import (
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+const (
+
+ // ErrCodeAccessDeniedException for service response error code
+ // "AccessDeniedException".
+ //
+ // Lightsail throws this exception when the user cannot be authenticated or
+ // uses invalid credentials to access a resource.
+ ErrCodeAccessDeniedException = "AccessDeniedException"
+
+ // ErrCodeAccountSetupInProgressException for service response error code
+ // "AccountSetupInProgressException".
+ //
+ // Lightsail throws this exception when an account is still in the setup in
+ // progress state.
+ ErrCodeAccountSetupInProgressException = "AccountSetupInProgressException"
+
+ // ErrCodeInvalidInputException for service response error code
+ // "InvalidInputException".
+ //
+ // Lightsail throws this exception when user input does not conform to the validation
+ // rules of an input field.
+ //
+ // Domain and distribution APIs are only available in the N. Virginia (us-east-1)
+ // AWS Region. Please set your AWS Region configuration to us-east-1 to create,
+ // view, or edit these resources.
+ ErrCodeInvalidInputException = "InvalidInputException"
+
+ // ErrCodeNotFoundException for service response error code
+ // "NotFoundException".
+ //
+ // Lightsail throws this exception when it cannot find a resource.
+ ErrCodeNotFoundException = "NotFoundException"
+
+ // ErrCodeOperationFailureException for service response error code
+ // "OperationFailureException".
+ //
+ // Lightsail throws this exception when an operation fails to execute.
+ ErrCodeOperationFailureException = "OperationFailureException"
+
+ // ErrCodeServiceException for service response error code
+ // "ServiceException".
+ //
+ // A general service exception.
+ ErrCodeServiceException = "ServiceException"
+
+ // ErrCodeUnauthenticatedException for service response error code
+ // "UnauthenticatedException".
+ //
+ // Lightsail throws this exception when the user has not been authenticated.
+ ErrCodeUnauthenticatedException = "UnauthenticatedException"
+)
+
+var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
+ "AccessDeniedException": newErrorAccessDeniedException,
+ "AccountSetupInProgressException": newErrorAccountSetupInProgressException,
+ "InvalidInputException": newErrorInvalidInputException,
+ "NotFoundException": newErrorNotFoundException,
+ "OperationFailureException": newErrorOperationFailureException,
+ "ServiceException": newErrorServiceException,
+ "UnauthenticatedException": newErrorUnauthenticatedException,
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/lightsail/service.go b/vendor/github.com/aws/aws-sdk-go/service/lightsail/service.go
new file mode 100644
index 000000000..171dd9999
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/lightsail/service.go
@@ -0,0 +1,103 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package lightsail
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol"
+ "github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+// Lightsail provides the API operation methods for making requests to
+// Amazon Lightsail. See this package's package overview docs
+// for details on the service.
+//
+// Lightsail methods are safe to use concurrently. It is not safe to
+// modify mutate any of the struct's properties though.
+type Lightsail struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+ ServiceName = "lightsail" // Name of service.
+ EndpointsID = ServiceName // ID to lookup a service endpoint with.
+ ServiceID = "Lightsail" // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the Lightsail client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// mySession := session.Must(session.NewSession())
+//
+// // Create a Lightsail client from just a session.
+// svc := lightsail.New(mySession)
+//
+// // Create a Lightsail client with additional configuration
+// svc := lightsail.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *Lightsail {
+ c := p.ClientConfig(EndpointsID, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Lightsail {
+ svc := &Lightsail{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ ServiceID: ServiceID,
+ SigningName: signingName,
+ SigningRegion: signingRegion,
+ PartitionID: partitionID,
+ Endpoint: endpoint,
+ APIVersion: "2016-11-28",
+ JSONVersion: "1.1",
+ TargetPrefix: "Lightsail_20161128",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+ svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler)
+ svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
+ svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
+ svc.Handlers.UnmarshalError.PushBackNamed(
+ protocol.NewUnmarshalErrorHandler(jsonrpc.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(),
+ )
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for a Lightsail operation and runs any
+// custom request initialization.
+func (c *Lightsail) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(req)
+ }
+
+ return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/api.go b/vendor/github.com/aws/aws-sdk-go/service/sso/api.go
new file mode 100644
index 000000000..4498f285e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sso/api.go
@@ -0,0 +1,1210 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sso
+
+import (
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol"
+ "github.com/aws/aws-sdk-go/private/protocol/restjson"
+)
+
+const opGetRoleCredentials = "GetRoleCredentials"
+
+// GetRoleCredentialsRequest generates a "aws/request.Request" representing the
+// client's request for the GetRoleCredentials operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetRoleCredentials for more information on using the GetRoleCredentials
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetRoleCredentialsRequest method.
+// req, resp := client.GetRoleCredentialsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentials
+func (c *SSO) GetRoleCredentialsRequest(input *GetRoleCredentialsInput) (req *request.Request, output *GetRoleCredentialsOutput) {
+ op := &request.Operation{
+ Name: opGetRoleCredentials,
+ HTTPMethod: "GET",
+ HTTPPath: "/federation/credentials",
+ }
+
+ if input == nil {
+ input = &GetRoleCredentialsInput{}
+ }
+
+ output = &GetRoleCredentialsOutput{}
+ req = c.newRequest(op, input, output)
+ req.Config.Credentials = credentials.AnonymousCredentials
+ return
+}
+
+// GetRoleCredentials API operation for AWS Single Sign-On.
+//
+// Returns the STS short-term credentials for a given role name that is assigned
+// to the user.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Single Sign-On's
+// API operation GetRoleCredentials for usage and error information.
+//
+// Returned Error Types:
+// * InvalidRequestException
+// Indicates that a problem occurred with the input to the request. For example,
+// a required parameter might be missing or out of range.
+//
+// * UnauthorizedException
+// Indicates that the request is not authorized. This can happen due to an invalid
+// access token in the request.
+//
+// * TooManyRequestsException
+// Indicates that the request is being made too frequently and is more than
+// what the server can handle.
+//
+// * ResourceNotFoundException
+// The specified resource doesn't exist.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentials
+func (c *SSO) GetRoleCredentials(input *GetRoleCredentialsInput) (*GetRoleCredentialsOutput, error) {
+ req, out := c.GetRoleCredentialsRequest(input)
+ return out, req.Send()
+}
+
+// GetRoleCredentialsWithContext is the same as GetRoleCredentials with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetRoleCredentials for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SSO) GetRoleCredentialsWithContext(ctx aws.Context, input *GetRoleCredentialsInput, opts ...request.Option) (*GetRoleCredentialsOutput, error) {
+ req, out := c.GetRoleCredentialsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opListAccountRoles = "ListAccountRoles"
+
+// ListAccountRolesRequest generates a "aws/request.Request" representing the
+// client's request for the ListAccountRoles operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListAccountRoles for more information on using the ListAccountRoles
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the ListAccountRolesRequest method.
+// req, resp := client.ListAccountRolesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRoles
+func (c *SSO) ListAccountRolesRequest(input *ListAccountRolesInput) (req *request.Request, output *ListAccountRolesOutput) {
+ op := &request.Operation{
+ Name: opListAccountRoles,
+ HTTPMethod: "GET",
+ HTTPPath: "/assignment/roles",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"nextToken"},
+ OutputTokens: []string{"nextToken"},
+ LimitToken: "maxResults",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &ListAccountRolesInput{}
+ }
+
+ output = &ListAccountRolesOutput{}
+ req = c.newRequest(op, input, output)
+ req.Config.Credentials = credentials.AnonymousCredentials
+ return
+}
+
+// ListAccountRoles API operation for AWS Single Sign-On.
+//
+// Lists all roles that are assigned to the user for a given AWS account.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Single Sign-On's
+// API operation ListAccountRoles for usage and error information.
+//
+// Returned Error Types:
+// * InvalidRequestException
+// Indicates that a problem occurred with the input to the request. For example,
+// a required parameter might be missing or out of range.
+//
+// * UnauthorizedException
+// Indicates that the request is not authorized. This can happen due to an invalid
+// access token in the request.
+//
+// * TooManyRequestsException
+// Indicates that the request is being made too frequently and is more than
+// what the server can handle.
+//
+// * ResourceNotFoundException
+// The specified resource doesn't exist.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRoles
+func (c *SSO) ListAccountRoles(input *ListAccountRolesInput) (*ListAccountRolesOutput, error) {
+ req, out := c.ListAccountRolesRequest(input)
+ return out, req.Send()
+}
+
+// ListAccountRolesWithContext is the same as ListAccountRoles with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListAccountRoles for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SSO) ListAccountRolesWithContext(ctx aws.Context, input *ListAccountRolesInput, opts ...request.Option) (*ListAccountRolesOutput, error) {
+ req, out := c.ListAccountRolesRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// ListAccountRolesPages iterates over the pages of a ListAccountRoles operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListAccountRoles method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListAccountRoles operation.
+// pageNum := 0
+// err := client.ListAccountRolesPages(params,
+// func(page *sso.ListAccountRolesOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *SSO) ListAccountRolesPages(input *ListAccountRolesInput, fn func(*ListAccountRolesOutput, bool) bool) error {
+ return c.ListAccountRolesPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListAccountRolesPagesWithContext same as ListAccountRolesPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SSO) ListAccountRolesPagesWithContext(ctx aws.Context, input *ListAccountRolesInput, fn func(*ListAccountRolesOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListAccountRolesInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListAccountRolesRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*ListAccountRolesOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
+const opListAccounts = "ListAccounts"
+
+// ListAccountsRequest generates a "aws/request.Request" representing the
+// client's request for the ListAccounts operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListAccounts for more information on using the ListAccounts
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the ListAccountsRequest method.
+// req, resp := client.ListAccountsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccounts
+func (c *SSO) ListAccountsRequest(input *ListAccountsInput) (req *request.Request, output *ListAccountsOutput) {
+ op := &request.Operation{
+ Name: opListAccounts,
+ HTTPMethod: "GET",
+ HTTPPath: "/assignment/accounts",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"nextToken"},
+ OutputTokens: []string{"nextToken"},
+ LimitToken: "maxResults",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &ListAccountsInput{}
+ }
+
+ output = &ListAccountsOutput{}
+ req = c.newRequest(op, input, output)
+ req.Config.Credentials = credentials.AnonymousCredentials
+ return
+}
+
+// ListAccounts API operation for AWS Single Sign-On.
+//
+// Lists all AWS accounts assigned to the user. These AWS accounts are assigned
+// by the administrator of the account. For more information, see Assign User
+// Access (https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers)
+// in the AWS SSO User Guide. This operation returns a paginated response.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Single Sign-On's
+// API operation ListAccounts for usage and error information.
+//
+// Returned Error Types:
+// * InvalidRequestException
+// Indicates that a problem occurred with the input to the request. For example,
+// a required parameter might be missing or out of range.
+//
+// * UnauthorizedException
+// Indicates that the request is not authorized. This can happen due to an invalid
+// access token in the request.
+//
+// * TooManyRequestsException
+// Indicates that the request is being made too frequently and is more than
+// what the server can handle.
+//
+// * ResourceNotFoundException
+// The specified resource doesn't exist.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccounts
+func (c *SSO) ListAccounts(input *ListAccountsInput) (*ListAccountsOutput, error) {
+ req, out := c.ListAccountsRequest(input)
+ return out, req.Send()
+}
+
+// ListAccountsWithContext is the same as ListAccounts with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListAccounts for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SSO) ListAccountsWithContext(ctx aws.Context, input *ListAccountsInput, opts ...request.Option) (*ListAccountsOutput, error) {
+ req, out := c.ListAccountsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// ListAccountsPages iterates over the pages of a ListAccounts operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListAccounts method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListAccounts operation.
+// pageNum := 0
+// err := client.ListAccountsPages(params,
+// func(page *sso.ListAccountsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *SSO) ListAccountsPages(input *ListAccountsInput, fn func(*ListAccountsOutput, bool) bool) error {
+ return c.ListAccountsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListAccountsPagesWithContext same as ListAccountsPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SSO) ListAccountsPagesWithContext(ctx aws.Context, input *ListAccountsInput, fn func(*ListAccountsOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListAccountsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListAccountsRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*ListAccountsOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
+const opLogout = "Logout"
+
+// LogoutRequest generates a "aws/request.Request" representing the
+// client's request for the Logout operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See Logout for more information on using the Logout
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the LogoutRequest method.
+// req, resp := client.LogoutRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/Logout
+func (c *SSO) LogoutRequest(input *LogoutInput) (req *request.Request, output *LogoutOutput) {
+ op := &request.Operation{
+ Name: opLogout,
+ HTTPMethod: "POST",
+ HTTPPath: "/logout",
+ }
+
+ if input == nil {
+ input = &LogoutInput{}
+ }
+
+ output = &LogoutOutput{}
+ req = c.newRequest(op, input, output)
+ req.Config.Credentials = credentials.AnonymousCredentials
+ req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// Logout API operation for AWS Single Sign-On.
+//
+// Removes the client- and server-side session that is associated with the user.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Single Sign-On's
+// API operation Logout for usage and error information.
+//
+// Returned Error Types:
+// * InvalidRequestException
+// Indicates that a problem occurred with the input to the request. For example,
+// a required parameter might be missing or out of range.
+//
+// * UnauthorizedException
+// Indicates that the request is not authorized. This can happen due to an invalid
+// access token in the request.
+//
+// * TooManyRequestsException
+// Indicates that the request is being made too frequently and is more than
+// what the server can handle.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/Logout
+func (c *SSO) Logout(input *LogoutInput) (*LogoutOutput, error) {
+ req, out := c.LogoutRequest(input)
+ return out, req.Send()
+}
+
+// LogoutWithContext is the same as Logout with the addition of
+// the ability to pass a context and additional request options.
+//
+// See Logout for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SSO) LogoutWithContext(ctx aws.Context, input *LogoutInput, opts ...request.Option) (*LogoutOutput, error) {
+ req, out := c.LogoutRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// Provides information about your AWS account.
+type AccountInfo struct {
+ _ struct{} `type:"structure"`
+
+ // The identifier of the AWS account that is assigned to the user.
+ AccountId *string `locationName:"accountId" type:"string"`
+
+ // The display name of the AWS account that is assigned to the user.
+ AccountName *string `locationName:"accountName" type:"string"`
+
+ // The email address of the AWS account that is assigned to the user.
+ EmailAddress *string `locationName:"emailAddress" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s AccountInfo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AccountInfo) GoString() string {
+ return s.String()
+}
+
+// SetAccountId sets the AccountId field's value.
+func (s *AccountInfo) SetAccountId(v string) *AccountInfo {
+ s.AccountId = &v
+ return s
+}
+
+// SetAccountName sets the AccountName field's value.
+func (s *AccountInfo) SetAccountName(v string) *AccountInfo {
+ s.AccountName = &v
+ return s
+}
+
+// SetEmailAddress sets the EmailAddress field's value.
+func (s *AccountInfo) SetEmailAddress(v string) *AccountInfo {
+ s.EmailAddress = &v
+ return s
+}
+
+type GetRoleCredentialsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The token issued by the CreateToken API call. For more information, see CreateToken
+ // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
+ // in the AWS SSO OIDC API Reference Guide.
+ //
+ // AccessToken is a required field
+ AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"`
+
+ // The identifier for the AWS account that is assigned to the user.
+ //
+ // AccountId is a required field
+ AccountId *string `location:"querystring" locationName:"account_id" type:"string" required:"true"`
+
+ // The friendly name of the role that is assigned to the user.
+ //
+ // RoleName is a required field
+ RoleName *string `location:"querystring" locationName:"role_name" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetRoleCredentialsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetRoleCredentialsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetRoleCredentialsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetRoleCredentialsInput"}
+ if s.AccessToken == nil {
+ invalidParams.Add(request.NewErrParamRequired("AccessToken"))
+ }
+ if s.AccountId == nil {
+ invalidParams.Add(request.NewErrParamRequired("AccountId"))
+ }
+ if s.RoleName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RoleName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAccessToken sets the AccessToken field's value.
+func (s *GetRoleCredentialsInput) SetAccessToken(v string) *GetRoleCredentialsInput {
+ s.AccessToken = &v
+ return s
+}
+
+// SetAccountId sets the AccountId field's value.
+func (s *GetRoleCredentialsInput) SetAccountId(v string) *GetRoleCredentialsInput {
+ s.AccountId = &v
+ return s
+}
+
+// SetRoleName sets the RoleName field's value.
+func (s *GetRoleCredentialsInput) SetRoleName(v string) *GetRoleCredentialsInput {
+ s.RoleName = &v
+ return s
+}
+
+type GetRoleCredentialsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The credentials for the role that is assigned to the user.
+ RoleCredentials *RoleCredentials `locationName:"roleCredentials" type:"structure"`
+}
+
+// String returns the string representation
+func (s GetRoleCredentialsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetRoleCredentialsOutput) GoString() string {
+ return s.String()
+}
+
+// SetRoleCredentials sets the RoleCredentials field's value.
+func (s *GetRoleCredentialsOutput) SetRoleCredentials(v *RoleCredentials) *GetRoleCredentialsOutput {
+ s.RoleCredentials = v
+ return s
+}
+
+// Indicates that a problem occurred with the input to the request. For example,
+// a required parameter might be missing or out of range.
+type InvalidRequestException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation
+func (s InvalidRequestException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InvalidRequestException) GoString() string {
+ return s.String()
+}
+
+func newErrorInvalidRequestException(v protocol.ResponseMetadata) error {
+ return &InvalidRequestException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *InvalidRequestException) Code() string {
+ return "InvalidRequestException"
+}
+
+// Message returns the exception's message.
+func (s *InvalidRequestException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidRequestException) OrigErr() error {
+ return nil
+}
+
+func (s *InvalidRequestException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *InvalidRequestException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *InvalidRequestException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+type ListAccountRolesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The token issued by the CreateToken API call. For more information, see CreateToken
+ // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
+ // in the AWS SSO OIDC API Reference Guide.
+ //
+ // AccessToken is a required field
+ AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"`
+
+ // The identifier for the AWS account that is assigned to the user.
+ //
+ // AccountId is a required field
+ AccountId *string `location:"querystring" locationName:"account_id" type:"string" required:"true"`
+
+ // The number of items that clients can request per page.
+ MaxResults *int64 `location:"querystring" locationName:"max_result" min:"1" type:"integer"`
+
+ // The page token from the previous response output when you request subsequent
+ // pages.
+ NextToken *string `location:"querystring" locationName:"next_token" type:"string"`
+}
+
+// String returns the string representation
+func (s ListAccountRolesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListAccountRolesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListAccountRolesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListAccountRolesInput"}
+ if s.AccessToken == nil {
+ invalidParams.Add(request.NewErrParamRequired("AccessToken"))
+ }
+ if s.AccountId == nil {
+ invalidParams.Add(request.NewErrParamRequired("AccountId"))
+ }
+ if s.MaxResults != nil && *s.MaxResults < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAccessToken sets the AccessToken field's value.
+func (s *ListAccountRolesInput) SetAccessToken(v string) *ListAccountRolesInput {
+ s.AccessToken = &v
+ return s
+}
+
+// SetAccountId sets the AccountId field's value.
+func (s *ListAccountRolesInput) SetAccountId(v string) *ListAccountRolesInput {
+ s.AccountId = &v
+ return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *ListAccountRolesInput) SetMaxResults(v int64) *ListAccountRolesInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListAccountRolesInput) SetNextToken(v string) *ListAccountRolesInput {
+ s.NextToken = &v
+ return s
+}
+
+type ListAccountRolesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The page token client that is used to retrieve the list of accounts.
+ NextToken *string `locationName:"nextToken" type:"string"`
+
+ // A paginated response with the list of roles and the next token if more results
+ // are available.
+ RoleList []*RoleInfo `locationName:"roleList" type:"list"`
+}
+
+// String returns the string representation
+func (s ListAccountRolesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListAccountRolesOutput) GoString() string {
+ return s.String()
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListAccountRolesOutput) SetNextToken(v string) *ListAccountRolesOutput {
+ s.NextToken = &v
+ return s
+}
+
+// SetRoleList sets the RoleList field's value.
+func (s *ListAccountRolesOutput) SetRoleList(v []*RoleInfo) *ListAccountRolesOutput {
+ s.RoleList = v
+ return s
+}
+
+type ListAccountsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The token issued by the CreateToken API call. For more information, see CreateToken
+ // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
+ // in the AWS SSO OIDC API Reference Guide.
+ //
+ // AccessToken is a required field
+ AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"`
+
+ // This is the number of items clients can request per page.
+ MaxResults *int64 `location:"querystring" locationName:"max_result" min:"1" type:"integer"`
+
+ // (Optional) When requesting subsequent pages, this is the page token from
+ // the previous response output.
+ NextToken *string `location:"querystring" locationName:"next_token" type:"string"`
+}
+
+// String returns the string representation
+func (s ListAccountsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListAccountsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListAccountsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListAccountsInput"}
+ if s.AccessToken == nil {
+ invalidParams.Add(request.NewErrParamRequired("AccessToken"))
+ }
+ if s.MaxResults != nil && *s.MaxResults < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAccessToken sets the AccessToken field's value.
+func (s *ListAccountsInput) SetAccessToken(v string) *ListAccountsInput {
+ s.AccessToken = &v
+ return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *ListAccountsInput) SetMaxResults(v int64) *ListAccountsInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListAccountsInput) SetNextToken(v string) *ListAccountsInput {
+ s.NextToken = &v
+ return s
+}
+
+type ListAccountsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A paginated response with the list of account information and the next token
+ // if more results are available.
+ AccountList []*AccountInfo `locationName:"accountList" type:"list"`
+
+ // The page token client that is used to retrieve the list of accounts.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListAccountsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListAccountsOutput) GoString() string {
+ return s.String()
+}
+
+// SetAccountList sets the AccountList field's value.
+func (s *ListAccountsOutput) SetAccountList(v []*AccountInfo) *ListAccountsOutput {
+ s.AccountList = v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListAccountsOutput) SetNextToken(v string) *ListAccountsOutput {
+ s.NextToken = &v
+ return s
+}
+
+type LogoutInput struct {
+ _ struct{} `type:"structure"`
+
+ // The token issued by the CreateToken API call. For more information, see CreateToken
+ // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
+ // in the AWS SSO OIDC API Reference Guide.
+ //
+ // AccessToken is a required field
+ AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"`
+}
+
+// String returns the string representation
+func (s LogoutInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LogoutInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LogoutInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "LogoutInput"}
+ if s.AccessToken == nil {
+ invalidParams.Add(request.NewErrParamRequired("AccessToken"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAccessToken sets the AccessToken field's value.
+func (s *LogoutInput) SetAccessToken(v string) *LogoutInput {
+ s.AccessToken = &v
+ return s
+}
+
+type LogoutOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s LogoutOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LogoutOutput) GoString() string {
+ return s.String()
+}
+
+// The specified resource doesn't exist.
+type ResourceNotFoundException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation
+func (s ResourceNotFoundException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ResourceNotFoundException) GoString() string {
+ return s.String()
+}
+
+func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error {
+ return &ResourceNotFoundException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *ResourceNotFoundException) Code() string {
+ return "ResourceNotFoundException"
+}
+
+// Message returns the exception's message.
+func (s *ResourceNotFoundException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *ResourceNotFoundException) OrigErr() error {
+ return nil
+}
+
+func (s *ResourceNotFoundException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *ResourceNotFoundException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *ResourceNotFoundException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// Provides information about the role credentials that are assigned to the
+// user.
+type RoleCredentials struct {
+ _ struct{} `type:"structure"`
+
+ // The identifier used for the temporary security credentials. For more information,
+ // see Using Temporary Security Credentials to Request Access to AWS Resources
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html)
+ // in the AWS IAM User Guide.
+ AccessKeyId *string `locationName:"accessKeyId" type:"string"`
+
+ // The date on which temporary security credentials expire.
+ Expiration *int64 `locationName:"expiration" type:"long"`
+
+ // The key that is used to sign the request. For more information, see Using
+ // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html)
+ // in the AWS IAM User Guide.
+ SecretAccessKey *string `locationName:"secretAccessKey" type:"string" sensitive:"true"`
+
+ // The token used for temporary credentials. For more information, see Using
+ // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html)
+ // in the AWS IAM User Guide.
+ SessionToken *string `locationName:"sessionToken" type:"string" sensitive:"true"`
+}
+
+// String returns the string representation
+func (s RoleCredentials) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RoleCredentials) GoString() string {
+ return s.String()
+}
+
+// SetAccessKeyId sets the AccessKeyId field's value.
+func (s *RoleCredentials) SetAccessKeyId(v string) *RoleCredentials {
+ s.AccessKeyId = &v
+ return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *RoleCredentials) SetExpiration(v int64) *RoleCredentials {
+ s.Expiration = &v
+ return s
+}
+
+// SetSecretAccessKey sets the SecretAccessKey field's value.
+func (s *RoleCredentials) SetSecretAccessKey(v string) *RoleCredentials {
+ s.SecretAccessKey = &v
+ return s
+}
+
+// SetSessionToken sets the SessionToken field's value.
+func (s *RoleCredentials) SetSessionToken(v string) *RoleCredentials {
+ s.SessionToken = &v
+ return s
+}
+
+// Provides information about the role that is assigned to the user.
+type RoleInfo struct {
+ _ struct{} `type:"structure"`
+
+ // The identifier of the AWS account assigned to the user.
+ AccountId *string `locationName:"accountId" type:"string"`
+
+ // The friendly name of the role that is assigned to the user.
+ RoleName *string `locationName:"roleName" type:"string"`
+}
+
+// String returns the string representation
+func (s RoleInfo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RoleInfo) GoString() string {
+ return s.String()
+}
+
+// SetAccountId sets the AccountId field's value.
+func (s *RoleInfo) SetAccountId(v string) *RoleInfo {
+ s.AccountId = &v
+ return s
+}
+
+// SetRoleName sets the RoleName field's value.
+func (s *RoleInfo) SetRoleName(v string) *RoleInfo {
+ s.RoleName = &v
+ return s
+}
+
+// Indicates that the request is being made too frequently and is more than
+// what the server can handle.
+type TooManyRequestsException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation
+func (s TooManyRequestsException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TooManyRequestsException) GoString() string {
+ return s.String()
+}
+
+func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error {
+ return &TooManyRequestsException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *TooManyRequestsException) Code() string {
+ return "TooManyRequestsException"
+}
+
+// Message returns the exception's message.
+func (s *TooManyRequestsException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *TooManyRequestsException) OrigErr() error {
+ return nil
+}
+
+func (s *TooManyRequestsException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *TooManyRequestsException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *TooManyRequestsException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// Indicates that the request is not authorized. This can happen due to an invalid
+// access token in the request.
+type UnauthorizedException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation
+func (s UnauthorizedException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UnauthorizedException) GoString() string {
+ return s.String()
+}
+
+func newErrorUnauthorizedException(v protocol.ResponseMetadata) error {
+ return &UnauthorizedException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *UnauthorizedException) Code() string {
+ return "UnauthorizedException"
+}
+
+// Message returns the exception's message.
+func (s *UnauthorizedException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *UnauthorizedException) OrigErr() error {
+ return nil
+}
+
+func (s *UnauthorizedException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *UnauthorizedException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *UnauthorizedException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go
new file mode 100644
index 000000000..92d82b2af
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go
@@ -0,0 +1,44 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package sso provides the client and types for making API
+// requests to AWS Single Sign-On.
+//
+// AWS Single Sign-On Portal is a web service that makes it easy for you to
+// assign user access to AWS SSO resources such as the user portal. Users can
+// get AWS account applications and roles assigned to them and get federated
+// into the application.
+//
+// For general information about AWS SSO, see What is AWS Single Sign-On? (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html)
+// in the AWS SSO User Guide.
+//
+// This API reference guide describes the AWS SSO Portal operations that you
+// can call programatically and includes detailed information on data types
+// and errors.
+//
+// AWS provides SDKs that consist of libraries and sample code for various programming
+// languages and platforms, such as Java, Ruby, .Net, iOS, or Android. The SDKs
+// provide a convenient way to create programmatic access to AWS SSO and other
+// AWS services. For more information about the AWS SDKs, including how to download
+// and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/).
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10 for more information on this service.
+//
+// See sso package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sso/
+//
+// Using the Client
+//
+// To contact AWS Single Sign-On with the SDK use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the AWS Single Sign-On client SSO for more
+// information on creating client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sso/#New
+package sso
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go
new file mode 100644
index 000000000..77a6792e3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go
@@ -0,0 +1,44 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sso
+
+import (
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+const (
+
+ // ErrCodeInvalidRequestException for service response error code
+ // "InvalidRequestException".
+ //
+ // Indicates that a problem occurred with the input to the request. For example,
+ // a required parameter might be missing or out of range.
+ ErrCodeInvalidRequestException = "InvalidRequestException"
+
+ // ErrCodeResourceNotFoundException for service response error code
+ // "ResourceNotFoundException".
+ //
+ // The specified resource doesn't exist.
+ ErrCodeResourceNotFoundException = "ResourceNotFoundException"
+
+ // ErrCodeTooManyRequestsException for service response error code
+ // "TooManyRequestsException".
+ //
+ // Indicates that the request is being made too frequently and is more than
+ // what the server can handle.
+ ErrCodeTooManyRequestsException = "TooManyRequestsException"
+
+ // ErrCodeUnauthorizedException for service response error code
+ // "UnauthorizedException".
+ //
+ // Indicates that the request is not authorized. This can happen due to an invalid
+ // access token in the request.
+ ErrCodeUnauthorizedException = "UnauthorizedException"
+)
+
+var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
+ "InvalidRequestException": newErrorInvalidRequestException,
+ "ResourceNotFoundException": newErrorResourceNotFoundException,
+ "TooManyRequestsException": newErrorTooManyRequestsException,
+ "UnauthorizedException": newErrorUnauthorizedException,
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/service.go b/vendor/github.com/aws/aws-sdk-go/service/sso/service.go
new file mode 100644
index 000000000..35175331f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sso/service.go
@@ -0,0 +1,104 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sso
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol"
+ "github.com/aws/aws-sdk-go/private/protocol/restjson"
+)
+
+// SSO provides the API operation methods for making requests to
+// AWS Single Sign-On. See this package's package overview docs
+// for details on the service.
+//
+// SSO methods are safe to use concurrently. It is not safe to
+// modify mutate any of the struct's properties though.
+type SSO struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+ ServiceName = "SSO" // Name of service.
+ EndpointsID = "portal.sso" // ID to lookup a service endpoint with.
+ ServiceID = "SSO" // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the SSO client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// mySession := session.Must(session.NewSession())
+//
+// // Create a SSO client from just a session.
+// svc := sso.New(mySession)
+//
+// // Create a SSO client with additional configuration
+// svc := sso.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSO {
+ c := p.ClientConfig(EndpointsID, cfgs...)
+ if c.SigningNameDerived || len(c.SigningName) == 0 {
+ c.SigningName = "awsssoportal"
+ }
+ return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SSO {
+ svc := &SSO{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ ServiceID: ServiceID,
+ SigningName: signingName,
+ SigningRegion: signingRegion,
+ PartitionID: partitionID,
+ Endpoint: endpoint,
+ APIVersion: "2019-06-10",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+ svc.Handlers.Build.PushBackNamed(restjson.BuildHandler)
+ svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler)
+ svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler)
+ svc.Handlers.UnmarshalError.PushBackNamed(
+ protocol.NewUnmarshalErrorHandler(restjson.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(),
+ )
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for a SSO operation and runs any
+// custom request initialization.
+func (c *SSO) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(req)
+ }
+
+ return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go
new file mode 100644
index 000000000..4cac247c1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go
@@ -0,0 +1,86 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package ssoiface provides an interface to enable mocking the AWS Single Sign-On service client
+// for testing your code.
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters.
+package ssoiface
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/service/sso"
+)
+
+// SSOAPI provides an interface to enable mocking the
+// sso.SSO service client's API operation,
+// paginators, and waiters. This make unit testing your code that calls out
+// to the SDK's service client's calls easier.
+//
+// The best way to use this interface is so the SDK's service client's calls
+// can be stubbed out for unit testing your code with the SDK without needing
+// to inject custom request handlers into the SDK's request pipeline.
+//
+// // myFunc uses an SDK service client to make a request to
+// // AWS Single Sign-On.
+// func myFunc(svc ssoiface.SSOAPI) bool {
+// // Make svc.GetRoleCredentials request
+// }
+//
+// func main() {
+// sess := session.New()
+// svc := sso.New(sess)
+//
+// myFunc(svc)
+// }
+//
+// In your _test.go file:
+//
+// // Define a mock struct to be used in your unit tests of myFunc.
+// type mockSSOClient struct {
+// ssoiface.SSOAPI
+// }
+// func (m *mockSSOClient) GetRoleCredentials(input *sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error) {
+// // mock response/functionality
+// }
+//
+// func TestMyFunc(t *testing.T) {
+// // Setup Test
+// mockSvc := &mockSSOClient{}
+//
+// myfunc(mockSvc)
+//
+// // Verify myFunc's functionality
+// }
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters. Its suggested to use the pattern above for testing, or using
+// tooling to generate mocks to satisfy the interfaces.
+type SSOAPI interface {
+ GetRoleCredentials(*sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error)
+ GetRoleCredentialsWithContext(aws.Context, *sso.GetRoleCredentialsInput, ...request.Option) (*sso.GetRoleCredentialsOutput, error)
+ GetRoleCredentialsRequest(*sso.GetRoleCredentialsInput) (*request.Request, *sso.GetRoleCredentialsOutput)
+
+ ListAccountRoles(*sso.ListAccountRolesInput) (*sso.ListAccountRolesOutput, error)
+ ListAccountRolesWithContext(aws.Context, *sso.ListAccountRolesInput, ...request.Option) (*sso.ListAccountRolesOutput, error)
+ ListAccountRolesRequest(*sso.ListAccountRolesInput) (*request.Request, *sso.ListAccountRolesOutput)
+
+ ListAccountRolesPages(*sso.ListAccountRolesInput, func(*sso.ListAccountRolesOutput, bool) bool) error
+ ListAccountRolesPagesWithContext(aws.Context, *sso.ListAccountRolesInput, func(*sso.ListAccountRolesOutput, bool) bool, ...request.Option) error
+
+ ListAccounts(*sso.ListAccountsInput) (*sso.ListAccountsOutput, error)
+ ListAccountsWithContext(aws.Context, *sso.ListAccountsInput, ...request.Option) (*sso.ListAccountsOutput, error)
+ ListAccountsRequest(*sso.ListAccountsInput) (*request.Request, *sso.ListAccountsOutput)
+
+ ListAccountsPages(*sso.ListAccountsInput, func(*sso.ListAccountsOutput, bool) bool) error
+ ListAccountsPagesWithContext(aws.Context, *sso.ListAccountsInput, func(*sso.ListAccountsOutput, bool) bool, ...request.Option) error
+
+ Logout(*sso.LogoutInput) (*sso.LogoutOutput, error)
+ LogoutWithContext(aws.Context, *sso.LogoutInput, ...request.Option) (*sso.LogoutOutput, error)
+ LogoutRequest(*sso.LogoutInput) (*request.Request, *sso.LogoutOutput)
+}
+
+var _ SSOAPI = (*sso.SSO)(nil)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
index bfc4372f9..3cffd533d 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
@@ -57,65 +57,38 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
// AssumeRole API operation for AWS Security Token Service.
//
// Returns a set of temporary security credentials that you can use to access
-// AWS resources that you might not normally have access to. These temporary
-// credentials consist of an access key ID, a secret access key, and a security
-// token. Typically, you use AssumeRole within your account or for cross-account
-// access. For a comparison of AssumeRole with other API operations that produce
-// temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
-// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
-// in the IAM User Guide.
-//
-// You cannot use AWS account root user credentials to call AssumeRole. You
-// must use credentials for an IAM user or an IAM role to call AssumeRole.
-//
-// For cross-account access, imagine that you own multiple accounts and need
-// to access resources in each account. You could create long-term credentials
-// in each account to access those resources. However, managing all those credentials
-// and remembering which one can access which account can be time consuming.
-// Instead, you can create one set of long-term credentials in one account.
-// Then use temporary security credentials to access all the other accounts
-// by assuming roles in those accounts. For more information about roles, see
-// IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html)
-// in the IAM User Guide.
-//
-// Session Duration
-//
-// By default, the temporary security credentials created by AssumeRole last
-// for one hour. However, you can use the optional DurationSeconds parameter
-// to specify the duration of your session. You can provide a value from 900
-// seconds (15 minutes) up to the maximum session duration setting for the role.
-// This setting can have a value from 1 hour to 12 hours. To learn how to view
-// the maximum value for your role, see View the Maximum Session Duration Setting
-// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
-// in the IAM User Guide. The maximum session duration limit applies when you
-// use the AssumeRole* API operations or the assume-role* CLI commands. However
-// the limit does not apply when you use those operations to create a console
-// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
+// Amazon Web Services resources that you might not normally have access to.
+// These temporary credentials consist of an access key ID, a secret access
+// key, and a security token. Typically, you use AssumeRole within your account
+// or for cross-account access. For a comparison of AssumeRole with other API
+// operations that produce temporary credentials, see Requesting Temporary Security
+// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
// in the IAM User Guide.
//
// Permissions
//
// The temporary security credentials created by AssumeRole can be used to make
-// API calls to any AWS service with the following exception: You cannot call
-// the AWS STS GetFederationToken or GetSessionToken API operations.
+// API calls to any Amazon Web Services service with the following exception:
+// You cannot call the STS GetFederationToken or GetSessionToken API operations.
//
// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
// to this operation. You can pass a single JSON policy document to use as an
// inline session policy. You can also specify up to 10 managed policies to
-// use as managed session policies. The plain text that you use for both inline
+// use as managed session policies. The plaintext that you use for both inline
// and managed session policies can't exceed 2,048 characters. Passing policies
// to this operation returns new temporary credentials. The resulting session's
// permissions are the intersection of the role's identity-based policy and
// the session policies. You can use the role's temporary credentials in subsequent
-// AWS API calls to access resources in the account that owns the role. You
-// cannot use session policies to grant more permissions than those allowed
-// by the identity-based policy of the role that is being assumed. For more
-// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// Amazon Web Services API calls to access resources in the account that owns
+// the role. You cannot use session policies to grant more permissions than
+// those allowed by the identity-based policy of the role that is being assumed.
+// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
// in the IAM User Guide.
//
-// To assume a role from a different account, your AWS account must be trusted
-// by the role. The trust relationship is defined in the role's trust policy
-// when the role is created. That trust policy states which accounts are allowed
+// To assume a role from a different account, your account must be trusted by
+// the role. The trust relationship is defined in the role's trust policy when
+// the role is created. That trust policy states which accounts are allowed
// to delegate that access to users in the account.
//
// A user who wants to access a role in a different account must also have permissions
@@ -157,12 +130,12 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
//
// (Optional) You can include multi-factor authentication (MFA) information
// when you call AssumeRole. This is useful for cross-account scenarios to ensure
-// that the user that assumes the role has been authenticated with an AWS MFA
-// device. In that scenario, the trust policy of the role being assumed includes
-// a condition that tests for MFA authentication. If the caller does not include
-// valid MFA information, the request to assume the role is denied. The condition
-// in a trust policy that tests for MFA authentication might look like the following
-// example.
+// that the user that assumes the role has been authenticated with an Amazon
+// Web Services MFA device. In that scenario, the trust policy of the role being
+// assumed includes a condition that tests for MFA authentication. If the caller
+// does not include valid MFA information, the request to assume the role is
+// denied. The condition in a trust policy that tests for MFA authentication
+// might look like the following example.
//
// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}}
//
@@ -188,11 +161,11 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
//
// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
// The request was rejected because the total packed size of the session policies
-// and session tags combined was too large. An AWS conversion compresses the
-// session policy document, session policy ARNs, and session tags into a packed
-// binary format that has a separate limit. The error message indicates by percentage
-// how close the policies and tags are to the upper size limit. For more information,
-// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+// and session tags combined was too large. An Amazon Web Services conversion
+// compresses the session policy document, session policy ARNs, and session
+// tags into a packed binary format that has a separate limit. The error message
+// indicates by percentage how close the policies and tags are to the upper
+// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
// in the IAM User Guide.
//
// You could receive this error even though you meet other defined session policy
@@ -204,7 +177,8 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
// STS is not activated in the requested region for the account that is being
// asked to generate credentials. The account administrator must use the IAM
// console to activate STS in that region. For more information, see Activating
-// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
// in the IAM User Guide.
//
// * ErrCodeExpiredTokenException "ExpiredTokenException"
@@ -280,16 +254,17 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
//
// Returns a set of temporary security credentials for users who have been authenticated
// via a SAML authentication response. This operation provides a mechanism for
-// tying an enterprise identity store or directory to role-based AWS access
-// without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML
-// with the other API operations that produce temporary credentials, see Requesting
-// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
-// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// tying an enterprise identity store or directory to role-based Amazon Web
+// Services access without user-specific credentials or configuration. For a
+// comparison of AssumeRoleWithSAML with the other API operations that produce
+// temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
// in the IAM User Guide.
//
// The temporary security credentials returned by this operation consist of
// an access key ID, a secret access key, and a security token. Applications
-// can use these temporary security credentials to sign calls to AWS services.
+// can use these temporary security credentials to sign calls to Amazon Web
+// Services services.
//
// Session Duration
//
@@ -308,32 +283,42 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
// in the IAM User Guide.
//
+// Role chaining (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining)
+// limits your CLI or Amazon Web Services API role session to a maximum of one
+// hour. When you use the AssumeRole API operation to assume a role, you can
+// specify the duration of your role session with the DurationSeconds parameter.
+// You can specify a parameter value of up to 43200 seconds (12 hours), depending
+// on the maximum session duration setting for your role. However, if you assume
+// a role using role chaining and provide a DurationSeconds parameter value
+// greater than one hour, the operation fails.
+//
// Permissions
//
// The temporary security credentials created by AssumeRoleWithSAML can be used
-// to make API calls to any AWS service with the following exception: you cannot
-// call the STS GetFederationToken or GetSessionToken API operations.
+// to make API calls to any Amazon Web Services service with the following exception:
+// you cannot call the STS GetFederationToken or GetSessionToken API operations.
//
// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
// to this operation. You can pass a single JSON policy document to use as an
// inline session policy. You can also specify up to 10 managed policies to
-// use as managed session policies. The plain text that you use for both inline
+// use as managed session policies. The plaintext that you use for both inline
// and managed session policies can't exceed 2,048 characters. Passing policies
// to this operation returns new temporary credentials. The resulting session's
// permissions are the intersection of the role's identity-based policy and
// the session policies. You can use the role's temporary credentials in subsequent
-// AWS API calls to access resources in the account that owns the role. You
-// cannot use session policies to grant more permissions than those allowed
-// by the identity-based policy of the role that is being assumed. For more
-// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// Amazon Web Services API calls to access resources in the account that owns
+// the role. You cannot use session policies to grant more permissions than
+// those allowed by the identity-based policy of the role that is being assumed.
+// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
// in the IAM User Guide.
//
-// Calling AssumeRoleWithSAML does not require the use of AWS security credentials.
-// The identity of the caller is validated by using keys in the metadata document
-// that is uploaded for the SAML provider entity for your identity provider.
+// Calling AssumeRoleWithSAML does not require the use of Amazon Web Services
+// security credentials. The identity of the caller is validated by using keys
+// in the metadata document that is uploaded for the SAML provider entity for
+// your identity provider.
//
-// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail
-// logs. The entry includes the value in the NameID element of the SAML assertion.
+// Calling AssumeRoleWithSAML can result in an entry in your CloudTrail logs.
+// The entry includes the value in the NameID element of the SAML assertion.
// We recommend that you use a NameIDType that is not associated with any personally
// identifiable information (PII). For example, you could instead use the persistent
// identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent).
@@ -346,16 +331,16 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
// in the IAM User Guide.
//
-// You can pass up to 50 session tags. The plain text session tag keys can’t
+// You can pass up to 50 session tags. The plaintext session tag keys can’t
// exceed 128 characters and the values can’t exceed 256 characters. For these
// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
// in the IAM User Guide.
//
-// An AWS conversion compresses the passed session policies and session tags
-// into a packed binary format that has a separate limit. Your request can fail
-// for this limit even if your plain text meets the other requirements. The
-// PackedPolicySize response element indicates by percentage how close the policies
-// and tags for your request are to the upper size limit.
+// An Amazon Web Services conversion compresses the passed session policies
+// and session tags into a packed binary format that has a separate limit. Your
+// request can fail for this limit even if your plaintext meets the other requirements.
+// The PackedPolicySize response element indicates by percentage how close the
+// policies and tags for your request are to the upper size limit.
//
// You can pass a session tag with the same key as a tag that is attached to
// the role. When you do, session tags override the role's tags with the same
@@ -375,10 +360,11 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
// SAML Configuration
//
// Before your application can call AssumeRoleWithSAML, you must configure your
-// SAML identity provider (IdP) to issue the claims required by AWS. Additionally,
-// you must use AWS Identity and Access Management (IAM) to create a SAML provider
-// entity in your AWS account that represents your identity provider. You must
-// also create an IAM role that specifies this SAML provider in its trust policy.
+// SAML identity provider (IdP) to issue the claims required by Amazon Web Services.
+// Additionally, you must use Identity and Access Management (IAM) to create
+// a SAML provider entity in your Amazon Web Services account that represents
+// your identity provider. You must also create an IAM role that specifies this
+// SAML provider in its trust policy.
//
// For more information, see the following resources:
//
@@ -408,11 +394,11 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
//
// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
// The request was rejected because the total packed size of the session policies
-// and session tags combined was too large. An AWS conversion compresses the
-// session policy document, session policy ARNs, and session tags into a packed
-// binary format that has a separate limit. The error message indicates by percentage
-// how close the policies and tags are to the upper size limit. For more information,
-// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+// and session tags combined was too large. An Amazon Web Services conversion
+// compresses the session policy document, session policy ARNs, and session
+// tags into a packed binary format that has a separate limit. The error message
+// indicates by percentage how close the policies and tags are to the upper
+// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
// in the IAM User Guide.
//
// You could receive this error even though you meet other defined session policy
@@ -428,8 +414,9 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
// can also mean that the claim has expired or has been explicitly revoked.
//
// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken"
-// The web identity token that was passed could not be validated by AWS. Get
-// a new identity token from the identity provider and then retry the request.
+// The web identity token that was passed could not be validated by Amazon Web
+// Services. Get a new identity token from the identity provider and then retry
+// the request.
//
// * ErrCodeExpiredTokenException "ExpiredTokenException"
// The web identity token that was passed is expired or is not valid. Get a
@@ -439,7 +426,8 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
// STS is not activated in the requested region for the account that is being
// asked to generate credentials. The account administrator must use the IAM
// console to activate STS in that region. For more information, see Activating
-// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
// in the IAM User Guide.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
@@ -515,30 +503,33 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
// Connect-compatible identity provider.
//
// For mobile applications, we recommend that you use Amazon Cognito. You can
-// use Amazon Cognito with the AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/)
-// and the AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/)
-// to uniquely identify a user. You can also supply the user with a consistent
-// identity throughout the lifetime of an application.
+// use Amazon Cognito with the Amazon Web Services SDK for iOS Developer Guide
+// (http://aws.amazon.com/sdkforios/) and the Amazon Web Services SDK for Android
+// Developer Guide (http://aws.amazon.com/sdkforandroid/) to uniquely identify
+// a user. You can also supply the user with a consistent identity throughout
+// the lifetime of an application.
//
// To learn more about Amazon Cognito, see Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840)
-// in AWS SDK for Android Developer Guide and Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664)
-// in the AWS SDK for iOS Developer Guide.
-//
-// Calling AssumeRoleWithWebIdentity does not require the use of AWS security
-// credentials. Therefore, you can distribute an application (for example, on
-// mobile devices) that requests temporary security credentials without including
-// long-term AWS credentials in the application. You also don't need to deploy
-// server-based proxy services that use long-term AWS credentials. Instead,
-// the identity of the caller is validated by using a token from the web identity
-// provider. For a comparison of AssumeRoleWithWebIdentity with the other API
-// operations that produce temporary credentials, see Requesting Temporary Security
-// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
-// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in Amazon Web Services SDK for Android Developer Guide and Amazon Cognito
+// Overview (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664)
+// in the Amazon Web Services SDK for iOS Developer Guide.
+//
+// Calling AssumeRoleWithWebIdentity does not require the use of Amazon Web
+// Services security credentials. Therefore, you can distribute an application
+// (for example, on mobile devices) that requests temporary security credentials
+// without including long-term Amazon Web Services credentials in the application.
+// You also don't need to deploy server-based proxy services that use long-term
+// Amazon Web Services credentials. Instead, the identity of the caller is validated
+// by using a token from the web identity provider. For a comparison of AssumeRoleWithWebIdentity
+// with the other API operations that produce temporary credentials, see Requesting
+// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
// in the IAM User Guide.
//
// The temporary security credentials returned by this API consist of an access
// key ID, a secret access key, and a security token. Applications can use these
-// temporary security credentials to sign calls to AWS service API operations.
+// temporary security credentials to sign calls to Amazon Web Services service
+// API operations.
//
// Session Duration
//
@@ -558,21 +549,22 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
// Permissions
//
// The temporary security credentials created by AssumeRoleWithWebIdentity can
-// be used to make API calls to any AWS service with the following exception:
-// you cannot call the STS GetFederationToken or GetSessionToken API operations.
+// be used to make API calls to any Amazon Web Services service with the following
+// exception: you cannot call the STS GetFederationToken or GetSessionToken
+// API operations.
//
// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
// to this operation. You can pass a single JSON policy document to use as an
// inline session policy. You can also specify up to 10 managed policies to
-// use as managed session policies. The plain text that you use for both inline
+// use as managed session policies. The plaintext that you use for both inline
// and managed session policies can't exceed 2,048 characters. Passing policies
// to this operation returns new temporary credentials. The resulting session's
// permissions are the intersection of the role's identity-based policy and
// the session policies. You can use the role's temporary credentials in subsequent
-// AWS API calls to access resources in the account that owns the role. You
-// cannot use session policies to grant more permissions than those allowed
-// by the identity-based policy of the role that is being assumed. For more
-// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// Amazon Web Services API calls to access resources in the account that owns
+// the role. You cannot use session policies to grant more permissions than
+// those allowed by the identity-based policy of the role that is being assumed.
+// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
// in the IAM User Guide.
//
// Tags
@@ -583,16 +575,16 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
// in the IAM User Guide.
//
-// You can pass up to 50 session tags. The plain text session tag keys can’t
+// You can pass up to 50 session tags. The plaintext session tag keys can’t
// exceed 128 characters and the values can’t exceed 256 characters. For these
// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
// in the IAM User Guide.
//
-// An AWS conversion compresses the passed session policies and session tags
-// into a packed binary format that has a separate limit. Your request can fail
-// for this limit even if your plain text meets the other requirements. The
-// PackedPolicySize response element indicates by percentage how close the policies
-// and tags for your request are to the upper size limit.
+// An Amazon Web Services conversion compresses the passed session policies
+// and session tags into a packed binary format that has a separate limit. Your
+// request can fail for this limit even if your plaintext meets the other requirements.
+// The PackedPolicySize response element indicates by percentage how close the
+// policies and tags for your request are to the upper size limit.
//
// You can pass a session tag with the same key as a tag that is attached to
// the role. When you do, the session tag overrides the role tag with the same
@@ -617,9 +609,9 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
// the identity provider that is associated with the identity token. In other
// words, the identity provider must be specified in the role's trust policy.
//
-// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail
+// Calling AssumeRoleWithWebIdentity can result in an entry in your CloudTrail
// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims)
-// of the provided Web Identity Token. We recommend that you avoid using any
+// of the provided web identity token. We recommend that you avoid using any
// personally identifiable information (PII) in this field. For example, you
// could instead use a GUID or a pairwise identifier, as suggested in the OIDC
// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes).
@@ -633,10 +625,10 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
// * Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/).
// Walk through the process of authenticating through Login with Amazon,
// Facebook, or Google, getting temporary security credentials, and then
-// using those credentials to make a request to AWS.
+// using those credentials to make a request to Amazon Web Services.
//
-// * AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) and
-// AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/).
+// * Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/)
+// and Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/).
// These toolkits contain sample apps that show how to invoke the identity
// providers. The toolkits then show how to use the information from these
// providers to get and use temporary security credentials.
@@ -660,11 +652,11 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
//
// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
// The request was rejected because the total packed size of the session policies
-// and session tags combined was too large. An AWS conversion compresses the
-// session policy document, session policy ARNs, and session tags into a packed
-// binary format that has a separate limit. The error message indicates by percentage
-// how close the policies and tags are to the upper size limit. For more information,
-// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+// and session tags combined was too large. An Amazon Web Services conversion
+// compresses the session policy document, session policy ARNs, and session
+// tags into a packed binary format that has a separate limit. The error message
+// indicates by percentage how close the policies and tags are to the upper
+// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
// in the IAM User Guide.
//
// You could receive this error even though you meet other defined session policy
@@ -687,8 +679,9 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
// error persists, the identity provider might be down or not responding.
//
// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken"
-// The web identity token that was passed could not be validated by AWS. Get
-// a new identity token from the identity provider and then retry the request.
+// The web identity token that was passed could not be validated by Amazon Web
+// Services. Get a new identity token from the identity provider and then retry
+// the request.
//
// * ErrCodeExpiredTokenException "ExpiredTokenException"
// The web identity token that was passed is expired or is not valid. Get a
@@ -698,7 +691,8 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
// STS is not activated in the requested region for the account that is being
// asked to generate credentials. The account administrator must use the IAM
// console to activate STS in that region. For more information, see Activating
-// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
// in the IAM User Guide.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
@@ -768,16 +762,18 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag
// DecodeAuthorizationMessage API operation for AWS Security Token Service.
//
// Decodes additional information about the authorization status of a request
-// from an encoded message returned in response to an AWS request.
+// from an encoded message returned in response to an Amazon Web Services request.
//
// For example, if a user is not authorized to perform an operation that he
// or she has requested, the request returns a Client.UnauthorizedOperation
-// response (an HTTP 403 response). Some AWS operations additionally return
-// an encoded message that can provide details about this authorization failure.
+// response (an HTTP 403 response). Some Amazon Web Services operations additionally
+// return an encoded message that can provide details about this authorization
+// failure.
//
-// Only certain AWS operations return an encoded authorization message. The
-// documentation for an individual operation indicates whether that operation
-// returns an encoded message in addition to returning an HTTP code.
+// Only certain Amazon Web Services operations return an encoded authorization
+// message. The documentation for an individual operation indicates whether
+// that operation returns an encoded message in addition to returning an HTTP
+// code.
//
// The message is encoded because the details of the authorization status can
// constitute privileged information that the user who requested the operation
@@ -888,12 +884,12 @@ func (c *STS) GetAccessKeyInfoRequest(input *GetAccessKeyInfoInput) (req *reques
// in the IAM User Guide.
//
// When you pass an access key ID to this operation, it returns the ID of the
-// AWS account to which the keys belong. Access key IDs beginning with AKIA
-// are long-term credentials for an IAM user or the AWS account root user. Access
-// key IDs beginning with ASIA are temporary credentials that are created using
-// STS operations. If the account in the response belongs to you, you can sign
-// in as the root user and review your root user access keys. Then, you can
-// pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html)
+// Amazon Web Services account to which the keys belong. Access key IDs beginning
+// with AKIA are long-term credentials for an IAM user or the Amazon Web Services
+// account root user. Access key IDs beginning with ASIA are temporary credentials
+// that are created using STS operations. If the account in the response belongs
+// to you, you can sign in as the root user and review your root user access
+// keys. Then, you can pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html)
// to learn which IAM user owns the keys. To learn who requested the temporary
// credentials for an ASIA access key, view the STS events in your CloudTrail
// logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html)
@@ -1069,7 +1065,7 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
// For a comparison of GetFederationToken with the other API operations that
// produce temporary credentials, see Requesting Temporary Security Credentials
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
-// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// and Comparing the STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
// in the IAM User Guide.
//
// You can create a mobile-based or browser-based app that can authenticate
@@ -1081,11 +1077,11 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
// in the IAM User Guide.
//
// You can also call GetFederationToken using the security credentials of an
-// AWS account root user, but we do not recommend it. Instead, we recommend
-// that you create an IAM user for the purpose of the proxy application. Then
-// attach a policy to the IAM user that limits federated users to only the actions
-// and resources that they need to access. For more information, see IAM Best
-// Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
+// Amazon Web Services account root user, but we do not recommend it. Instead,
+// we recommend that you create an IAM user for the purpose of the proxy application.
+// Then attach a policy to the IAM user that limits federated users to only
+// the actions and resources that they need to access. For more information,
+// see IAM Best Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
// in the IAM User Guide.
//
// Session duration
@@ -1093,15 +1089,81 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
// The temporary credentials are valid for the specified duration, from 900
// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default
// session duration is 43,200 seconds (12 hours). Temporary credentials that
-// are obtained by using AWS account root user credentials have a maximum duration
-// of 3,600 seconds (1 hour).
+// are obtained by using Amazon Web Services account root user credentials have
+// a maximum duration of 3,600 seconds (1 hour).
//
// Permissions
//
// You can use the temporary credentials created by GetFederationToken in any
-// AWS service except the following:
+// Amazon Web Services service except the following:
//
-// * You cannot call any IAM operations using the AWS CLI or the AWS API.
+// * You cannot call any IAM operations using the CLI or the Amazon Web Services
+// API.
+//
+// * You cannot call any STS operations except GetCallerIdentity.
+//
+// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// to this operation. You can pass a single JSON policy document to use as an
+// inline session policy. You can also specify up to 10 managed policies to
+// use as managed session policies. The plaintext that you use for both inline
+// and managed session policies can't exceed 2,048 characters.
+//
+// Though the session policy parameters are optional, if you do not pass a policy,
+// then the resulting federated user session has no permissions. When you pass
+// session policies, the session permissions are the intersection of the IAM
+// user policies and the session policies that you pass. This gives you a way
+// to further restrict the permissions for a federated user. You cannot use
+// session policies to grant more permissions than those that are defined in
+// the permissions policy of the IAM user. For more information, see Session
+// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// in the IAM User Guide. For information about using GetFederationToken to
+// create temporary security credentials, see GetFederationToken—Federation
+// Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken).
+//
+// You can use the credentials to access a resource that has a resource-based
+// policy. If that policy specifically references the federated user session
+// in the Principal element of the policy, the session has the permissions allowed
+// by the policy. These permissions are granted in addition to the permissions
+// granted by the session policies.
+//
+// Tags
+//
+// (Optional) You can pass tag key-value pairs to your session. These are called
+// session tags. For more information about session tags, see Passing Session
+// Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+// in the IAM User Guide.
+//
+// You can create a mobile-based or browser-based app that can authenticate
+// users using a web identity provider like Login with Amazon, Facebook, Google,
+// or an OpenID Connect-compatible identity provider. In this case, we recommend
+// that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity.
+// For more information, see Federation Through a Web-based Identity Provider
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity)
+// in the IAM User Guide.
+//
+// You can also call GetFederationToken using the security credentials of an
+// Amazon Web Services account root user, but we do not recommend it. Instead,
+// we recommend that you create an IAM user for the purpose of the proxy application.
+// Then attach a policy to the IAM user that limits federated users to only
+// the actions and resources that they need to access. For more information,
+// see IAM Best Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
+// in the IAM User Guide.
+//
+// Session duration
+//
+// The temporary credentials are valid for the specified duration, from 900
+// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default
+// session duration is 43,200 seconds (12 hours). Temporary credentials that
+// are obtained by using Amazon Web Services account root user credentials have
+// a maximum duration of 3,600 seconds (1 hour).
+//
+// Permissions
+//
+// You can use the temporary credentials created by GetFederationToken in any
+// Amazon Web Services service except the following:
+//
+// * You cannot call any IAM operations using the CLI or the Amazon Web Services
+// API.
//
// * You cannot call any STS operations except GetCallerIdentity.
//
@@ -1163,11 +1225,11 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
//
// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
// The request was rejected because the total packed size of the session policies
-// and session tags combined was too large. An AWS conversion compresses the
-// session policy document, session policy ARNs, and session tags into a packed
-// binary format that has a separate limit. The error message indicates by percentage
-// how close the policies and tags are to the upper size limit. For more information,
-// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+// and session tags combined was too large. An Amazon Web Services conversion
+// compresses the session policy document, session policy ARNs, and session
+// tags into a packed binary format that has a separate limit. The error message
+// indicates by percentage how close the policies and tags are to the upper
+// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
// in the IAM User Guide.
//
// You could receive this error even though you meet other defined session policy
@@ -1179,7 +1241,8 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
// STS is not activated in the requested region for the account that is being
// asked to generate credentials. The account administrator must use the IAM
// console to activate STS in that region. For more information, see Activating
-// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
// in the IAM User Guide.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
@@ -1248,51 +1311,53 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
// GetSessionToken API operation for AWS Security Token Service.
//
-// Returns a set of temporary credentials for an AWS account or IAM user. The
-// credentials consist of an access key ID, a secret access key, and a security
-// token. Typically, you use GetSessionToken if you want to use MFA to protect
-// programmatic calls to specific AWS API operations like Amazon EC2 StopInstances.
-// MFA-enabled IAM users would need to call GetSessionToken and submit an MFA
-// code that is associated with their MFA device. Using the temporary security
-// credentials that are returned from the call, IAM users can then make programmatic
-// calls to API operations that require MFA authentication. If you do not supply
-// a correct MFA code, then the API returns an access denied error. For a comparison
-// of GetSessionToken with the other API operations that produce temporary credentials,
-// see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
-// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// Returns a set of temporary credentials for an Amazon Web Services account
+// or IAM user. The credentials consist of an access key ID, a secret access
+// key, and a security token. Typically, you use GetSessionToken if you want
+// to use MFA to protect programmatic calls to specific Amazon Web Services
+// API operations like Amazon EC2 StopInstances. MFA-enabled IAM users would
+// need to call GetSessionToken and submit an MFA code that is associated with
+// their MFA device. Using the temporary security credentials that are returned
+// from the call, IAM users can then make programmatic calls to API operations
+// that require MFA authentication. If you do not supply a correct MFA code,
+// then the API returns an access denied error. For a comparison of GetSessionToken
+// with the other API operations that produce temporary credentials, see Requesting
+// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
// in the IAM User Guide.
//
// Session Duration
//
-// The GetSessionToken operation must be called by using the long-term AWS security
-// credentials of the AWS account root user or an IAM user. Credentials that
-// are created by IAM users are valid for the duration that you specify. This
-// duration can range from 900 seconds (15 minutes) up to a maximum of 129,600
-// seconds (36 hours), with a default of 43,200 seconds (12 hours). Credentials
-// based on account credentials can range from 900 seconds (15 minutes) up to
-// 3,600 seconds (1 hour), with a default of 1 hour.
+// The GetSessionToken operation must be called by using the long-term Amazon
+// Web Services security credentials of the Amazon Web Services account root
+// user or an IAM user. Credentials that are created by IAM users are valid
+// for the duration that you specify. This duration can range from 900 seconds
+// (15 minutes) up to a maximum of 129,600 seconds (36 hours), with a default
+// of 43,200 seconds (12 hours). Credentials based on account credentials can
+// range from 900 seconds (15 minutes) up to 3,600 seconds (1 hour), with a
+// default of 1 hour.
//
// Permissions
//
// The temporary security credentials created by GetSessionToken can be used
-// to make API calls to any AWS service with the following exceptions:
+// to make API calls to any Amazon Web Services service with the following exceptions:
//
// * You cannot call any IAM API operations unless MFA authentication information
// is included in the request.
//
// * You cannot call any STS API except AssumeRole or GetCallerIdentity.
//
-// We recommend that you do not call GetSessionToken with AWS account root user
-// credentials. Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
+// We recommend that you do not call GetSessionToken with Amazon Web Services
+// account root user credentials. Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
// by creating one or more IAM users, giving them the necessary permissions,
-// and using IAM users for everyday interaction with AWS.
+// and using IAM users for everyday interaction with Amazon Web Services.
//
// The credentials that are returned by GetSessionToken are based on permissions
// associated with the user whose credentials were used to call the operation.
-// If GetSessionToken is called using AWS account root user credentials, the
-// temporary credentials have root user permissions. Similarly, if GetSessionToken
-// is called using the credentials of an IAM user, the temporary credentials
-// have the same permissions as the IAM user.
+// If GetSessionToken is called using Amazon Web Services account root user
+// credentials, the temporary credentials have root user permissions. Similarly,
+// if GetSessionToken is called using the credentials of an IAM user, the temporary
+// credentials have the same permissions as the IAM user.
//
// For more information about using GetSessionToken to create temporary credentials,
// go to Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
@@ -1310,7 +1375,8 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
// STS is not activated in the requested region for the account that is being
// asked to generate credentials. The account administrator must use the IAM
// console to activate STS in that region. For more information, see Activating
-// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
// in the IAM User Guide.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
@@ -1338,14 +1404,15 @@ func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionToken
type AssumeRoleInput struct {
_ struct{} `type:"structure"`
- // The duration, in seconds, of the role session. The value can range from 900
- // seconds (15 minutes) up to the maximum session duration setting for the role.
- // This setting can have a value from 1 hour to 12 hours. If you specify a value
- // higher than this setting, the operation fails. For example, if you specify
- // a session duration of 12 hours, but your administrator set the maximum session
- // duration to 6 hours, your operation fails. To learn how to view the maximum
- // value for your role, see View the Maximum Session Duration Setting for a
- // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+ // The duration, in seconds, of the role session. The value specified can
+ // range from 900 seconds (15 minutes) up to the maximum session duration that
+ // is set for the role. The maximum session duration setting can have a value
+ // from 1 hour to 12 hours. If you specify a value higher than this setting
+ // or the administrator setting (whichever is lower), the operation fails. For
+ // example, if you specify a session duration of 12 hours, but your administrator
+ // set the maximum session duration to 6 hours, your operation fails. To learn
+ // how to view the maximum value for your role, see View the Maximum Session
+ // Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
// in the IAM User Guide.
//
// By default, the value is set to 3600 seconds.
@@ -1355,7 +1422,7 @@ type AssumeRoleInput struct {
// to the federation endpoint for a console sign-in token takes a SessionDuration
// parameter that specifies the maximum length of the console session. For more
// information, see Creating a URL that Enables Federated Users to Access the
- // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+ // Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
// in the IAM User Guide.
DurationSeconds *int64 `min:"900" type:"integer"`
@@ -1367,8 +1434,8 @@ type AssumeRoleInput struct {
// of the trusting account might send an external ID to the administrator of
// the trusted account. That way, only someone with the ID can assume the role,
// rather than everyone in the account. For more information about the external
- // ID, see How to Use an External ID When Granting Access to Your AWS Resources
- // to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html)
+ // ID, see How to Use an External ID When Granting Access to Your Amazon Web
+ // Services Resources to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html)
// in the IAM User Guide.
//
// The regex used to validate this parameter is a string of characters consisting
@@ -1381,23 +1448,24 @@ type AssumeRoleInput struct {
// This parameter is optional. Passing policies to this operation returns new
// temporary credentials. The resulting session's permissions are the intersection
// of the role's identity-based policy and the session policies. You can use
- // the role's temporary credentials in subsequent AWS API calls to access resources
- // in the account that owns the role. You cannot use session policies to grant
- // more permissions than those allowed by the identity-based policy of the role
- // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // the role's temporary credentials in subsequent Amazon Web Services API calls
+ // to access resources in the account that owns the role. You cannot use session
+ // policies to grant more permissions than those allowed by the identity-based
+ // policy of the role that is being assumed. For more information, see Session
+ // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
// in the IAM User Guide.
//
- // The plain text that you use for both inline and managed session policies
- // can't exceed 2,048 characters. The JSON policy characters can be any ASCII
- // character from the space character to the end of the valid character list
- // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A),
- // and carriage return (\u000D) characters.
- //
- // An AWS conversion compresses the passed session policies and session tags
- // into a packed binary format that has a separate limit. Your request can fail
- // for this limit even if your plain text meets the other requirements. The
- // PackedPolicySize response element indicates by percentage how close the policies
- // and tags for your request are to the upper size limit.
+ // The plaintext that you use for both inline and managed session policies can't
+ // exceed 2,048 characters. The JSON policy characters can be any ASCII character
+ // from the space character to the end of the valid character list (\u0020 through
+ // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
+ // return (\u000D) characters.
+ //
+ // An Amazon Web Services conversion compresses the passed session policies
+ // and session tags into a packed binary format that has a separate limit. Your
+ // request can fail for this limit even if your plaintext meets the other requirements.
+ // The PackedPolicySize response element indicates by percentage how close the
+ // policies and tags for your request are to the upper size limit.
Policy *string `min:"1" type:"string"`
// The Amazon Resource Names (ARNs) of the IAM managed policies that you want
@@ -1405,24 +1473,24 @@ type AssumeRoleInput struct {
// as the role.
//
// This parameter is optional. You can provide up to 10 managed policy ARNs.
- // However, the plain text that you use for both inline and managed session
- // policies can't exceed 2,048 characters. For more information about ARNs,
- // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
- // in the AWS General Reference.
+ // However, the plaintext that you use for both inline and managed session policies
+ // can't exceed 2,048 characters. For more information about ARNs, see Amazon
+ // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // in the Amazon Web Services General Reference.
//
- // An AWS conversion compresses the passed session policies and session tags
- // into a packed binary format that has a separate limit. Your request can fail
- // for this limit even if your plain text meets the other requirements. The
- // PackedPolicySize response element indicates by percentage how close the policies
- // and tags for your request are to the upper size limit.
+ // An Amazon Web Services conversion compresses the passed session policies
+ // and session tags into a packed binary format that has a separate limit. Your
+ // request can fail for this limit even if your plaintext meets the other requirements.
+ // The PackedPolicySize response element indicates by percentage how close the
+ // policies and tags for your request are to the upper size limit.
//
// Passing policies to this operation returns new temporary credentials. The
// resulting session's permissions are the intersection of the role's identity-based
// policy and the session policies. You can use the role's temporary credentials
- // in subsequent AWS API calls to access resources in the account that owns
- // the role. You cannot use session policies to grant more permissions than
- // those allowed by the identity-based policy of the role that is being assumed.
- // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // in subsequent Amazon Web Services API calls to access resources in the account
+ // that owns the role. You cannot use session policies to grant more permissions
+ // than those allowed by the identity-based policy of the role that is being
+ // assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
// in the IAM User Guide.
PolicyArns []*PolicyDescriptorType `type:"list"`
@@ -1439,7 +1507,7 @@ type AssumeRoleInput struct {
// account that owns the role. The role session name is also used in the ARN
// of the assumed role principal. This means that subsequent cross-account API
// requests that use the temporary security credentials will expose the role
- // session name to the external account in their AWS CloudTrail logs.
+ // session name to the external account in their CloudTrail logs.
//
// The regex used to validate this parameter is a string of characters consisting
// of upper- and lower-case alphanumeric characters with no spaces. You can
@@ -1459,22 +1527,41 @@ type AssumeRoleInput struct {
// also include underscores or any of the following characters: =,.@-
SerialNumber *string `min:"9" type:"string"`
+ // The source identity specified by the principal that is calling the AssumeRole
+ // operation.
+ //
+ // You can require users to specify a source identity when they assume a role.
+ // You do this by using the sts:SourceIdentity condition key in a role trust
+ // policy. You can use source identity information in CloudTrail logs to determine
+ // who took actions with a role. You can use the aws:SourceIdentity condition
+ // key to further control access to Amazon Web Services resources based on the
+ // value of source identity. For more information about using source identity,
+ // see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html)
+ // in the IAM User Guide.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@-. You cannot
+ // use a value that begins with the text aws:. This prefix is reserved for Amazon
+ // Web Services internal use.
+ SourceIdentity *string `min:"2" type:"string"`
+
// A list of session tags that you want to pass. Each session tag consists of
// a key name and an associated value. For more information about session tags,
- // see Tagging AWS STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+ // see Tagging STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
// in the IAM User Guide.
//
- // This parameter is optional. You can pass up to 50 session tags. The plain
- // text session tag keys can’t exceed 128 characters, and the values can’t
- // exceed 256 characters. For these and additional limits, see IAM and STS Character
+ // This parameter is optional. You can pass up to 50 session tags. The plaintext
+ // session tag keys can’t exceed 128 characters, and the values can’t exceed
+ // 256 characters. For these and additional limits, see IAM and STS Character
// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
// in the IAM User Guide.
//
- // An AWS conversion compresses the passed session policies and session tags
- // into a packed binary format that has a separate limit. Your request can fail
- // for this limit even if your plain text meets the other requirements. The
- // PackedPolicySize response element indicates by percentage how close the policies
- // and tags for your request are to the upper size limit.
+ // An Amazon Web Services conversion compresses the passed session policies
+ // and session tags into a packed binary format that has a separate limit. Your
+ // request can fail for this limit even if your plaintext meets the other requirements.
+ // The PackedPolicySize response element indicates by percentage how close the
+ // policies and tags for your request are to the upper size limit.
//
// You can pass a session tag with the same key as a tag that is already attached
// to the role. When you do, session tags override a role tag with the same
@@ -1489,15 +1576,16 @@ type AssumeRoleInput struct {
// Additionally, if you used temporary credentials to perform this operation,
// the new session inherits any transitive session tags from the calling session.
// If you pass a session tag with the same key as an inherited tag, the operation
- // fails. To view the inherited tags for a session, see the AWS CloudTrail logs.
+ // fails. To view the inherited tags for a session, see the CloudTrail logs.
// For more information, see Viewing Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/session-tags.html#id_session-tags_ctlogs)
// in the IAM User Guide.
Tags []*Tag `type:"list"`
// The value provided by the MFA device, if the trust policy of the role being
- // assumed requires MFA (that is, if the policy includes a condition that tests
- // for MFA). If the role being assumed requires MFA and if the TokenCode value
- // is missing or expired, the AssumeRole call returns an "access denied" error.
+ // assumed requires MFA. (In other words, if the policy includes a condition
+ // that tests for MFA). If the role being assumed requires MFA and if the TokenCode
+ // value is missing or expired, the AssumeRole call returns an "access denied"
+ // error.
//
// The format for this parameter, as described by its regex pattern, is a sequence
// of six numeric digits.
@@ -1554,6 +1642,9 @@ func (s *AssumeRoleInput) Validate() error {
if s.SerialNumber != nil && len(*s.SerialNumber) < 9 {
invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9))
}
+ if s.SourceIdentity != nil && len(*s.SourceIdentity) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("SourceIdentity", 2))
+ }
if s.TokenCode != nil && len(*s.TokenCode) < 6 {
invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6))
}
@@ -1626,6 +1717,12 @@ func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput {
return s
}
+// SetSourceIdentity sets the SourceIdentity field's value.
+func (s *AssumeRoleInput) SetSourceIdentity(v string) *AssumeRoleInput {
+ s.SourceIdentity = &v
+ return s
+}
+
// SetTags sets the Tags field's value.
func (s *AssumeRoleInput) SetTags(v []*Tag) *AssumeRoleInput {
s.Tags = v
@@ -1645,7 +1742,8 @@ func (s *AssumeRoleInput) SetTransitiveTagKeys(v []*string) *AssumeRoleInput {
}
// Contains the response to a successful AssumeRole request, including temporary
-// AWS credentials that can be used to make AWS requests.
+// Amazon Web Services credentials that can be used to make Amazon Web Services
+// requests.
type AssumeRoleOutput struct {
_ struct{} `type:"structure"`
@@ -1668,6 +1766,23 @@ type AssumeRoleOutput struct {
// packed size is greater than 100 percent, which means the policies and tags
// exceeded the allowed space.
PackedPolicySize *int64 `type:"integer"`
+
+ // The source identity specified by the principal that is calling the AssumeRole
+ // operation.
+ //
+ // You can require users to specify a source identity when they assume a role.
+ // You do this by using the sts:SourceIdentity condition key in a role trust
+ // policy. You can use source identity information in CloudTrail logs to determine
+ // who took actions with a role. You can use the aws:SourceIdentity condition
+ // key to further control access to Amazon Web Services resources based on the
+ // value of source identity. For more information about using source identity,
+ // see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html)
+ // in the IAM User Guide.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@-
+ SourceIdentity *string `min:"2" type:"string"`
}
// String returns the string representation
@@ -1698,6 +1813,12 @@ func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput {
return s
}
+// SetSourceIdentity sets the SourceIdentity field's value.
+func (s *AssumeRoleOutput) SetSourceIdentity(v string) *AssumeRoleOutput {
+ s.SourceIdentity = &v
+ return s
+}
+
type AssumeRoleWithSAMLInput struct {
_ struct{} `type:"structure"`
@@ -1721,7 +1842,7 @@ type AssumeRoleWithSAMLInput struct {
// to the federation endpoint for a console sign-in token takes a SessionDuration
// parameter that specifies the maximum length of the console session. For more
// information, see Creating a URL that Enables Federated Users to Access the
- // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+ // Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
// in the IAM User Guide.
DurationSeconds *int64 `min:"900" type:"integer"`
@@ -1730,23 +1851,24 @@ type AssumeRoleWithSAMLInput struct {
// This parameter is optional. Passing policies to this operation returns new
// temporary credentials. The resulting session's permissions are the intersection
// of the role's identity-based policy and the session policies. You can use
- // the role's temporary credentials in subsequent AWS API calls to access resources
- // in the account that owns the role. You cannot use session policies to grant
- // more permissions than those allowed by the identity-based policy of the role
- // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // the role's temporary credentials in subsequent Amazon Web Services API calls
+ // to access resources in the account that owns the role. You cannot use session
+ // policies to grant more permissions than those allowed by the identity-based
+ // policy of the role that is being assumed. For more information, see Session
+ // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
// in the IAM User Guide.
//
- // The plain text that you use for both inline and managed session policies
- // can't exceed 2,048 characters. The JSON policy characters can be any ASCII
- // character from the space character to the end of the valid character list
- // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A),
- // and carriage return (\u000D) characters.
- //
- // An AWS conversion compresses the passed session policies and session tags
- // into a packed binary format that has a separate limit. Your request can fail
- // for this limit even if your plain text meets the other requirements. The
- // PackedPolicySize response element indicates by percentage how close the policies
- // and tags for your request are to the upper size limit.
+ // The plaintext that you use for both inline and managed session policies can't
+ // exceed 2,048 characters. The JSON policy characters can be any ASCII character
+ // from the space character to the end of the valid character list (\u0020 through
+ // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
+ // return (\u000D) characters.
+ //
+ // An Amazon Web Services conversion compresses the passed session policies
+ // and session tags into a packed binary format that has a separate limit. Your
+ // request can fail for this limit even if your plaintext meets the other requirements.
+ // The PackedPolicySize response element indicates by percentage how close the
+ // policies and tags for your request are to the upper size limit.
Policy *string `min:"1" type:"string"`
// The Amazon Resource Names (ARNs) of the IAM managed policies that you want
@@ -1754,24 +1876,24 @@ type AssumeRoleWithSAMLInput struct {
// as the role.
//
// This parameter is optional. You can provide up to 10 managed policy ARNs.
- // However, the plain text that you use for both inline and managed session
- // policies can't exceed 2,048 characters. For more information about ARNs,
- // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
- // in the AWS General Reference.
+ // However, the plaintext that you use for both inline and managed session policies
+ // can't exceed 2,048 characters. For more information about ARNs, see Amazon
+ // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // in the Amazon Web Services General Reference.
//
- // An AWS conversion compresses the passed session policies and session tags
- // into a packed binary format that has a separate limit. Your request can fail
- // for this limit even if your plain text meets the other requirements. The
- // PackedPolicySize response element indicates by percentage how close the policies
- // and tags for your request are to the upper size limit.
+ // An Amazon Web Services conversion compresses the passed session policies
+ // and session tags into a packed binary format that has a separate limit. Your
+ // request can fail for this limit even if your plaintext meets the other requirements.
+ // The PackedPolicySize response element indicates by percentage how close the
+ // policies and tags for your request are to the upper size limit.
//
// Passing policies to this operation returns new temporary credentials. The
// resulting session's permissions are the intersection of the role's identity-based
// policy and the session policies. You can use the role's temporary credentials
- // in subsequent AWS API calls to access resources in the account that owns
- // the role. You cannot use session policies to grant more permissions than
- // those allowed by the identity-based policy of the role that is being assumed.
- // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // in subsequent Amazon Web Services API calls to access resources in the account
+ // that owns the role. You cannot use session policies to grant more permissions
+ // than those allowed by the identity-based policy of the role that is being
+ // assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
// in the IAM User Guide.
PolicyArns []*PolicyDescriptorType `type:"list"`
@@ -1786,7 +1908,7 @@ type AssumeRoleWithSAMLInput struct {
// RoleArn is a required field
RoleArn *string `min:"20" type:"string" required:"true"`
- // The base-64 encoded SAML authentication response provided by the IdP.
+ // The base64 encoded SAML authentication response provided by the IdP.
//
// For more information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html)
// in the IAM User Guide.
@@ -1886,7 +2008,8 @@ func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAML
}
// Contains the response to a successful AssumeRoleWithSAML request, including
-// temporary AWS credentials that can be used to make AWS requests.
+// temporary Amazon Web Services credentials that can be used to make Amazon
+// Web Services requests.
type AssumeRoleWithSAMLOutput struct {
_ struct{} `type:"structure"`
@@ -1908,10 +2031,17 @@ type AssumeRoleWithSAMLOutput struct {
// The value of the Issuer element of the SAML assertion.
Issuer *string `type:"string"`
- // A hash value based on the concatenation of the Issuer response value, the
- // AWS account ID, and the friendly name (the last part of the ARN) of the SAML
- // provider in IAM. The combination of NameQualifier and Subject can be used
- // to uniquely identify a federated user.
+ // A hash value based on the concatenation of the following:
+ //
+ // * The Issuer response value.
+ //
+ // * The Amazon Web Services account ID.
+ //
+ // * The friendly name (the last part of the ARN) of the SAML provider in
+ // IAM.
+ //
+ // The combination of NameQualifier and Subject can be used to uniquely identify
+ // a federated user.
//
// The following pseudocode shows how the hash value is calculated:
//
@@ -1925,6 +2055,26 @@ type AssumeRoleWithSAMLOutput struct {
// exceeded the allowed space.
PackedPolicySize *int64 `type:"integer"`
+ // The value in the SourceIdentity attribute in the SAML assertion.
+ //
+ // You can require users to set a source identity value when they assume a role.
+ // You do this by using the sts:SourceIdentity condition key in a role trust
+ // policy. That way, actions that are taken with the role are associated with
+ // that user. After the source identity is set, the value cannot be changed.
+ // It is present in the request for all actions that are taken by the role and
+ // persists across chained role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining)
+ // sessions. You can configure your SAML identity provider to use an attribute
+ // associated with your users, like user name or email, as the source identity
+ // when calling AssumeRoleWithSAML. You do this by adding an attribute to the
+ // SAML assertion. For more information about using source identity, see Monitor
+ // and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html)
+ // in the IAM User Guide.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@-
+ SourceIdentity *string `min:"2" type:"string"`
+
// The value of the NameID element in the Subject element of the SAML assertion.
Subject *string `type:"string"`
@@ -1985,6 +2135,12 @@ func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithS
return s
}
+// SetSourceIdentity sets the SourceIdentity field's value.
+func (s *AssumeRoleWithSAMLOutput) SetSourceIdentity(v string) *AssumeRoleWithSAMLOutput {
+ s.SourceIdentity = &v
+ return s
+}
+
// SetSubject sets the Subject field's value.
func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput {
s.Subject = &v
@@ -2017,7 +2173,7 @@ type AssumeRoleWithWebIdentityInput struct {
// to the federation endpoint for a console sign-in token takes a SessionDuration
// parameter that specifies the maximum length of the console session. For more
// information, see Creating a URL that Enables Federated Users to Access the
- // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+ // Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
// in the IAM User Guide.
DurationSeconds *int64 `min:"900" type:"integer"`
@@ -2026,23 +2182,24 @@ type AssumeRoleWithWebIdentityInput struct {
// This parameter is optional. Passing policies to this operation returns new
// temporary credentials. The resulting session's permissions are the intersection
// of the role's identity-based policy and the session policies. You can use
- // the role's temporary credentials in subsequent AWS API calls to access resources
- // in the account that owns the role. You cannot use session policies to grant
- // more permissions than those allowed by the identity-based policy of the role
- // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // the role's temporary credentials in subsequent Amazon Web Services API calls
+ // to access resources in the account that owns the role. You cannot use session
+ // policies to grant more permissions than those allowed by the identity-based
+ // policy of the role that is being assumed. For more information, see Session
+ // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
// in the IAM User Guide.
//
- // The plain text that you use for both inline and managed session policies
- // can't exceed 2,048 characters. The JSON policy characters can be any ASCII
- // character from the space character to the end of the valid character list
- // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A),
- // and carriage return (\u000D) characters.
- //
- // An AWS conversion compresses the passed session policies and session tags
- // into a packed binary format that has a separate limit. Your request can fail
- // for this limit even if your plain text meets the other requirements. The
- // PackedPolicySize response element indicates by percentage how close the policies
- // and tags for your request are to the upper size limit.
+ // The plaintext that you use for both inline and managed session policies can't
+ // exceed 2,048 characters. The JSON policy characters can be any ASCII character
+ // from the space character to the end of the valid character list (\u0020 through
+ // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
+ // return (\u000D) characters.
+ //
+ // An Amazon Web Services conversion compresses the passed session policies
+ // and session tags into a packed binary format that has a separate limit. Your
+ // request can fail for this limit even if your plaintext meets the other requirements.
+ // The PackedPolicySize response element indicates by percentage how close the
+ // policies and tags for your request are to the upper size limit.
Policy *string `min:"1" type:"string"`
// The Amazon Resource Names (ARNs) of the IAM managed policies that you want
@@ -2050,24 +2207,24 @@ type AssumeRoleWithWebIdentityInput struct {
// as the role.
//
// This parameter is optional. You can provide up to 10 managed policy ARNs.
- // However, the plain text that you use for both inline and managed session
- // policies can't exceed 2,048 characters. For more information about ARNs,
- // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
- // in the AWS General Reference.
+ // However, the plaintext that you use for both inline and managed session policies
+ // can't exceed 2,048 characters. For more information about ARNs, see Amazon
+ // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // in the Amazon Web Services General Reference.
//
- // An AWS conversion compresses the passed session policies and session tags
- // into a packed binary format that has a separate limit. Your request can fail
- // for this limit even if your plain text meets the other requirements. The
- // PackedPolicySize response element indicates by percentage how close the policies
- // and tags for your request are to the upper size limit.
+ // An Amazon Web Services conversion compresses the passed session policies
+ // and session tags into a packed binary format that has a separate limit. Your
+ // request can fail for this limit even if your plaintext meets the other requirements.
+ // The PackedPolicySize response element indicates by percentage how close the
+ // policies and tags for your request are to the upper size limit.
//
// Passing policies to this operation returns new temporary credentials. The
// resulting session's permissions are the intersection of the role's identity-based
// policy and the session policies. You can use the role's temporary credentials
- // in subsequent AWS API calls to access resources in the account that owns
- // the role. You cannot use session policies to grant more permissions than
- // those allowed by the identity-based policy of the role that is being assumed.
- // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // in subsequent Amazon Web Services API calls to access resources in the account
+ // that owns the role. You cannot use session policies to grant more permissions
+ // than those allowed by the identity-based policy of the role that is being
+ // assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
// in the IAM User Guide.
PolicyArns []*PolicyDescriptorType `type:"list"`
@@ -2207,7 +2364,8 @@ func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRo
}
// Contains the response to a successful AssumeRoleWithWebIdentity request,
-// including temporary AWS credentials that can be used to make AWS requests.
+// including temporary Amazon Web Services credentials that can be used to make
+// Amazon Web Services requests.
type AssumeRoleWithWebIdentityOutput struct {
_ struct{} `type:"structure"`
@@ -2242,6 +2400,29 @@ type AssumeRoleWithWebIdentityOutput struct {
// in the AssumeRoleWithWebIdentity request.
Provider *string `type:"string"`
+ // The value of the source identity that is returned in the JSON web token (JWT)
+ // from the identity provider.
+ //
+ // You can require users to set a source identity value when they assume a role.
+ // You do this by using the sts:SourceIdentity condition key in a role trust
+ // policy. That way, actions that are taken with the role are associated with
+ // that user. After the source identity is set, the value cannot be changed.
+ // It is present in the request for all actions that are taken by the role and
+ // persists across chained role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining)
+ // sessions. You can configure your identity provider to use an attribute associated
+ // with your users, like user name or email, as the source identity when calling
+ // AssumeRoleWithWebIdentity. You do this by adding a claim to the JSON web
+ // token. To learn more about OIDC tokens and claims, see Using Tokens with
+ // User Pools (https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html)
+ // in the Amazon Cognito Developer Guide. For more information about using source
+ // identity, see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html)
+ // in the IAM User Guide.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@-
+ SourceIdentity *string `min:"2" type:"string"`
+
// The unique user identifier that is returned by the identity provider. This
// identifier is associated with the WebIdentityToken that was submitted with
// the AssumeRoleWithWebIdentity call. The identifier is typically unique to
@@ -2291,6 +2472,12 @@ func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithW
return s
}
+// SetSourceIdentity sets the SourceIdentity field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetSourceIdentity(v string) *AssumeRoleWithWebIdentityOutput {
+ s.SourceIdentity = &v
+ return s
+}
+
// SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value.
func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput {
s.SubjectFromWebIdentityToken = &v
@@ -2311,8 +2498,8 @@ type AssumedRoleUser struct {
Arn *string `min:"20" type:"string" required:"true"`
// A unique identifier that contains the role ID and the role session name of
- // the role that is being assumed. The role ID is generated by AWS when the
- // role is created.
+ // the role that is being assumed. The role ID is generated by Amazon Web Services
+ // when the role is created.
//
// AssumedRoleId is a required field
AssumedRoleId *string `min:"2" type:"string" required:"true"`
@@ -2340,7 +2527,7 @@ func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser {
return s
}
-// AWS credentials for API authentication.
+// Amazon Web Services credentials for API authentication.
type Credentials struct {
_ struct{} `type:"structure"`
@@ -2441,8 +2628,8 @@ func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAut
}
// A document that contains additional information about the authorization status
-// of a request from an encoded message that is returned in response to an AWS
-// request.
+// of a request from an encoded message that is returned in response to an Amazon
+// Web Services request.
type DecodeAuthorizationMessageOutput struct {
_ struct{} `type:"structure"`
@@ -2554,7 +2741,7 @@ func (s *GetAccessKeyInfoInput) SetAccessKeyId(v string) *GetAccessKeyInfoInput
type GetAccessKeyInfoOutput struct {
_ struct{} `type:"structure"`
- // The number used to identify the AWS account.
+ // The number used to identify the Amazon Web Services account.
Account *string `type:"string"`
}
@@ -2593,11 +2780,11 @@ func (s GetCallerIdentityInput) GoString() string {
type GetCallerIdentityOutput struct {
_ struct{} `type:"structure"`
- // The AWS account ID number of the account that owns or contains the calling
- // entity.
+ // The Amazon Web Services account ID number of the account that owns or contains
+ // the calling entity.
Account *string `type:"string"`
- // The AWS ARN associated with the calling entity.
+ // The Amazon Web Services ARN associated with the calling entity.
Arn *string `min:"20" type:"string"`
// The unique identifier of the calling entity. The exact value depends on the
@@ -2641,9 +2828,10 @@ type GetFederationTokenInput struct {
// The duration, in seconds, that the session should last. Acceptable durations
// for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds
// (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained
- // using AWS account root user credentials are restricted to a maximum of 3,600
- // seconds (one hour). If the specified duration is longer than one hour, the
- // session obtained by using root user credentials defaults to one hour.
+ // using Amazon Web Services account root user credentials are restricted to
+ // a maximum of 3,600 seconds (one hour). If the specified duration is longer
+ // than one hour, the session obtained by using root user credentials defaults
+ // to one hour.
DurationSeconds *int64 `min:"900" type:"integer"`
// The name of the federated user. The name is used as an identifier for the
@@ -2682,17 +2870,17 @@ type GetFederationTokenInput struct {
// by the policy. These permissions are granted in addition to the permissions
// that are granted by the session policies.
//
- // The plain text that you use for both inline and managed session policies
- // can't exceed 2,048 characters. The JSON policy characters can be any ASCII
- // character from the space character to the end of the valid character list
- // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A),
- // and carriage return (\u000D) characters.
- //
- // An AWS conversion compresses the passed session policies and session tags
- // into a packed binary format that has a separate limit. Your request can fail
- // for this limit even if your plain text meets the other requirements. The
- // PackedPolicySize response element indicates by percentage how close the policies
- // and tags for your request are to the upper size limit.
+ // The plaintext that you use for both inline and managed session policies can't
+ // exceed 2,048 characters. The JSON policy characters can be any ASCII character
+ // from the space character to the end of the valid character list (\u0020 through
+ // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
+ // return (\u000D) characters.
+ //
+ // An Amazon Web Services conversion compresses the passed session policies
+ // and session tags into a packed binary format that has a separate limit. Your
+ // request can fail for this limit even if your plaintext meets the other requirements.
+ // The PackedPolicySize response element indicates by percentage how close the
+ // policies and tags for your request are to the upper size limit.
Policy *string `min:"1" type:"string"`
// The Amazon Resource Names (ARNs) of the IAM managed policies that you want
@@ -2702,11 +2890,11 @@ type GetFederationTokenInput struct {
// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
// to this operation. You can pass a single JSON policy document to use as an
// inline session policy. You can also specify up to 10 managed policies to
- // use as managed session policies. The plain text that you use for both inline
+ // use as managed session policies. The plaintext that you use for both inline
// and managed session policies can't exceed 2,048 characters. You can provide
// up to 10 managed policy ARNs. For more information about ARNs, see Amazon
- // Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
- // in the AWS General Reference.
+ // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // in the Amazon Web Services General Reference.
//
// This parameter is optional. However, if you do not pass any session policies,
// then the resulting federated user session has no permissions.
@@ -2725,11 +2913,11 @@ type GetFederationTokenInput struct {
// by the policy. These permissions are granted in addition to the permissions
// that are granted by the session policies.
//
- // An AWS conversion compresses the passed session policies and session tags
- // into a packed binary format that has a separate limit. Your request can fail
- // for this limit even if your plain text meets the other requirements. The
- // PackedPolicySize response element indicates by percentage how close the policies
- // and tags for your request are to the upper size limit.
+ // An Amazon Web Services conversion compresses the passed session policies
+ // and session tags into a packed binary format that has a separate limit. Your
+ // request can fail for this limit even if your plaintext meets the other requirements.
+ // The PackedPolicySize response element indicates by percentage how close the
+ // policies and tags for your request are to the upper size limit.
PolicyArns []*PolicyDescriptorType `type:"list"`
// A list of session tags. Each session tag consists of a key name and an associated
@@ -2737,17 +2925,17 @@ type GetFederationTokenInput struct {
// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
// in the IAM User Guide.
//
- // This parameter is optional. You can pass up to 50 session tags. The plain
- // text session tag keys can’t exceed 128 characters and the values can’t
- // exceed 256 characters. For these and additional limits, see IAM and STS Character
+ // This parameter is optional. You can pass up to 50 session tags. The plaintext
+ // session tag keys can’t exceed 128 characters and the values can’t exceed
+ // 256 characters. For these and additional limits, see IAM and STS Character
// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
// in the IAM User Guide.
//
- // An AWS conversion compresses the passed session policies and session tags
- // into a packed binary format that has a separate limit. Your request can fail
- // for this limit even if your plain text meets the other requirements. The
- // PackedPolicySize response element indicates by percentage how close the policies
- // and tags for your request are to the upper size limit.
+ // An Amazon Web Services conversion compresses the passed session policies
+ // and session tags into a packed binary format that has a separate limit. Your
+ // request can fail for this limit even if your plaintext meets the other requirements.
+ // The PackedPolicySize response element indicates by percentage how close the
+ // policies and tags for your request are to the upper size limit.
//
// You can pass a session tag with the same key as a tag that is already attached
// to the user you are federating. When you do, session tags override a user
@@ -2844,7 +3032,8 @@ func (s *GetFederationTokenInput) SetTags(v []*Tag) *GetFederationTokenInput {
}
// Contains the response to a successful GetFederationToken request, including
-// temporary AWS credentials that can be used to make AWS requests.
+// temporary Amazon Web Services credentials that can be used to make Amazon
+// Web Services requests.
type GetFederationTokenOutput struct {
_ struct{} `type:"structure"`
@@ -2902,9 +3091,9 @@ type GetSessionTokenInput struct {
// The duration, in seconds, that the credentials should remain valid. Acceptable
// durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600
// seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions
- // for AWS account owners are restricted to a maximum of 3,600 seconds (one
- // hour). If the duration is longer than one hour, the session for AWS account
- // owners defaults to one hour.
+ // for Amazon Web Services account owners are restricted to a maximum of 3,600
+ // seconds (one hour). If the duration is longer than one hour, the session
+ // for Amazon Web Services account owners defaults to one hour.
DurationSeconds *int64 `min:"900" type:"integer"`
// The identification number of the MFA device that is associated with the IAM
@@ -2912,7 +3101,7 @@ type GetSessionTokenInput struct {
// user has a policy that requires MFA authentication. The value is either the
// serial number for a hardware device (such as GAHT12345678) or an Amazon Resource
// Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
- // You can find the device for an IAM user by going to the AWS Management Console
+ // You can find the device for an IAM user by going to the Management Console
// and viewing the user's security credentials.
//
// The regex used to validate this parameter is a string of characters consisting
@@ -2979,7 +3168,8 @@ func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput {
}
// Contains the response to a successful GetSessionToken request, including
-// temporary AWS credentials that can be used to make AWS requests.
+// temporary Amazon Web Services credentials that can be used to make Amazon
+// Web Services requests.
type GetSessionTokenOutput struct {
_ struct{} `type:"structure"`
@@ -3014,8 +3204,8 @@ type PolicyDescriptorType struct {
// The Amazon Resource Name (ARN) of the IAM managed policy to use as a session
// policy for the role. For more information about ARNs, see Amazon Resource
- // Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
- // in the AWS General Reference.
+ // Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // in the Amazon Web Services General Reference.
Arn *string `locationName:"arn" min:"20" type:"string"`
}
@@ -3050,9 +3240,9 @@ func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType {
// You can pass custom key-value pair attributes when you assume a role or federate
// a user. These are called session tags. You can then use the session tags
-// to control access to resources. For more information, see Tagging AWS STS
-// Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
-// in the IAM User Guide.
+// to control access to resources. For more information, see Tagging STS Sessions
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in
+// the IAM User Guide.
type Tag struct {
_ struct{} `type:"structure"`
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
index cb1debbaa..2d98d9235 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
@@ -3,11 +3,11 @@
// Package sts provides the client and types for making API
// requests to AWS Security Token Service.
//
-// AWS Security Token Service (STS) enables you to request temporary, limited-privilege
-// credentials for AWS Identity and Access Management (IAM) users or for users
-// that you authenticate (federated users). This guide provides descriptions
-// of the STS API. For more information about using this service, see Temporary
-// Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
+// Security Token Service (STS) enables you to request temporary, limited-privilege
+// credentials for Identity and Access Management (IAM) users or for users that
+// you authenticate (federated users). This guide provides descriptions of the
+// STS API. For more information about using this service, see Temporary Security
+// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
//
// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service.
//
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
index a233f542e..7897d70c8 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
@@ -42,8 +42,9 @@ const (
// ErrCodeInvalidIdentityTokenException for service response error code
// "InvalidIdentityToken".
//
- // The web identity token that was passed could not be validated by AWS. Get
- // a new identity token from the identity provider and then retry the request.
+ // The web identity token that was passed could not be validated by Amazon Web
+ // Services. Get a new identity token from the identity provider and then retry
+ // the request.
ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken"
// ErrCodeMalformedPolicyDocumentException for service response error code
@@ -57,11 +58,11 @@ const (
// "PackedPolicyTooLarge".
//
// The request was rejected because the total packed size of the session policies
- // and session tags combined was too large. An AWS conversion compresses the
- // session policy document, session policy ARNs, and session tags into a packed
- // binary format that has a separate limit. The error message indicates by percentage
- // how close the policies and tags are to the upper size limit. For more information,
- // see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+ // and session tags combined was too large. An Amazon Web Services conversion
+ // compresses the session policy document, session policy ARNs, and session
+ // tags into a packed binary format that has a separate limit. The error message
+ // indicates by percentage how close the policies and tags are to the upper
+ // size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
// in the IAM User Guide.
//
// You could receive this error even though you meet other defined session policy
@@ -76,7 +77,8 @@ const (
// STS is not activated in the requested region for the account that is being
// asked to generate credentials. The account administrator must use the IAM
// console to activate STS in that region. For more information, see Activating
- // and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+ // and Deactivating Amazon Web Services STS in an Amazon Web Services Region
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
// in the IAM User Guide.
ErrCodeRegionDisabledException = "RegionDisabledException"
)
diff --git a/vendor/github.com/cespare/xxhash/v2/.travis.yml b/vendor/github.com/cespare/xxhash/v2/.travis.yml
deleted file mode 100644
index c516ea88d..000000000
--- a/vendor/github.com/cespare/xxhash/v2/.travis.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-language: go
-go:
- - "1.x"
- - master
-env:
- - TAGS=""
- - TAGS="-tags purego"
-script: go test $TAGS -v ./...
diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md
index 2fd8693c2..792b4a60b 100644
--- a/vendor/github.com/cespare/xxhash/v2/README.md
+++ b/vendor/github.com/cespare/xxhash/v2/README.md
@@ -1,7 +1,7 @@
# xxhash
-[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
-[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash)
+[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2)
+[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml)
xxhash is a Go implementation of the 64-bit
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
@@ -64,4 +64,6 @@ $ go test -benchtime 10s -bench '/xxhash,direct,bytes'
- [InfluxDB](https://github.com/influxdata/influxdb)
- [Prometheus](https://github.com/prometheus/prometheus)
+- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
- [FreeCache](https://github.com/coocood/freecache)
+- [FastCache](https://github.com/VictoriaMetrics/fastcache)
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go
index db0b35fbe..15c835d54 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go
@@ -193,7 +193,6 @@ func (d *Digest) UnmarshalBinary(b []byte) error {
b, d.v4 = consumeUint64(b)
b, d.total = consumeUint64(b)
copy(d.mem[:], b)
- b = b[len(d.mem):]
d.n = int(d.total % uint64(len(d.mem)))
return nil
}
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
index d580e32ae..be8db5bf7 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
@@ -6,7 +6,7 @@
// Register allocation:
// AX h
-// CX pointer to advance through b
+// SI pointer to advance through b
// DX n
// BX loop end
// R8 v1, k1
@@ -16,39 +16,39 @@
// R12 tmp
// R13 prime1v
// R14 prime2v
-// R15 prime4v
+// DI prime4v
-// round reads from and advances the buffer pointer in CX.
+// round reads from and advances the buffer pointer in SI.
// It assumes that R13 has prime1v and R14 has prime2v.
#define round(r) \
- MOVQ (CX), R12 \
- ADDQ $8, CX \
+ MOVQ (SI), R12 \
+ ADDQ $8, SI \
IMULQ R14, R12 \
ADDQ R12, r \
ROLQ $31, r \
IMULQ R13, r
// mergeRound applies a merge round on the two registers acc and val.
-// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
+// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
#define mergeRound(acc, val) \
IMULQ R14, val \
ROLQ $31, val \
IMULQ R13, val \
XORQ val, acc \
IMULQ R13, acc \
- ADDQ R15, acc
+ ADDQ DI, acc
// func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOSPLIT, $0-32
// Load fixed primes.
MOVQ ·prime1v(SB), R13
MOVQ ·prime2v(SB), R14
- MOVQ ·prime4v(SB), R15
+ MOVQ ·prime4v(SB), DI
// Load slice.
- MOVQ b_base+0(FP), CX
+ MOVQ b_base+0(FP), SI
MOVQ b_len+8(FP), DX
- LEAQ (CX)(DX*1), BX
+ LEAQ (SI)(DX*1), BX
// The first loop limit will be len(b)-32.
SUBQ $32, BX
@@ -65,14 +65,14 @@ TEXT ·Sum64(SB), NOSPLIT, $0-32
XORQ R11, R11
SUBQ R13, R11
- // Loop until CX > BX.
+ // Loop until SI > BX.
blockLoop:
round(R8)
round(R9)
round(R10)
round(R11)
- CMPQ CX, BX
+ CMPQ SI, BX
JLE blockLoop
MOVQ R8, AX
@@ -100,16 +100,16 @@ noBlocks:
afterBlocks:
ADDQ DX, AX
- // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
+ // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
ADDQ $24, BX
- CMPQ CX, BX
+ CMPQ SI, BX
JG fourByte
wordLoop:
// Calculate k1.
- MOVQ (CX), R8
- ADDQ $8, CX
+ MOVQ (SI), R8
+ ADDQ $8, SI
IMULQ R14, R8
ROLQ $31, R8
IMULQ R13, R8
@@ -117,18 +117,18 @@ wordLoop:
XORQ R8, AX
ROLQ $27, AX
IMULQ R13, AX
- ADDQ R15, AX
+ ADDQ DI, AX
- CMPQ CX, BX
+ CMPQ SI, BX
JLE wordLoop
fourByte:
ADDQ $4, BX
- CMPQ CX, BX
+ CMPQ SI, BX
JG singles
- MOVL (CX), R8
- ADDQ $4, CX
+ MOVL (SI), R8
+ ADDQ $4, SI
IMULQ R13, R8
XORQ R8, AX
@@ -138,19 +138,19 @@ fourByte:
singles:
ADDQ $4, BX
- CMPQ CX, BX
+ CMPQ SI, BX
JGE finalize
singlesLoop:
- MOVBQZX (CX), R12
- ADDQ $1, CX
+ MOVBQZX (SI), R12
+ ADDQ $1, SI
IMULQ ·prime5v(SB), R12
XORQ R12, AX
ROLQ $11, AX
IMULQ R13, AX
- CMPQ CX, BX
+ CMPQ SI, BX
JL singlesLoop
finalize:
@@ -179,9 +179,9 @@ TEXT ·writeBlocks(SB), NOSPLIT, $0-40
MOVQ ·prime2v(SB), R14
// Load slice.
- MOVQ b_base+8(FP), CX
+ MOVQ b_base+8(FP), SI
MOVQ b_len+16(FP), DX
- LEAQ (CX)(DX*1), BX
+ LEAQ (SI)(DX*1), BX
SUBQ $32, BX
// Load vN from d.
@@ -199,7 +199,7 @@ blockLoop:
round(R10)
round(R11)
- CMPQ CX, BX
+ CMPQ SI, BX
JLE blockLoop
// Copy vN back to d.
@@ -208,8 +208,8 @@ blockLoop:
MOVQ R10, 16(AX)
MOVQ R11, 24(AX)
- // The number of bytes written is CX minus the old base pointer.
- SUBQ b_base+8(FP), CX
- MOVQ CX, ret+32(FP)
+ // The number of bytes written is SI minus the old base pointer.
+ SUBQ b_base+8(FP), SI
+ MOVQ SI, ret+32(FP)
RET
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
index 53bf76efb..376e0ca2e 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
@@ -6,41 +6,52 @@
package xxhash
import (
- "reflect"
"unsafe"
)
-// Notes:
+// In the future it's possible that compiler optimizations will make these
+// XxxString functions unnecessary by realizing that calls such as
+// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
+// If that happens, even if we keep these functions they can be replaced with
+// the trivial safe code.
+
+// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is:
//
-// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
-// for some discussion about these unsafe conversions.
+// var b []byte
+// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
+// bh.Len = len(s)
+// bh.Cap = len(s)
//
-// In the future it's possible that compiler optimizations will make these
-// unsafe operations unnecessary: https://golang.org/issue/2205.
+// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough
+// weight to this sequence of expressions that any function that uses it will
+// not be inlined. Instead, the functions below use a different unsafe
+// conversion designed to minimize the inliner weight and allow both to be
+// inlined. There is also a test (TestInlining) which verifies that these are
+// inlined.
//
-// Both of these wrapper functions still incur function call overhead since they
-// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write
-// for strings to squeeze out a bit more speed. Mid-stack inlining should
-// eventually fix this.
+// See https://github.com/golang/go/issues/42739 for discussion.
// Sum64String computes the 64-bit xxHash digest of s.
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
func Sum64String(s string) uint64 {
- var b []byte
- bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
- bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
- bh.Len = len(s)
- bh.Cap = len(s)
+ b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
return Sum64(b)
}
// WriteString adds more data to d. It always returns len(s), nil.
// It may be faster than Write([]byte(s)) by avoiding a copy.
func (d *Digest) WriteString(s string) (n int, err error) {
- var b []byte
- bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
- bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
- bh.Len = len(s)
- bh.Cap = len(s)
- return d.Write(b)
+ d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
+ // d.Write always returns len(s), nil.
+ // Ignoring the return output and returning these fixed values buys a
+ // savings of 6 in the inliner's cost model.
+ return len(s), nil
+}
+
+// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
+// of the first two words is the same as the layout of a string.
+type sliceHeader struct {
+ s string
+ cap int
}
diff --git a/vendor/github.com/cncf/xds/go/LICENSE b/vendor/github.com/cncf/xds/go/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.go
new file mode 100644
index 000000000..5c77558cc
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.go
@@ -0,0 +1,231 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: udpa/annotations/migrate.proto
+
+package udpa_annotations
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type MigrateAnnotation struct {
+ Rename string `protobuf:"bytes,1,opt,name=rename,proto3" json:"rename,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MigrateAnnotation) Reset() { *m = MigrateAnnotation{} }
+func (m *MigrateAnnotation) String() string { return proto.CompactTextString(m) }
+func (*MigrateAnnotation) ProtoMessage() {}
+func (*MigrateAnnotation) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba8191732d0e246d, []int{0}
+}
+
+func (m *MigrateAnnotation) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_MigrateAnnotation.Unmarshal(m, b)
+}
+func (m *MigrateAnnotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_MigrateAnnotation.Marshal(b, m, deterministic)
+}
+func (m *MigrateAnnotation) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MigrateAnnotation.Merge(m, src)
+}
+func (m *MigrateAnnotation) XXX_Size() int {
+ return xxx_messageInfo_MigrateAnnotation.Size(m)
+}
+func (m *MigrateAnnotation) XXX_DiscardUnknown() {
+ xxx_messageInfo_MigrateAnnotation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MigrateAnnotation proto.InternalMessageInfo
+
+func (m *MigrateAnnotation) GetRename() string {
+ if m != nil {
+ return m.Rename
+ }
+ return ""
+}
+
+type FieldMigrateAnnotation struct {
+ Rename string `protobuf:"bytes,1,opt,name=rename,proto3" json:"rename,omitempty"`
+ OneofPromotion string `protobuf:"bytes,2,opt,name=oneof_promotion,json=oneofPromotion,proto3" json:"oneof_promotion,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *FieldMigrateAnnotation) Reset() { *m = FieldMigrateAnnotation{} }
+func (m *FieldMigrateAnnotation) String() string { return proto.CompactTextString(m) }
+func (*FieldMigrateAnnotation) ProtoMessage() {}
+func (*FieldMigrateAnnotation) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba8191732d0e246d, []int{1}
+}
+
+func (m *FieldMigrateAnnotation) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_FieldMigrateAnnotation.Unmarshal(m, b)
+}
+func (m *FieldMigrateAnnotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_FieldMigrateAnnotation.Marshal(b, m, deterministic)
+}
+func (m *FieldMigrateAnnotation) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_FieldMigrateAnnotation.Merge(m, src)
+}
+func (m *FieldMigrateAnnotation) XXX_Size() int {
+ return xxx_messageInfo_FieldMigrateAnnotation.Size(m)
+}
+func (m *FieldMigrateAnnotation) XXX_DiscardUnknown() {
+ xxx_messageInfo_FieldMigrateAnnotation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FieldMigrateAnnotation proto.InternalMessageInfo
+
+func (m *FieldMigrateAnnotation) GetRename() string {
+ if m != nil {
+ return m.Rename
+ }
+ return ""
+}
+
+func (m *FieldMigrateAnnotation) GetOneofPromotion() string {
+ if m != nil {
+ return m.OneofPromotion
+ }
+ return ""
+}
+
+type FileMigrateAnnotation struct {
+ MoveToPackage string `protobuf:"bytes,2,opt,name=move_to_package,json=moveToPackage,proto3" json:"move_to_package,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *FileMigrateAnnotation) Reset() { *m = FileMigrateAnnotation{} }
+func (m *FileMigrateAnnotation) String() string { return proto.CompactTextString(m) }
+func (*FileMigrateAnnotation) ProtoMessage() {}
+func (*FileMigrateAnnotation) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba8191732d0e246d, []int{2}
+}
+
+func (m *FileMigrateAnnotation) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_FileMigrateAnnotation.Unmarshal(m, b)
+}
+func (m *FileMigrateAnnotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_FileMigrateAnnotation.Marshal(b, m, deterministic)
+}
+func (m *FileMigrateAnnotation) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_FileMigrateAnnotation.Merge(m, src)
+}
+func (m *FileMigrateAnnotation) XXX_Size() int {
+ return xxx_messageInfo_FileMigrateAnnotation.Size(m)
+}
+func (m *FileMigrateAnnotation) XXX_DiscardUnknown() {
+ xxx_messageInfo_FileMigrateAnnotation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FileMigrateAnnotation proto.InternalMessageInfo
+
+func (m *FileMigrateAnnotation) GetMoveToPackage() string {
+ if m != nil {
+ return m.MoveToPackage
+ }
+ return ""
+}
+
+var E_MessageMigrate = &proto.ExtensionDesc{
+ ExtendedType: (*descriptor.MessageOptions)(nil),
+ ExtensionType: (*MigrateAnnotation)(nil),
+ Field: 171962766,
+ Name: "udpa.annotations.message_migrate",
+ Tag: "bytes,171962766,opt,name=message_migrate",
+ Filename: "udpa/annotations/migrate.proto",
+}
+
+var E_FieldMigrate = &proto.ExtensionDesc{
+ ExtendedType: (*descriptor.FieldOptions)(nil),
+ ExtensionType: (*FieldMigrateAnnotation)(nil),
+ Field: 171962766,
+ Name: "udpa.annotations.field_migrate",
+ Tag: "bytes,171962766,opt,name=field_migrate",
+ Filename: "udpa/annotations/migrate.proto",
+}
+
+var E_EnumMigrate = &proto.ExtensionDesc{
+ ExtendedType: (*descriptor.EnumOptions)(nil),
+ ExtensionType: (*MigrateAnnotation)(nil),
+ Field: 171962766,
+ Name: "udpa.annotations.enum_migrate",
+ Tag: "bytes,171962766,opt,name=enum_migrate",
+ Filename: "udpa/annotations/migrate.proto",
+}
+
+var E_EnumValueMigrate = &proto.ExtensionDesc{
+ ExtendedType: (*descriptor.EnumValueOptions)(nil),
+ ExtensionType: (*MigrateAnnotation)(nil),
+ Field: 171962766,
+ Name: "udpa.annotations.enum_value_migrate",
+ Tag: "bytes,171962766,opt,name=enum_value_migrate",
+ Filename: "udpa/annotations/migrate.proto",
+}
+
+var E_FileMigrate = &proto.ExtensionDesc{
+ ExtendedType: (*descriptor.FileOptions)(nil),
+ ExtensionType: (*FileMigrateAnnotation)(nil),
+ Field: 171962766,
+ Name: "udpa.annotations.file_migrate",
+ Tag: "bytes,171962766,opt,name=file_migrate",
+ Filename: "udpa/annotations/migrate.proto",
+}
+
+func init() {
+ proto.RegisterType((*MigrateAnnotation)(nil), "udpa.annotations.MigrateAnnotation")
+ proto.RegisterType((*FieldMigrateAnnotation)(nil), "udpa.annotations.FieldMigrateAnnotation")
+ proto.RegisterType((*FileMigrateAnnotation)(nil), "udpa.annotations.FileMigrateAnnotation")
+ proto.RegisterExtension(E_MessageMigrate)
+ proto.RegisterExtension(E_FieldMigrate)
+ proto.RegisterExtension(E_EnumMigrate)
+ proto.RegisterExtension(E_EnumValueMigrate)
+ proto.RegisterExtension(E_FileMigrate)
+}
+
+func init() { proto.RegisterFile("udpa/annotations/migrate.proto", fileDescriptor_ba8191732d0e246d) }
+
+var fileDescriptor_ba8191732d0e246d = []byte{
+ // 349 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x92, 0xcd, 0x4a, 0xfb, 0x40,
+ 0x14, 0xc5, 0xe9, 0x7f, 0x51, 0xf8, 0x4f, 0x3f, 0x0d, 0x58, 0x8a, 0xf8, 0x51, 0x2b, 0xd8, 0x82,
+ 0x30, 0x01, 0xdd, 0x75, 0x23, 0x2e, 0xec, 0xae, 0x58, 0x83, 0x08, 0xae, 0xc2, 0xb4, 0xbd, 0x09,
+ 0xa1, 0x99, 0xb9, 0x43, 0x32, 0xa9, 0x6f, 0xe1, 0x4b, 0xfa, 0x20, 0xca, 0x4c, 0x92, 0xb6, 0x38,
+ 0x41, 0xa4, 0xcb, 0x9c, 0x7b, 0xef, 0xf9, 0xe5, 0x1c, 0x86, 0x9c, 0x67, 0x2b, 0xc9, 0x5c, 0x26,
+ 0x04, 0x2a, 0xa6, 0x22, 0x14, 0xa9, 0xcb, 0xa3, 0x30, 0x61, 0x0a, 0xa8, 0x4c, 0x50, 0xa1, 0xd3,
+ 0xd5, 0x73, 0xba, 0x37, 0x3f, 0x19, 0x84, 0x88, 0x61, 0x0c, 0xae, 0x99, 0x2f, 0xb2, 0xc0, 0x5d,
+ 0x41, 0xba, 0x4c, 0x22, 0xa9, 0x30, 0xc9, 0x6f, 0x86, 0x37, 0xe4, 0x68, 0x96, 0x9b, 0x3c, 0x6c,
+ 0xef, 0x9c, 0x1e, 0xa9, 0x27, 0x20, 0x18, 0x87, 0x7e, 0x6d, 0x50, 0x1b, 0xff, 0xf7, 0x8a, 0xaf,
+ 0xe1, 0x1b, 0xe9, 0x4d, 0x23, 0x88, 0x57, 0x7f, 0xbe, 0x70, 0x46, 0xa4, 0x83, 0x02, 0x30, 0xf0,
+ 0x65, 0x82, 0x1c, 0xf5, 0x6a, 0xff, 0x9f, 0x59, 0x68, 0x1b, 0x79, 0x5e, 0xaa, 0xc3, 0x7b, 0x72,
+ 0x3c, 0x8d, 0x62, 0xb0, 0x9d, 0xaf, 0x49, 0x87, 0xe3, 0x06, 0x7c, 0x85, 0xbe, 0x64, 0xcb, 0x35,
+ 0x0b, 0xa1, 0x70, 0x68, 0x69, 0xf9, 0x05, 0xe7, 0xb9, 0x38, 0x91, 0xa4, 0xc3, 0x21, 0x4d, 0x59,
+ 0x08, 0x7e, 0xd1, 0x8a, 0x73, 0x41, 0xf3, 0xf8, 0xb4, 0x8c, 0x4f, 0x67, 0xf9, 0xc6, 0x93, 0x34,
+ 0xf5, 0xf4, 0x3f, 0x3e, 0xbf, 0x9e, 0x07, 0xb5, 0x71, 0xe3, 0xf6, 0x8a, 0xfe, 0xac, 0x8e, 0x5a,
+ 0x7f, 0xe2, 0xb5, 0x0b, 0xff, 0x62, 0x32, 0x41, 0xd2, 0x0a, 0x74, 0x1b, 0x5b, 0xde, 0x99, 0xc5,
+ 0x33, 0x6d, 0x59, 0xb4, 0xb1, 0x4d, 0xab, 0xae, 0xd5, 0x6b, 0x06, 0x7b, 0xfa, 0x24, 0x24, 0x4d,
+ 0x10, 0x19, 0xdf, 0xf2, 0x4e, 0x2d, 0xde, 0xa3, 0xc8, 0xf8, 0x61, 0xe1, 0x1a, 0xda, 0xb9, 0x04,
+ 0xbd, 0x13, 0xc7, 0x80, 0x36, 0x2c, 0xce, 0x76, 0x75, 0x5e, 0x56, 0xe2, 0x5e, 0xf5, 0xce, 0x61,
+ 0xcc, 0x2e, 0x94, 0xf7, 0x25, 0x78, 0x4d, 0x9a, 0x41, 0x14, 0xc3, 0x2f, 0x09, 0xf5, 0x23, 0xb1,
+ 0x68, 0xa3, 0xaa, 0x42, 0x2b, 0x1e, 0x93, 0xd7, 0x08, 0x76, 0xf2, 0xa2, 0x6e, 0x4c, 0xef, 0xbe,
+ 0x03, 0x00, 0x00, 0xff, 0xff, 0x62, 0x65, 0xc8, 0x45, 0x57, 0x03, 0x00, 0x00,
+}
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.validate.go
new file mode 100644
index 000000000..6da3f833c
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.validate.go
@@ -0,0 +1,246 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: udpa/annotations/migrate.proto
+
+package udpa_annotations
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// define the regex for a UUID once up-front
+var _migrate_uuidPattern = regexp.MustCompile("^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$")
+
+// Validate checks the field values on MigrateAnnotation with the rules defined
+// in the proto definition for this message. If any rules are violated, an
+// error is returned.
+func (m *MigrateAnnotation) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ // no validation rules for Rename
+
+ return nil
+}
+
+// MigrateAnnotationValidationError is the validation error returned by
+// MigrateAnnotation.Validate if the designated constraints aren't met.
+type MigrateAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e MigrateAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e MigrateAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e MigrateAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e MigrateAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e MigrateAnnotationValidationError) ErrorName() string {
+ return "MigrateAnnotationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e MigrateAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMigrateAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = MigrateAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = MigrateAnnotationValidationError{}
+
+// Validate checks the field values on FieldMigrateAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *FieldMigrateAnnotation) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ // no validation rules for Rename
+
+ // no validation rules for OneofPromotion
+
+ return nil
+}
+
+// FieldMigrateAnnotationValidationError is the validation error returned by
+// FieldMigrateAnnotation.Validate if the designated constraints aren't met.
+type FieldMigrateAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e FieldMigrateAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e FieldMigrateAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e FieldMigrateAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e FieldMigrateAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e FieldMigrateAnnotationValidationError) ErrorName() string {
+ return "FieldMigrateAnnotationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e FieldMigrateAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sFieldMigrateAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = FieldMigrateAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = FieldMigrateAnnotationValidationError{}
+
+// Validate checks the field values on FileMigrateAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *FileMigrateAnnotation) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ // no validation rules for MoveToPackage
+
+ return nil
+}
+
+// FileMigrateAnnotationValidationError is the validation error returned by
+// FileMigrateAnnotation.Validate if the designated constraints aren't met.
+type FileMigrateAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e FileMigrateAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e FileMigrateAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e FileMigrateAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e FileMigrateAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e FileMigrateAnnotationValidationError) ErrorName() string {
+ return "FileMigrateAnnotationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e FileMigrateAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sFileMigrateAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = FileMigrateAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = FileMigrateAnnotationValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.go
new file mode 100644
index 000000000..e99e20c0a
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.go
@@ -0,0 +1,109 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: udpa/annotations/security.proto
+
+package udpa_annotations
+
+import (
+ fmt "fmt"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ _ "github.com/golang/protobuf/ptypes/any"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type FieldSecurityAnnotation struct {
+ ConfigureForUntrustedDownstream bool `protobuf:"varint,1,opt,name=configure_for_untrusted_downstream,json=configureForUntrustedDownstream,proto3" json:"configure_for_untrusted_downstream,omitempty"`
+ ConfigureForUntrustedUpstream bool `protobuf:"varint,2,opt,name=configure_for_untrusted_upstream,json=configureForUntrustedUpstream,proto3" json:"configure_for_untrusted_upstream,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *FieldSecurityAnnotation) Reset() { *m = FieldSecurityAnnotation{} }
+func (m *FieldSecurityAnnotation) String() string { return proto.CompactTextString(m) }
+func (*FieldSecurityAnnotation) ProtoMessage() {}
+func (*FieldSecurityAnnotation) Descriptor() ([]byte, []int) {
+ return fileDescriptor_43b150013eccfb0a, []int{0}
+}
+
+func (m *FieldSecurityAnnotation) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_FieldSecurityAnnotation.Unmarshal(m, b)
+}
+func (m *FieldSecurityAnnotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_FieldSecurityAnnotation.Marshal(b, m, deterministic)
+}
+func (m *FieldSecurityAnnotation) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_FieldSecurityAnnotation.Merge(m, src)
+}
+func (m *FieldSecurityAnnotation) XXX_Size() int {
+ return xxx_messageInfo_FieldSecurityAnnotation.Size(m)
+}
+func (m *FieldSecurityAnnotation) XXX_DiscardUnknown() {
+ xxx_messageInfo_FieldSecurityAnnotation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FieldSecurityAnnotation proto.InternalMessageInfo
+
+func (m *FieldSecurityAnnotation) GetConfigureForUntrustedDownstream() bool {
+ if m != nil {
+ return m.ConfigureForUntrustedDownstream
+ }
+ return false
+}
+
+func (m *FieldSecurityAnnotation) GetConfigureForUntrustedUpstream() bool {
+ if m != nil {
+ return m.ConfigureForUntrustedUpstream
+ }
+ return false
+}
+
+var E_Security = &proto.ExtensionDesc{
+ ExtendedType: (*descriptor.FieldOptions)(nil),
+ ExtensionType: (*FieldSecurityAnnotation)(nil),
+ Field: 11122993,
+ Name: "udpa.annotations.security",
+ Tag: "bytes,11122993,opt,name=security",
+ Filename: "udpa/annotations/security.proto",
+}
+
+func init() {
+ proto.RegisterType((*FieldSecurityAnnotation)(nil), "udpa.annotations.FieldSecurityAnnotation")
+ proto.RegisterExtension(E_Security)
+}
+
+func init() { proto.RegisterFile("udpa/annotations/security.proto", fileDescriptor_43b150013eccfb0a) }
+
+var fileDescriptor_43b150013eccfb0a = []byte{
+ // 276 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x90, 0xb1, 0x4a, 0x03, 0x31,
+ 0x18, 0xc7, 0xb9, 0x82, 0xe5, 0x88, 0x8b, 0xdc, 0xd2, 0x5a, 0x38, 0x7a, 0x74, 0xaa, 0x4b, 0x02,
+ 0xba, 0xb9, 0x29, 0x52, 0x07, 0x07, 0xa1, 0xd2, 0xf9, 0x48, 0x2f, 0xb9, 0x23, 0x70, 0xe6, 0x0b,
+ 0xc9, 0x17, 0xc5, 0xcd, 0xa7, 0x71, 0x15, 0x57, 0x9f, 0xc0, 0xd5, 0x57, 0xf1, 0x09, 0xc4, 0x5c,
+ 0xee, 0x90, 0x6a, 0xb7, 0x84, 0xef, 0xf7, 0xfd, 0x92, 0xff, 0x9f, 0xcc, 0xbd, 0x30, 0x9c, 0x71,
+ 0xad, 0x01, 0x39, 0x2a, 0xd0, 0x8e, 0x39, 0x59, 0x79, 0xab, 0xf0, 0x89, 0x1a, 0x0b, 0x08, 0xd9,
+ 0xd1, 0x0f, 0x40, 0x7f, 0x01, 0xb3, 0xfc, 0xef, 0x0a, 0x72, 0xf4, 0xae, 0x5b, 0x98, 0x1d, 0x37,
+ 0x00, 0x4d, 0x2b, 0x59, 0xb8, 0x6d, 0x7d, 0xcd, 0xb8, 0x8e, 0xae, 0x59, 0xb1, 0x3b, 0x12, 0xd2,
+ 0x55, 0x56, 0x19, 0x04, 0x1b, 0x89, 0xc9, 0x03, 0x6f, 0x95, 0xe0, 0x28, 0x59, 0x7f, 0xe8, 0x06,
+ 0x8b, 0xd7, 0x84, 0x4c, 0x56, 0x4a, 0xb6, 0xe2, 0x2e, 0x7e, 0xef, 0x62, 0x78, 0x3f, 0xbb, 0x21,
+ 0x8b, 0x0a, 0x74, 0xad, 0x1a, 0x6f, 0x65, 0x59, 0x83, 0x2d, 0xbd, 0x46, 0xeb, 0x1d, 0x4a, 0x51,
+ 0x0a, 0x78, 0xd4, 0x0e, 0xad, 0xe4, 0xf7, 0xd3, 0xa4, 0x48, 0x96, 0xe9, 0x7a, 0x3e, 0x90, 0x2b,
+ 0xb0, 0x9b, 0x9e, 0xbb, 0x1a, 0xb0, 0xec, 0x9a, 0x14, 0xfb, 0x64, 0xde, 0x44, 0xd5, 0x28, 0xa8,
+ 0xf2, 0x7f, 0x55, 0x9b, 0x08, 0x9d, 0x37, 0x24, 0xed, 0xab, 0xcc, 0x72, 0xda, 0x25, 0xa7, 0x7d,
+ 0x72, 0x1a, 0xb2, 0xdc, 0x9a, 0xd0, 0xdf, 0xf4, 0xed, 0xeb, 0xe5, 0xa0, 0x48, 0x96, 0x87, 0xa7,
+ 0x27, 0x74, 0xb7, 0x6d, 0xba, 0x27, 0xf4, 0x7a, 0x90, 0x5f, 0xa6, 0xef, 0xcf, 0x1f, 0x9f, 0xe3,
+ 0x51, 0x9a, 0x6c, 0xc7, 0x41, 0x7f, 0xf6, 0x1d, 0x00, 0x00, 0xff, 0xff, 0xc9, 0xf3, 0x6a, 0x56,
+ 0xd5, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.validate.go
new file mode 100644
index 000000000..7ba30fd13
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.validate.go
@@ -0,0 +1,108 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: udpa/annotations/security.proto
+
+package udpa_annotations
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// define the regex for a UUID once up-front
+var _security_uuidPattern = regexp.MustCompile("^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$")
+
+// Validate checks the field values on FieldSecurityAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *FieldSecurityAnnotation) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ // no validation rules for ConfigureForUntrustedDownstream
+
+ // no validation rules for ConfigureForUntrustedUpstream
+
+ return nil
+}
+
+// FieldSecurityAnnotationValidationError is the validation error returned by
+// FieldSecurityAnnotation.Validate if the designated constraints aren't met.
+type FieldSecurityAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e FieldSecurityAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e FieldSecurityAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e FieldSecurityAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e FieldSecurityAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e FieldSecurityAnnotationValidationError) ErrorName() string {
+ return "FieldSecurityAnnotationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e FieldSecurityAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sFieldSecurityAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = FieldSecurityAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = FieldSecurityAnnotationValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.go
new file mode 100644
index 000000000..90bc12c5a
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.go
@@ -0,0 +1,50 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: udpa/annotations/sensitive.proto
+
+package udpa_annotations
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+var E_Sensitive = &proto.ExtensionDesc{
+ ExtendedType: (*descriptor.FieldOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 76569463,
+ Name: "udpa.annotations.sensitive",
+ Tag: "varint,76569463,opt,name=sensitive",
+ Filename: "udpa/annotations/sensitive.proto",
+}
+
+func init() {
+ proto.RegisterExtension(E_Sensitive)
+}
+
+func init() { proto.RegisterFile("udpa/annotations/sensitive.proto", fileDescriptor_abbd0dde0408189d) }
+
+var fileDescriptor_abbd0dde0408189d = []byte{
+ // 134 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x28, 0x4d, 0x29, 0x48,
+ 0xd4, 0x4f, 0xcc, 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc, 0xcf, 0x2b, 0xd6, 0x2f, 0x4e, 0xcd,
+ 0x2b, 0xce, 0x2c, 0xc9, 0x2c, 0x4b, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x00, 0xa9,
+ 0xd0, 0x43, 0x52, 0x21, 0xa5, 0x90, 0x9e, 0x9f, 0x9f, 0x9e, 0x93, 0xaa, 0x0f, 0x96, 0x4f, 0x2a,
+ 0x4d, 0xd3, 0x4f, 0x49, 0x2d, 0x4e, 0x2e, 0xca, 0x2c, 0x28, 0xc9, 0x2f, 0x82, 0xe8, 0xb1, 0xb2,
+ 0xe3, 0xe2, 0x84, 0x1b, 0x23, 0x24, 0xab, 0x07, 0x51, 0xaf, 0x07, 0x53, 0xaf, 0xe7, 0x96, 0x99,
+ 0x9a, 0x93, 0xe2, 0x5f, 0x00, 0x36, 0x4d, 0xe2, 0xfb, 0xb6, 0x83, 0x2a, 0x0a, 0x8c, 0x1a, 0x1c,
+ 0x41, 0x08, 0x2d, 0x49, 0x6c, 0x60, 0xa5, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x5f, 0xba,
+ 0xeb, 0x73, 0x9e, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.validate.go
new file mode 100644
index 000000000..8b2b3f8c7
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.validate.go
@@ -0,0 +1,37 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: udpa/annotations/sensitive.proto
+
+package udpa_annotations
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// define the regex for a UUID once up-front
+var _sensitive_uuidPattern = regexp.MustCompile("^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$")
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.go
new file mode 100644
index 000000000..a9c0c6f0a
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.go
@@ -0,0 +1,141 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: udpa/annotations/status.proto
+
+package udpa_annotations
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type PackageVersionStatus int32
+
+const (
+ PackageVersionStatus_UNKNOWN PackageVersionStatus = 0
+ PackageVersionStatus_FROZEN PackageVersionStatus = 1
+ PackageVersionStatus_ACTIVE PackageVersionStatus = 2
+ PackageVersionStatus_NEXT_MAJOR_VERSION_CANDIDATE PackageVersionStatus = 3
+)
+
+var PackageVersionStatus_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "FROZEN",
+ 2: "ACTIVE",
+ 3: "NEXT_MAJOR_VERSION_CANDIDATE",
+}
+
+var PackageVersionStatus_value = map[string]int32{
+ "UNKNOWN": 0,
+ "FROZEN": 1,
+ "ACTIVE": 2,
+ "NEXT_MAJOR_VERSION_CANDIDATE": 3,
+}
+
+func (x PackageVersionStatus) String() string {
+ return proto.EnumName(PackageVersionStatus_name, int32(x))
+}
+
+func (PackageVersionStatus) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_011cc2e7e491b0ff, []int{0}
+}
+
+type StatusAnnotation struct {
+ WorkInProgress bool `protobuf:"varint,1,opt,name=work_in_progress,json=workInProgress,proto3" json:"work_in_progress,omitempty"`
+ PackageVersionStatus PackageVersionStatus `protobuf:"varint,2,opt,name=package_version_status,json=packageVersionStatus,proto3,enum=udpa.annotations.PackageVersionStatus" json:"package_version_status,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *StatusAnnotation) Reset() { *m = StatusAnnotation{} }
+func (m *StatusAnnotation) String() string { return proto.CompactTextString(m) }
+func (*StatusAnnotation) ProtoMessage() {}
+func (*StatusAnnotation) Descriptor() ([]byte, []int) {
+ return fileDescriptor_011cc2e7e491b0ff, []int{0}
+}
+
+func (m *StatusAnnotation) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_StatusAnnotation.Unmarshal(m, b)
+}
+func (m *StatusAnnotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_StatusAnnotation.Marshal(b, m, deterministic)
+}
+func (m *StatusAnnotation) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StatusAnnotation.Merge(m, src)
+}
+func (m *StatusAnnotation) XXX_Size() int {
+ return xxx_messageInfo_StatusAnnotation.Size(m)
+}
+func (m *StatusAnnotation) XXX_DiscardUnknown() {
+ xxx_messageInfo_StatusAnnotation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StatusAnnotation proto.InternalMessageInfo
+
+func (m *StatusAnnotation) GetWorkInProgress() bool {
+ if m != nil {
+ return m.WorkInProgress
+ }
+ return false
+}
+
+func (m *StatusAnnotation) GetPackageVersionStatus() PackageVersionStatus {
+ if m != nil {
+ return m.PackageVersionStatus
+ }
+ return PackageVersionStatus_UNKNOWN
+}
+
+var E_FileStatus = &proto.ExtensionDesc{
+ ExtendedType: (*descriptor.FileOptions)(nil),
+ ExtensionType: (*StatusAnnotation)(nil),
+ Field: 222707719,
+ Name: "udpa.annotations.file_status",
+ Tag: "bytes,222707719,opt,name=file_status",
+ Filename: "udpa/annotations/status.proto",
+}
+
+func init() {
+ proto.RegisterEnum("udpa.annotations.PackageVersionStatus", PackageVersionStatus_name, PackageVersionStatus_value)
+ proto.RegisterType((*StatusAnnotation)(nil), "udpa.annotations.StatusAnnotation")
+ proto.RegisterExtension(E_FileStatus)
+}
+
+func init() { proto.RegisterFile("udpa/annotations/status.proto", fileDescriptor_011cc2e7e491b0ff) }
+
+var fileDescriptor_011cc2e7e491b0ff = []byte{
+ // 305 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x50, 0x4f, 0x4b, 0xc3, 0x30,
+ 0x1c, 0xb5, 0x13, 0xa6, 0x64, 0x30, 0x42, 0x18, 0x32, 0x64, 0x42, 0xd9, 0x41, 0x8a, 0x87, 0x14,
+ 0xe6, 0xcd, 0x5b, 0xd9, 0x3a, 0xa8, 0x62, 0x3a, 0xba, 0x39, 0x45, 0x94, 0xd0, 0x6d, 0x59, 0x89,
+ 0x1b, 0x49, 0x48, 0x32, 0xbd, 0x7a, 0xf3, 0x33, 0xe8, 0xa7, 0x95, 0x36, 0x53, 0x64, 0xee, 0x16,
+ 0xde, 0xbf, 0xbc, 0xdf, 0x03, 0x67, 0x9b, 0x85, 0xca, 0xc3, 0x5c, 0x08, 0x69, 0x73, 0xcb, 0xa5,
+ 0x30, 0xa1, 0xb1, 0xb9, 0xdd, 0x18, 0xac, 0xb4, 0xb4, 0x12, 0xc1, 0x92, 0xc6, 0x7f, 0xe8, 0x53,
+ 0xbf, 0x90, 0xb2, 0x58, 0xb3, 0xb0, 0xe2, 0x67, 0x9b, 0x65, 0xb8, 0x60, 0x66, 0xae, 0xb9, 0xb2,
+ 0x52, 0x3b, 0x4f, 0xf7, 0xcb, 0x03, 0x70, 0x5c, 0x85, 0x44, 0xbf, 0x3e, 0x14, 0x00, 0xf8, 0x26,
+ 0xf5, 0x8a, 0x72, 0x41, 0x95, 0x96, 0x85, 0x66, 0xc6, 0xb4, 0x3d, 0xdf, 0x0b, 0x8e, 0xb3, 0x66,
+ 0x89, 0x27, 0x62, 0xb4, 0x45, 0xd1, 0x13, 0x38, 0x51, 0xf9, 0x7c, 0x95, 0x17, 0x8c, 0xbe, 0x32,
+ 0x6d, 0xb8, 0x14, 0xd4, 0x55, 0x6a, 0xd7, 0x7c, 0x2f, 0x68, 0xf6, 0xce, 0xf1, 0x6e, 0x27, 0x3c,
+ 0x72, 0xfa, 0xa9, 0x93, 0xbb, 0xbf, 0xb3, 0x96, 0xda, 0x83, 0x5e, 0x3c, 0x83, 0xd6, 0x3e, 0x35,
+ 0x6a, 0x80, 0xa3, 0x3b, 0x72, 0x43, 0xd2, 0x7b, 0x02, 0x0f, 0x10, 0x00, 0xf5, 0x61, 0x96, 0x3e,
+ 0xc6, 0x04, 0x7a, 0xe5, 0x3b, 0xea, 0x4f, 0x92, 0x69, 0x0c, 0x6b, 0xc8, 0x07, 0x1d, 0x12, 0x3f,
+ 0x4c, 0xe8, 0x6d, 0x74, 0x9d, 0x66, 0x74, 0x1a, 0x67, 0xe3, 0x24, 0x25, 0xb4, 0x1f, 0x91, 0x41,
+ 0x32, 0x88, 0x26, 0x31, 0x3c, 0xbc, 0x5a, 0x80, 0xc6, 0x92, 0xaf, 0xd9, 0xb6, 0x31, 0xea, 0x60,
+ 0xb7, 0x16, 0xfe, 0x59, 0x0b, 0x0f, 0xf9, 0x9a, 0xa5, 0xaa, 0xaa, 0xdd, 0xfe, 0x78, 0xff, 0x7c,
+ 0xf1, 0xbd, 0xa0, 0xd1, 0xeb, 0xfe, 0x3f, 0x69, 0x77, 0xc0, 0x0c, 0x94, 0xb9, 0x0e, 0x9d, 0xd5,
+ 0xab, 0xb8, 0xcb, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x58, 0x12, 0xee, 0xf3, 0xbd, 0x01, 0x00,
+ 0x00,
+}
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.validate.go
new file mode 100644
index 000000000..4e122e040
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.validate.go
@@ -0,0 +1,106 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: udpa/annotations/status.proto
+
+package udpa_annotations
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// define the regex for a UUID once up-front
+var _status_uuidPattern = regexp.MustCompile("^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$")
+
+// Validate checks the field values on StatusAnnotation with the rules defined
+// in the proto definition for this message. If any rules are violated, an
+// error is returned.
+func (m *StatusAnnotation) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ // no validation rules for WorkInProgress
+
+ // no validation rules for PackageVersionStatus
+
+ return nil
+}
+
+// StatusAnnotationValidationError is the validation error returned by
+// StatusAnnotation.Validate if the designated constraints aren't met.
+type StatusAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e StatusAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e StatusAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e StatusAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e StatusAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e StatusAnnotationValidationError) ErrorName() string { return "StatusAnnotationValidationError" }
+
+// Error satisfies the builtin error interface
+func (e StatusAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sStatusAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = StatusAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = StatusAnnotationValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.go
new file mode 100644
index 000000000..4c414ce5e
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.go
@@ -0,0 +1,94 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: udpa/annotations/versioning.proto
+
+package udpa_annotations
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type VersioningAnnotation struct {
+ PreviousMessageType string `protobuf:"bytes,1,opt,name=previous_message_type,json=previousMessageType,proto3" json:"previous_message_type,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *VersioningAnnotation) Reset() { *m = VersioningAnnotation{} }
+func (m *VersioningAnnotation) String() string { return proto.CompactTextString(m) }
+func (*VersioningAnnotation) ProtoMessage() {}
+func (*VersioningAnnotation) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5bc0544382e16cfc, []int{0}
+}
+
+func (m *VersioningAnnotation) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_VersioningAnnotation.Unmarshal(m, b)
+}
+func (m *VersioningAnnotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_VersioningAnnotation.Marshal(b, m, deterministic)
+}
+func (m *VersioningAnnotation) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_VersioningAnnotation.Merge(m, src)
+}
+func (m *VersioningAnnotation) XXX_Size() int {
+ return xxx_messageInfo_VersioningAnnotation.Size(m)
+}
+func (m *VersioningAnnotation) XXX_DiscardUnknown() {
+ xxx_messageInfo_VersioningAnnotation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VersioningAnnotation proto.InternalMessageInfo
+
+func (m *VersioningAnnotation) GetPreviousMessageType() string {
+ if m != nil {
+ return m.PreviousMessageType
+ }
+ return ""
+}
+
+var E_Versioning = &proto.ExtensionDesc{
+ ExtendedType: (*descriptor.MessageOptions)(nil),
+ ExtensionType: (*VersioningAnnotation)(nil),
+ Field: 7881811,
+ Name: "udpa.annotations.versioning",
+ Tag: "bytes,7881811,opt,name=versioning",
+ Filename: "udpa/annotations/versioning.proto",
+}
+
+func init() {
+ proto.RegisterType((*VersioningAnnotation)(nil), "udpa.annotations.VersioningAnnotation")
+ proto.RegisterExtension(E_Versioning)
+}
+
+func init() { proto.RegisterFile("udpa/annotations/versioning.proto", fileDescriptor_5bc0544382e16cfc) }
+
+var fileDescriptor_5bc0544382e16cfc = []byte{
+ // 193 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x2c, 0x4d, 0x29, 0x48,
+ 0xd4, 0x4f, 0xcc, 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc, 0xcf, 0x2b, 0xd6, 0x2f, 0x4b, 0x2d,
+ 0x2a, 0xce, 0xcc, 0xcf, 0xcb, 0xcc, 0x4b, 0xd7, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x00,
+ 0x29, 0xd1, 0x43, 0x52, 0x22, 0xa5, 0x90, 0x9e, 0x9f, 0x9f, 0x9e, 0x93, 0xaa, 0x0f, 0x96, 0x4f,
+ 0x2a, 0x4d, 0xd3, 0x4f, 0x49, 0x2d, 0x4e, 0x2e, 0xca, 0x2c, 0x28, 0xc9, 0x2f, 0x82, 0xe8, 0x51,
+ 0xf2, 0xe2, 0x12, 0x09, 0x83, 0x9b, 0xe3, 0x08, 0xd7, 0x2a, 0x64, 0xc4, 0x25, 0x5a, 0x50, 0x94,
+ 0x5a, 0x96, 0x99, 0x5f, 0x5a, 0x1c, 0x9f, 0x9b, 0x5a, 0x5c, 0x9c, 0x98, 0x9e, 0x1a, 0x5f, 0x52,
+ 0x59, 0x90, 0x2a, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x19, 0x24, 0x0c, 0x93, 0xf4, 0x85, 0xc8, 0x85,
+ 0x54, 0x16, 0xa4, 0x5a, 0x65, 0x71, 0x71, 0x21, 0xdc, 0x24, 0x24, 0xaf, 0x07, 0xb1, 0x5c, 0x0f,
+ 0x66, 0xb9, 0x1e, 0x54, 0xad, 0x7f, 0x01, 0xd8, 0x71, 0x12, 0x97, 0x3b, 0x1e, 0x32, 0x2b, 0x30,
+ 0x6a, 0x70, 0x1b, 0xa9, 0xe9, 0xa1, 0x3b, 0x5c, 0x0f, 0x9b, 0x9b, 0x82, 0x90, 0x4c, 0x4f, 0x62,
+ 0x03, 0x9b, 0x6a, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xc1, 0x9c, 0xb8, 0x85, 0x17, 0x01, 0x00,
+ 0x00,
+}
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.validate.go
new file mode 100644
index 000000000..b1619e448
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.validate.go
@@ -0,0 +1,106 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: udpa/annotations/versioning.proto
+
+package udpa_annotations
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// define the regex for a UUID once up-front
+var _versioning_uuidPattern = regexp.MustCompile("^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$")
+
+// Validate checks the field values on VersioningAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *VersioningAnnotation) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ // no validation rules for PreviousMessageType
+
+ return nil
+}
+
+// VersioningAnnotationValidationError is the validation error returned by
+// VersioningAnnotation.Validate if the designated constraints aren't met.
+type VersioningAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e VersioningAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e VersioningAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e VersioningAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e VersioningAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e VersioningAnnotationValidationError) ErrorName() string {
+ return "VersioningAnnotationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e VersioningAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sVersioningAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = VersioningAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = VersioningAnnotationValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.go
new file mode 100644
index 000000000..af16c9b1b
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.go
@@ -0,0 +1,84 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: xds/core/v3/authority.proto
+
+package xds_core_v3
+
+import (
+ fmt "fmt"
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type Authority struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Authority) Reset() { *m = Authority{} }
+func (m *Authority) String() string { return proto.CompactTextString(m) }
+func (*Authority) ProtoMessage() {}
+func (*Authority) Descriptor() ([]byte, []int) {
+ return fileDescriptor_74635363e1fbb077, []int{0}
+}
+
+func (m *Authority) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Authority.Unmarshal(m, b)
+}
+func (m *Authority) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Authority.Marshal(b, m, deterministic)
+}
+func (m *Authority) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Authority.Merge(m, src)
+}
+func (m *Authority) XXX_Size() int {
+ return xxx_messageInfo_Authority.Size(m)
+}
+func (m *Authority) XXX_DiscardUnknown() {
+ xxx_messageInfo_Authority.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Authority proto.InternalMessageInfo
+
+func (m *Authority) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterType((*Authority)(nil), "xds.core.v3.Authority")
+}
+
+func init() { proto.RegisterFile("xds/core/v3/authority.proto", fileDescriptor_74635363e1fbb077) }
+
+var fileDescriptor_74635363e1fbb077 = []byte{
+ // 182 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xae, 0x48, 0x29, 0xd6,
+ 0x4f, 0xce, 0x2f, 0x4a, 0xd5, 0x2f, 0x33, 0xd6, 0x4f, 0x2c, 0x2d, 0xc9, 0xc8, 0x2f, 0xca, 0x2c,
+ 0xa9, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0xae, 0x48, 0x29, 0xd6, 0x03, 0x49, 0xea,
+ 0x95, 0x19, 0x4b, 0xc9, 0x96, 0xa6, 0x14, 0x24, 0xea, 0x27, 0xe6, 0xe5, 0xe5, 0x97, 0x24, 0x96,
+ 0x64, 0xe6, 0xe7, 0x15, 0xeb, 0x17, 0x97, 0x24, 0x96, 0x94, 0x16, 0x43, 0xd4, 0x4a, 0x89, 0x97,
+ 0x25, 0xe6, 0x64, 0xa6, 0x24, 0x96, 0xa4, 0xea, 0xc3, 0x18, 0x10, 0x09, 0x25, 0x0d, 0x2e, 0x4e,
+ 0x47, 0x98, 0xb9, 0x42, 0xd2, 0x5c, 0x2c, 0x79, 0x89, 0xb9, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a,
+ 0x9c, 0x4e, 0xec, 0xbf, 0x9c, 0x58, 0x8a, 0x98, 0x04, 0x18, 0x83, 0xc0, 0x82, 0x4e, 0xe6, 0xbb,
+ 0x1a, 0x4e, 0x5c, 0x64, 0x63, 0xe2, 0x60, 0xe4, 0x92, 0x4e, 0xce, 0xcf, 0xd5, 0x4b, 0xcf, 0x2c,
+ 0xc9, 0x28, 0x4d, 0xd2, 0x03, 0x59, 0xab, 0x87, 0xe4, 0x10, 0x27, 0x3e, 0xb8, 0x71, 0x01, 0x20,
+ 0x0b, 0x02, 0x18, 0x93, 0xd8, 0xc0, 0x36, 0x19, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x21,
+ 0x66, 0x70, 0xcd, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.validate.go
new file mode 100644
index 000000000..05dc74e6f
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.validate.go
@@ -0,0 +1,108 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/core/v3/authority.proto
+
+package xds_core_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// define the regex for a UUID once up-front
+var _authority_uuidPattern = regexp.MustCompile("^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$")
+
+// Validate checks the field values on Authority with the rules defined in the
+// proto definition for this message. If any rules are violated, an error is returned.
+func (m *Authority) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if utf8.RuneCountInString(m.GetName()) < 1 {
+ return AuthorityValidationError{
+ field: "Name",
+ reason: "value length must be at least 1 runes",
+ }
+ }
+
+ return nil
+}
+
+// AuthorityValidationError is the validation error returned by
+// Authority.Validate if the designated constraints aren't met.
+type AuthorityValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e AuthorityValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e AuthorityValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e AuthorityValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e AuthorityValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e AuthorityValidationError) ErrorName() string { return "AuthorityValidationError" }
+
+// Error satisfies the builtin error interface
+func (e AuthorityValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sAuthority.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = AuthorityValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = AuthorityValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.go
new file mode 100644
index 000000000..c44a41f08
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.go
@@ -0,0 +1,194 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: xds/core/v3/collection_entry.proto
+
+package xds_core_v3
+
+import (
+ fmt "fmt"
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ any "github.com/golang/protobuf/ptypes/any"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type CollectionEntry struct {
+ // Types that are valid to be assigned to ResourceSpecifier:
+ // *CollectionEntry_Locator
+ // *CollectionEntry_InlineEntry_
+ ResourceSpecifier isCollectionEntry_ResourceSpecifier `protobuf_oneof:"resource_specifier"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CollectionEntry) Reset() { *m = CollectionEntry{} }
+func (m *CollectionEntry) String() string { return proto.CompactTextString(m) }
+func (*CollectionEntry) ProtoMessage() {}
+func (*CollectionEntry) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5b15c821e5994c90, []int{0}
+}
+
+func (m *CollectionEntry) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CollectionEntry.Unmarshal(m, b)
+}
+func (m *CollectionEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CollectionEntry.Marshal(b, m, deterministic)
+}
+func (m *CollectionEntry) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CollectionEntry.Merge(m, src)
+}
+func (m *CollectionEntry) XXX_Size() int {
+ return xxx_messageInfo_CollectionEntry.Size(m)
+}
+func (m *CollectionEntry) XXX_DiscardUnknown() {
+ xxx_messageInfo_CollectionEntry.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CollectionEntry proto.InternalMessageInfo
+
+type isCollectionEntry_ResourceSpecifier interface {
+ isCollectionEntry_ResourceSpecifier()
+}
+
+type CollectionEntry_Locator struct {
+ Locator *ResourceLocator `protobuf:"bytes,1,opt,name=locator,proto3,oneof"`
+}
+
+type CollectionEntry_InlineEntry_ struct {
+ InlineEntry *CollectionEntry_InlineEntry `protobuf:"bytes,2,opt,name=inline_entry,json=inlineEntry,proto3,oneof"`
+}
+
+func (*CollectionEntry_Locator) isCollectionEntry_ResourceSpecifier() {}
+
+func (*CollectionEntry_InlineEntry_) isCollectionEntry_ResourceSpecifier() {}
+
+func (m *CollectionEntry) GetResourceSpecifier() isCollectionEntry_ResourceSpecifier {
+ if m != nil {
+ return m.ResourceSpecifier
+ }
+ return nil
+}
+
+func (m *CollectionEntry) GetLocator() *ResourceLocator {
+ if x, ok := m.GetResourceSpecifier().(*CollectionEntry_Locator); ok {
+ return x.Locator
+ }
+ return nil
+}
+
+func (m *CollectionEntry) GetInlineEntry() *CollectionEntry_InlineEntry {
+ if x, ok := m.GetResourceSpecifier().(*CollectionEntry_InlineEntry_); ok {
+ return x.InlineEntry
+ }
+ return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*CollectionEntry) XXX_OneofWrappers() []interface{} {
+ return []interface{}{
+ (*CollectionEntry_Locator)(nil),
+ (*CollectionEntry_InlineEntry_)(nil),
+ }
+}
+
+type CollectionEntry_InlineEntry struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
+ Resource *any.Any `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CollectionEntry_InlineEntry) Reset() { *m = CollectionEntry_InlineEntry{} }
+func (m *CollectionEntry_InlineEntry) String() string { return proto.CompactTextString(m) }
+func (*CollectionEntry_InlineEntry) ProtoMessage() {}
+func (*CollectionEntry_InlineEntry) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5b15c821e5994c90, []int{0, 0}
+}
+
+func (m *CollectionEntry_InlineEntry) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CollectionEntry_InlineEntry.Unmarshal(m, b)
+}
+func (m *CollectionEntry_InlineEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CollectionEntry_InlineEntry.Marshal(b, m, deterministic)
+}
+func (m *CollectionEntry_InlineEntry) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CollectionEntry_InlineEntry.Merge(m, src)
+}
+func (m *CollectionEntry_InlineEntry) XXX_Size() int {
+ return xxx_messageInfo_CollectionEntry_InlineEntry.Size(m)
+}
+func (m *CollectionEntry_InlineEntry) XXX_DiscardUnknown() {
+ xxx_messageInfo_CollectionEntry_InlineEntry.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CollectionEntry_InlineEntry proto.InternalMessageInfo
+
+func (m *CollectionEntry_InlineEntry) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *CollectionEntry_InlineEntry) GetVersion() string {
+ if m != nil {
+ return m.Version
+ }
+ return ""
+}
+
+func (m *CollectionEntry_InlineEntry) GetResource() *any.Any {
+ if m != nil {
+ return m.Resource
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*CollectionEntry)(nil), "xds.core.v3.CollectionEntry")
+ proto.RegisterType((*CollectionEntry_InlineEntry)(nil), "xds.core.v3.CollectionEntry.InlineEntry")
+}
+
+func init() { proto.RegisterFile("xds/core/v3/collection_entry.proto", fileDescriptor_5b15c821e5994c90) }
+
+var fileDescriptor_5b15c821e5994c90 = []byte{
+ // 375 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x91, 0xcf, 0x6b, 0xdb, 0x30,
+ 0x1c, 0xc5, 0xe3, 0x64, 0xe4, 0x87, 0x3c, 0x18, 0x88, 0x8c, 0x38, 0x59, 0x06, 0x23, 0xec, 0x10,
+ 0x18, 0x96, 0x42, 0x72, 0xd9, 0x06, 0x3b, 0xc4, 0x63, 0x90, 0xc1, 0x06, 0xc1, 0xc7, 0x36, 0x6d,
+ 0x50, 0x6c, 0x25, 0x15, 0x38, 0x52, 0x90, 0x64, 0x93, 0xf4, 0x50, 0x7a, 0xef, 0x7f, 0xd4, 0x6b,
+ 0x2f, 0xbd, 0xf6, 0xbf, 0x29, 0x3d, 0x15, 0xcb, 0x76, 0x9a, 0xe4, 0x26, 0xf1, 0x3e, 0x4f, 0xef,
+ 0x7d, 0xf5, 0x05, 0xbd, 0x6d, 0xa8, 0x70, 0x20, 0x24, 0xc5, 0xc9, 0x08, 0x07, 0x22, 0x8a, 0x68,
+ 0xa0, 0x99, 0xe0, 0x73, 0xca, 0xb5, 0xdc, 0xa1, 0x8d, 0x14, 0x5a, 0x40, 0x7b, 0x1b, 0x2a, 0x94,
+ 0x32, 0x28, 0x19, 0x75, 0xda, 0x2b, 0x21, 0x56, 0x11, 0xc5, 0x46, 0x5a, 0xc4, 0x4b, 0x4c, 0x78,
+ 0xce, 0x75, 0x3e, 0xc7, 0xe1, 0x86, 0x60, 0xc2, 0xb9, 0xd0, 0x24, 0x7d, 0x44, 0x61, 0xa5, 0x89,
+ 0x8e, 0x55, 0x2e, 0x1f, 0x45, 0x49, 0xaa, 0x44, 0x2c, 0x03, 0x3a, 0x8f, 0x44, 0x40, 0xb4, 0x90,
+ 0x39, 0xd3, 0x4a, 0x48, 0xc4, 0x42, 0xa2, 0x29, 0x2e, 0x0e, 0x99, 0xd0, 0x7b, 0x28, 0x83, 0x0f,
+ 0xbf, 0xf7, 0xf5, 0xfe, 0xa4, 0xed, 0xe0, 0x77, 0x50, 0xcb, 0xdd, 0x8e, 0xf5, 0xc5, 0xea, 0xdb,
+ 0xc3, 0x2e, 0x3a, 0x68, 0x8a, 0xfc, 0x3c, 0xe2, 0x5f, 0xc6, 0x4c, 0x4a, 0x7e, 0x81, 0xc3, 0xff,
+ 0xe0, 0x3d, 0xe3, 0x11, 0xe3, 0x34, 0x9b, 0xd3, 0x29, 0x1b, 0x7b, 0xff, 0xc8, 0x7e, 0x92, 0x86,
+ 0xfe, 0x1a, 0x83, 0x39, 0x4f, 0x4a, 0xbe, 0xcd, 0xde, 0xae, 0x9d, 0x3b, 0x0b, 0xd8, 0x07, 0x32,
+ 0x1c, 0x80, 0x77, 0x9c, 0xac, 0xa9, 0x69, 0xd5, 0xf0, 0xba, 0x2f, 0x5e, 0x5b, 0xb6, 0x86, 0x1f,
+ 0x2f, 0xcf, 0x07, 0xee, 0x0f, 0xe2, 0x5e, 0x8f, 0xdd, 0xb3, 0xf9, 0xcc, 0x9d, 0xa1, 0x9b, 0x9f,
+ 0x17, 0xdf, 0xbe, 0xfa, 0x86, 0x84, 0x0e, 0xa8, 0x25, 0x54, 0x2a, 0x26, 0xb8, 0xe9, 0xd2, 0xf0,
+ 0x8b, 0x2b, 0x1c, 0x80, 0x7a, 0xf1, 0x57, 0x4e, 0xc5, 0xd4, 0x6c, 0xa2, 0x6c, 0x05, 0xa8, 0x58,
+ 0x01, 0x1a, 0xf3, 0x9d, 0xbf, 0xa7, 0xbc, 0x36, 0x80, 0xfb, 0xdf, 0x55, 0x1b, 0x1a, 0xb0, 0x25,
+ 0xa3, 0x12, 0x56, 0x9e, 0x3d, 0xcb, 0xfb, 0x75, 0x7f, 0xfb, 0xf8, 0x54, 0x2d, 0xd7, 0x2d, 0xf0,
+ 0x29, 0x10, 0x6b, 0xb4, 0x62, 0xfa, 0x2a, 0x5e, 0xa0, 0x74, 0x6d, 0x87, 0xa3, 0x7b, 0xcd, 0x93,
+ 0xd9, 0xa7, 0x69, 0xd0, 0xd4, 0x5a, 0x54, 0x4d, 0xe2, 0xe8, 0x35, 0x00, 0x00, 0xff, 0xff, 0x8a,
+ 0x35, 0xc0, 0xf2, 0x35, 0x02, 0x00, 0x00,
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.validate.go
new file mode 100644
index 000000000..6fd7c49a5
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.validate.go
@@ -0,0 +1,225 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/core/v3/collection_entry.proto
+
+package xds_core_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// define the regex for a UUID once up-front
+var _collection_entry_uuidPattern = regexp.MustCompile("^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$")
+
+// Validate checks the field values on CollectionEntry with the rules defined
+// in the proto definition for this message. If any rules are violated, an
+// error is returned.
+func (m *CollectionEntry) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ switch m.ResourceSpecifier.(type) {
+
+ case *CollectionEntry_Locator:
+
+ if v, ok := interface{}(m.GetLocator()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CollectionEntryValidationError{
+ field: "Locator",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *CollectionEntry_InlineEntry_:
+
+ if v, ok := interface{}(m.GetInlineEntry()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CollectionEntryValidationError{
+ field: "InlineEntry",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ return CollectionEntryValidationError{
+ field: "ResourceSpecifier",
+ reason: "value is required",
+ }
+
+ }
+
+ return nil
+}
+
+// CollectionEntryValidationError is the validation error returned by
+// CollectionEntry.Validate if the designated constraints aren't met.
+type CollectionEntryValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e CollectionEntryValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e CollectionEntryValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e CollectionEntryValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e CollectionEntryValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e CollectionEntryValidationError) ErrorName() string { return "CollectionEntryValidationError" }
+
+// Error satisfies the builtin error interface
+func (e CollectionEntryValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sCollectionEntry.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = CollectionEntryValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = CollectionEntryValidationError{}
+
+// Validate checks the field values on CollectionEntry_InlineEntry with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *CollectionEntry_InlineEntry) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if !_CollectionEntry_InlineEntry_Name_Pattern.MatchString(m.GetName()) {
+ return CollectionEntry_InlineEntryValidationError{
+ field: "Name",
+ reason: "value does not match regex pattern \"^[0-9a-zA-Z_\\\\-\\\\.~:]+$\"",
+ }
+ }
+
+ // no validation rules for Version
+
+ if v, ok := interface{}(m.GetResource()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CollectionEntry_InlineEntryValidationError{
+ field: "Resource",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ return nil
+}
+
+// CollectionEntry_InlineEntryValidationError is the validation error returned
+// by CollectionEntry_InlineEntry.Validate if the designated constraints
+// aren't met.
+type CollectionEntry_InlineEntryValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e CollectionEntry_InlineEntryValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e CollectionEntry_InlineEntryValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e CollectionEntry_InlineEntryValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e CollectionEntry_InlineEntryValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e CollectionEntry_InlineEntryValidationError) ErrorName() string {
+ return "CollectionEntry_InlineEntryValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e CollectionEntry_InlineEntryValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sCollectionEntry_InlineEntry.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = CollectionEntry_InlineEntryValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = CollectionEntry_InlineEntryValidationError{}
+
+var _CollectionEntry_InlineEntry_Name_Pattern = regexp.MustCompile("^[0-9a-zA-Z_\\-\\.~:]+$")
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.go
new file mode 100644
index 000000000..dbd6d0ccf
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.go
@@ -0,0 +1,86 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: xds/core/v3/context_params.proto
+
+package xds_core_v3
+
+import (
+ fmt "fmt"
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ proto "github.com/golang/protobuf/proto"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type ContextParams struct {
+ Params map[string]string `protobuf:"bytes,1,rep,name=params,proto3" json:"params,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ContextParams) Reset() { *m = ContextParams{} }
+func (m *ContextParams) String() string { return proto.CompactTextString(m) }
+func (*ContextParams) ProtoMessage() {}
+func (*ContextParams) Descriptor() ([]byte, []int) {
+ return fileDescriptor_a77d5b5f2f15aa7c, []int{0}
+}
+
+func (m *ContextParams) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ContextParams.Unmarshal(m, b)
+}
+func (m *ContextParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ContextParams.Marshal(b, m, deterministic)
+}
+func (m *ContextParams) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ContextParams.Merge(m, src)
+}
+func (m *ContextParams) XXX_Size() int {
+ return xxx_messageInfo_ContextParams.Size(m)
+}
+func (m *ContextParams) XXX_DiscardUnknown() {
+ xxx_messageInfo_ContextParams.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ContextParams proto.InternalMessageInfo
+
+func (m *ContextParams) GetParams() map[string]string {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*ContextParams)(nil), "xds.core.v3.ContextParams")
+ proto.RegisterMapType((map[string]string)(nil), "xds.core.v3.ContextParams.ParamsEntry")
+}
+
+func init() { proto.RegisterFile("xds/core/v3/context_params.proto", fileDescriptor_a77d5b5f2f15aa7c) }
+
+var fileDescriptor_a77d5b5f2f15aa7c = []byte{
+ // 221 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0xa8, 0x48, 0x29, 0xd6,
+ 0x4f, 0xce, 0x2f, 0x4a, 0xd5, 0x2f, 0x33, 0xd6, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0x89,
+ 0x2f, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0xae, 0x48,
+ 0x29, 0xd6, 0x03, 0xa9, 0xd0, 0x2b, 0x33, 0x96, 0x92, 0x2d, 0x4d, 0x29, 0x48, 0xd4, 0x4f, 0xcc,
+ 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc, 0xcf, 0x2b, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x85,
+ 0xaa, 0x55, 0xea, 0x62, 0xe4, 0xe2, 0x75, 0x86, 0x18, 0x12, 0x00, 0x36, 0x43, 0xc8, 0x8e, 0x8b,
+ 0x0d, 0x62, 0x9a, 0x04, 0xa3, 0x02, 0xb3, 0x06, 0xb7, 0x91, 0x9a, 0x1e, 0x92, 0x71, 0x7a, 0x28,
+ 0x6a, 0xf5, 0x20, 0x94, 0x6b, 0x5e, 0x49, 0x51, 0x65, 0x10, 0x54, 0x97, 0x94, 0x25, 0x17, 0x37,
+ 0x92, 0xb0, 0x90, 0x00, 0x17, 0x73, 0x76, 0x6a, 0xa5, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10,
+ 0x88, 0x29, 0x24, 0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0x04, 0x16, 0x83, 0x70,
+ 0xac, 0x98, 0x2c, 0x18, 0x9d, 0xac, 0x77, 0x35, 0x9c, 0xb8, 0xc8, 0xc6, 0xc4, 0xc1, 0xc8, 0x25,
+ 0x9d, 0x9c, 0x9f, 0xab, 0x97, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x07, 0xf2, 0x00, 0xb2, 0x1b,
+ 0x9c, 0x84, 0x50, 0x1c, 0x11, 0x00, 0xf2, 0x47, 0x00, 0x63, 0x12, 0x1b, 0xd8, 0x43, 0xc6, 0x80,
+ 0x00, 0x00, 0x00, 0xff, 0xff, 0xe6, 0x4e, 0x2e, 0x13, 0x20, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.validate.go
new file mode 100644
index 000000000..718bb4617
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.validate.go
@@ -0,0 +1,104 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/core/v3/context_params.proto
+
+package xds_core_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// define the regex for a UUID once up-front
+var _context_params_uuidPattern = regexp.MustCompile("^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$")
+
+// Validate checks the field values on ContextParams with the rules defined in
+// the proto definition for this message. If any rules are violated, an error
+// is returned.
+func (m *ContextParams) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ // no validation rules for Params
+
+ return nil
+}
+
+// ContextParamsValidationError is the validation error returned by
+// ContextParams.Validate if the designated constraints aren't met.
+type ContextParamsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ContextParamsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ContextParamsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ContextParamsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ContextParamsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ContextParamsValidationError) ErrorName() string { return "ContextParamsValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ContextParamsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sContextParams.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ContextParamsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ContextParamsValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.go
new file mode 100644
index 000000000..12c27ac8c
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.go
@@ -0,0 +1,104 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: xds/core/v3/resource.proto
+
+package xds_core_v3
+
+import (
+ fmt "fmt"
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ proto "github.com/golang/protobuf/proto"
+ any "github.com/golang/protobuf/ptypes/any"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type Resource struct {
+ Name *ResourceName `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
+ Resource *any.Any `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Resource) Reset() { *m = Resource{} }
+func (m *Resource) String() string { return proto.CompactTextString(m) }
+func (*Resource) ProtoMessage() {}
+func (*Resource) Descriptor() ([]byte, []int) {
+ return fileDescriptor_acbac04701714df2, []int{0}
+}
+
+func (m *Resource) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Resource.Unmarshal(m, b)
+}
+func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Resource.Marshal(b, m, deterministic)
+}
+func (m *Resource) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Resource.Merge(m, src)
+}
+func (m *Resource) XXX_Size() int {
+ return xxx_messageInfo_Resource.Size(m)
+}
+func (m *Resource) XXX_DiscardUnknown() {
+ xxx_messageInfo_Resource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Resource proto.InternalMessageInfo
+
+func (m *Resource) GetName() *ResourceName {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+func (m *Resource) GetVersion() string {
+ if m != nil {
+ return m.Version
+ }
+ return ""
+}
+
+func (m *Resource) GetResource() *any.Any {
+ if m != nil {
+ return m.Resource
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*Resource)(nil), "xds.core.v3.Resource")
+}
+
+func init() { proto.RegisterFile("xds/core/v3/resource.proto", fileDescriptor_acbac04701714df2) }
+
+var fileDescriptor_acbac04701714df2 = []byte{
+ // 241 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xaa, 0x48, 0x29, 0xd6,
+ 0x4f, 0xce, 0x2f, 0x4a, 0xd5, 0x2f, 0x33, 0xd6, 0x2f, 0x4a, 0x2d, 0xce, 0x2f, 0x2d, 0x4a, 0x4e,
+ 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0xae, 0x48, 0x29, 0xd6, 0x03, 0xc9, 0xe9, 0x95,
+ 0x19, 0x4b, 0x49, 0xa6, 0xe7, 0xe7, 0xa7, 0xe7, 0xa4, 0xea, 0x83, 0xa5, 0x92, 0x4a, 0xd3, 0xf4,
+ 0x13, 0xf3, 0x2a, 0x21, 0xea, 0xa4, 0x64, 0x4b, 0x53, 0x0a, 0x12, 0xf5, 0x13, 0xf3, 0xf2, 0xf2,
+ 0x4b, 0x12, 0x4b, 0x32, 0xf3, 0xf3, 0x8a, 0xf5, 0x8b, 0x4b, 0x12, 0x4b, 0x4a, 0x8b, 0xa1, 0xd2,
+ 0xf2, 0xd8, 0xac, 0x88, 0xcf, 0x4b, 0xcc, 0x85, 0xda, 0xa3, 0xd4, 0xca, 0xc8, 0xc5, 0x11, 0x04,
+ 0x15, 0x17, 0xd2, 0xe5, 0x62, 0x01, 0x49, 0x49, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x1b, 0x49, 0xea,
+ 0x21, 0xb9, 0x41, 0x0f, 0xa6, 0xc8, 0x2f, 0x31, 0x37, 0x35, 0x08, 0xac, 0x4c, 0x48, 0x82, 0x8b,
+ 0xbd, 0x2c, 0xb5, 0xa8, 0x38, 0x33, 0x3f, 0x4f, 0x82, 0x49, 0x81, 0x51, 0x83, 0x33, 0x08, 0xc6,
+ 0x15, 0x32, 0xe0, 0xe2, 0x80, 0x59, 0x26, 0xc1, 0x0c, 0x36, 0x4c, 0x44, 0x0f, 0xe2, 0x07, 0x3d,
+ 0x98, 0x1f, 0xf4, 0x1c, 0xf3, 0x2a, 0x83, 0xe0, 0xaa, 0x9c, 0xcc, 0x76, 0x35, 0x9c, 0xb8, 0xc8,
+ 0xc6, 0xc4, 0xc1, 0xc8, 0x25, 0x9d, 0x9c, 0x9f, 0xab, 0x97, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4,
+ 0x07, 0xf2, 0x1c, 0xb2, 0x2b, 0x9c, 0x78, 0x61, 0xce, 0x08, 0x00, 0x19, 0x13, 0xc0, 0x98, 0xc4,
+ 0x06, 0x36, 0xcf, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xc7, 0x85, 0xe4, 0x8c, 0x4c, 0x01, 0x00,
+ 0x00,
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.validate.go
new file mode 100644
index 000000000..688a3c6fd
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.validate.go
@@ -0,0 +1,123 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/core/v3/resource.proto
+
+package xds_core_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// define the regex for a UUID once up-front
+var _resource_uuidPattern = regexp.MustCompile("^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$")
+
+// Validate checks the field values on Resource with the rules defined in the
+// proto definition for this message. If any rules are violated, an error is returned.
+func (m *Resource) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if v, ok := interface{}(m.GetName()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ResourceValidationError{
+ field: "Name",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for Version
+
+ if v, ok := interface{}(m.GetResource()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ResourceValidationError{
+ field: "Resource",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ return nil
+}
+
+// ResourceValidationError is the validation error returned by
+// Resource.Validate if the designated constraints aren't met.
+type ResourceValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ResourceValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ResourceValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ResourceValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ResourceValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ResourceValidationError) ErrorName() string { return "ResourceValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ResourceValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sResource.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ResourceValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ResourceValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.go
new file mode 100644
index 000000000..4d1943307
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.go
@@ -0,0 +1,279 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: xds/core/v3/resource_locator.proto
+
+package xds_core_v3
+
+import (
+ fmt "fmt"
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type ResourceLocator_Scheme int32
+
+const (
+ ResourceLocator_XDSTP ResourceLocator_Scheme = 0
+ ResourceLocator_HTTP ResourceLocator_Scheme = 1
+ ResourceLocator_FILE ResourceLocator_Scheme = 2
+)
+
+var ResourceLocator_Scheme_name = map[int32]string{
+ 0: "XDSTP",
+ 1: "HTTP",
+ 2: "FILE",
+}
+
+var ResourceLocator_Scheme_value = map[string]int32{
+ "XDSTP": 0,
+ "HTTP": 1,
+ "FILE": 2,
+}
+
+func (x ResourceLocator_Scheme) String() string {
+ return proto.EnumName(ResourceLocator_Scheme_name, int32(x))
+}
+
+func (ResourceLocator_Scheme) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_eb09b3779eaf3665, []int{0, 0}
+}
+
+type ResourceLocator struct {
+ Scheme ResourceLocator_Scheme `protobuf:"varint,1,opt,name=scheme,proto3,enum=xds.core.v3.ResourceLocator_Scheme" json:"scheme,omitempty"`
+ Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
+ Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"`
+ ResourceType string `protobuf:"bytes,4,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"`
+ // Types that are valid to be assigned to ContextParamSpecifier:
+ // *ResourceLocator_ExactContext
+ ContextParamSpecifier isResourceLocator_ContextParamSpecifier `protobuf_oneof:"context_param_specifier"`
+ Directives []*ResourceLocator_Directive `protobuf:"bytes,6,rep,name=directives,proto3" json:"directives,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ResourceLocator) Reset() { *m = ResourceLocator{} }
+func (m *ResourceLocator) String() string { return proto.CompactTextString(m) }
+func (*ResourceLocator) ProtoMessage() {}
+func (*ResourceLocator) Descriptor() ([]byte, []int) {
+ return fileDescriptor_eb09b3779eaf3665, []int{0}
+}
+
+func (m *ResourceLocator) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ResourceLocator.Unmarshal(m, b)
+}
+func (m *ResourceLocator) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ResourceLocator.Marshal(b, m, deterministic)
+}
+func (m *ResourceLocator) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ResourceLocator.Merge(m, src)
+}
+func (m *ResourceLocator) XXX_Size() int {
+ return xxx_messageInfo_ResourceLocator.Size(m)
+}
+func (m *ResourceLocator) XXX_DiscardUnknown() {
+ xxx_messageInfo_ResourceLocator.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceLocator proto.InternalMessageInfo
+
+func (m *ResourceLocator) GetScheme() ResourceLocator_Scheme {
+ if m != nil {
+ return m.Scheme
+ }
+ return ResourceLocator_XDSTP
+}
+
+func (m *ResourceLocator) GetId() string {
+ if m != nil {
+ return m.Id
+ }
+ return ""
+}
+
+func (m *ResourceLocator) GetAuthority() string {
+ if m != nil {
+ return m.Authority
+ }
+ return ""
+}
+
+func (m *ResourceLocator) GetResourceType() string {
+ if m != nil {
+ return m.ResourceType
+ }
+ return ""
+}
+
+type isResourceLocator_ContextParamSpecifier interface {
+ isResourceLocator_ContextParamSpecifier()
+}
+
+type ResourceLocator_ExactContext struct {
+ ExactContext *ContextParams `protobuf:"bytes,5,opt,name=exact_context,json=exactContext,proto3,oneof"`
+}
+
+func (*ResourceLocator_ExactContext) isResourceLocator_ContextParamSpecifier() {}
+
+func (m *ResourceLocator) GetContextParamSpecifier() isResourceLocator_ContextParamSpecifier {
+ if m != nil {
+ return m.ContextParamSpecifier
+ }
+ return nil
+}
+
+func (m *ResourceLocator) GetExactContext() *ContextParams {
+ if x, ok := m.GetContextParamSpecifier().(*ResourceLocator_ExactContext); ok {
+ return x.ExactContext
+ }
+ return nil
+}
+
+func (m *ResourceLocator) GetDirectives() []*ResourceLocator_Directive {
+ if m != nil {
+ return m.Directives
+ }
+ return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*ResourceLocator) XXX_OneofWrappers() []interface{} {
+ return []interface{}{
+ (*ResourceLocator_ExactContext)(nil),
+ }
+}
+
+type ResourceLocator_Directive struct {
+ // Types that are valid to be assigned to Directive:
+ // *ResourceLocator_Directive_Alt
+ // *ResourceLocator_Directive_Entry
+ Directive isResourceLocator_Directive_Directive `protobuf_oneof:"directive"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ResourceLocator_Directive) Reset() { *m = ResourceLocator_Directive{} }
+func (m *ResourceLocator_Directive) String() string { return proto.CompactTextString(m) }
+func (*ResourceLocator_Directive) ProtoMessage() {}
+func (*ResourceLocator_Directive) Descriptor() ([]byte, []int) {
+ return fileDescriptor_eb09b3779eaf3665, []int{0, 0}
+}
+
+func (m *ResourceLocator_Directive) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ResourceLocator_Directive.Unmarshal(m, b)
+}
+func (m *ResourceLocator_Directive) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ResourceLocator_Directive.Marshal(b, m, deterministic)
+}
+func (m *ResourceLocator_Directive) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ResourceLocator_Directive.Merge(m, src)
+}
+func (m *ResourceLocator_Directive) XXX_Size() int {
+ return xxx_messageInfo_ResourceLocator_Directive.Size(m)
+}
+func (m *ResourceLocator_Directive) XXX_DiscardUnknown() {
+ xxx_messageInfo_ResourceLocator_Directive.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceLocator_Directive proto.InternalMessageInfo
+
+type isResourceLocator_Directive_Directive interface {
+ isResourceLocator_Directive_Directive()
+}
+
+type ResourceLocator_Directive_Alt struct {
+ Alt *ResourceLocator `protobuf:"bytes,1,opt,name=alt,proto3,oneof"`
+}
+
+type ResourceLocator_Directive_Entry struct {
+ Entry string `protobuf:"bytes,2,opt,name=entry,proto3,oneof"`
+}
+
+func (*ResourceLocator_Directive_Alt) isResourceLocator_Directive_Directive() {}
+
+func (*ResourceLocator_Directive_Entry) isResourceLocator_Directive_Directive() {}
+
+func (m *ResourceLocator_Directive) GetDirective() isResourceLocator_Directive_Directive {
+ if m != nil {
+ return m.Directive
+ }
+ return nil
+}
+
+func (m *ResourceLocator_Directive) GetAlt() *ResourceLocator {
+ if x, ok := m.GetDirective().(*ResourceLocator_Directive_Alt); ok {
+ return x.Alt
+ }
+ return nil
+}
+
+func (m *ResourceLocator_Directive) GetEntry() string {
+ if x, ok := m.GetDirective().(*ResourceLocator_Directive_Entry); ok {
+ return x.Entry
+ }
+ return ""
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*ResourceLocator_Directive) XXX_OneofWrappers() []interface{} {
+ return []interface{}{
+ (*ResourceLocator_Directive_Alt)(nil),
+ (*ResourceLocator_Directive_Entry)(nil),
+ }
+}
+
+func init() {
+ proto.RegisterEnum("xds.core.v3.ResourceLocator_Scheme", ResourceLocator_Scheme_name, ResourceLocator_Scheme_value)
+ proto.RegisterType((*ResourceLocator)(nil), "xds.core.v3.ResourceLocator")
+ proto.RegisterType((*ResourceLocator_Directive)(nil), "xds.core.v3.ResourceLocator.Directive")
+}
+
+func init() { proto.RegisterFile("xds/core/v3/resource_locator.proto", fileDescriptor_eb09b3779eaf3665) }
+
+var fileDescriptor_eb09b3779eaf3665 = []byte{
+ // 481 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0x41, 0x6f, 0xd3, 0x30,
+ 0x14, 0xc7, 0xeb, 0xb4, 0x0d, 0x8d, 0xbb, 0x8d, 0xc8, 0x42, 0x2c, 0x84, 0x22, 0xa2, 0x82, 0xa0,
+ 0x12, 0x34, 0x99, 0xd2, 0x03, 0x02, 0x89, 0xc3, 0xcc, 0x36, 0x15, 0x69, 0x87, 0x28, 0xeb, 0x01,
+ 0xc1, 0x20, 0xf2, 0x12, 0x43, 0x2d, 0xb5, 0x71, 0x64, 0x3b, 0x55, 0xcb, 0x01, 0x21, 0x4e, 0x9c,
+ 0xf8, 0x40, 0x7c, 0x02, 0xae, 0x7c, 0x1b, 0xd4, 0x13, 0x4a, 0xd2, 0x76, 0xdd, 0x0e, 0xbb, 0x59,
+ 0xef, 0xfd, 0xde, 0xd3, 0xff, 0xfd, 0xfd, 0x87, 0xdd, 0x79, 0x22, 0xbd, 0x98, 0x0b, 0xea, 0xcd,
+ 0x06, 0x9e, 0xa0, 0x92, 0xe7, 0x22, 0xa6, 0xd1, 0x84, 0xc7, 0x44, 0x71, 0xe1, 0x66, 0x82, 0x2b,
+ 0x8e, 0xda, 0xf3, 0x44, 0xba, 0x05, 0xe3, 0xce, 0x06, 0xf6, 0x83, 0x3c, 0xc9, 0x88, 0x47, 0xd2,
+ 0x94, 0x2b, 0xa2, 0x18, 0x4f, 0xa5, 0x27, 0x15, 0x51, 0xb9, 0xac, 0x58, 0xdb, 0xd9, 0xde, 0x17,
+ 0xf3, 0x54, 0xd1, 0xb9, 0x8a, 0x32, 0x22, 0xc8, 0x74, 0x4d, 0xec, 0xcf, 0xc8, 0x84, 0x25, 0x44,
+ 0x51, 0x6f, 0xfd, 0xa8, 0x1a, 0xdd, 0x5f, 0x0d, 0x78, 0x3b, 0x5c, 0x29, 0x38, 0xad, 0x04, 0xa0,
+ 0x63, 0xa8, 0xcb, 0x78, 0x4c, 0xa7, 0xd4, 0x02, 0x0e, 0xe8, 0xed, 0xf9, 0x8f, 0xdc, 0x2d, 0x2d,
+ 0xee, 0x35, 0xda, 0x3d, 0x2b, 0x51, 0xdc, 0x5a, 0xe2, 0xe6, 0x0f, 0xa0, 0x99, 0x20, 0x5c, 0x0d,
+ 0xa3, 0x3d, 0xa8, 0xb1, 0xc4, 0xd2, 0x1c, 0xd0, 0x33, 0x42, 0x8d, 0x25, 0xa8, 0x03, 0x0d, 0x92,
+ 0xab, 0x31, 0x17, 0x4c, 0x2d, 0xac, 0x7a, 0x59, 0xbe, 0x2c, 0xa0, 0xe7, 0x70, 0x77, 0xe3, 0x84,
+ 0x5a, 0x64, 0xd4, 0x6a, 0x14, 0x04, 0xbe, 0xb5, 0xc4, 0x0d, 0x51, 0x6c, 0xdd, 0x59, 0x77, 0x47,
+ 0x8b, 0x8c, 0xa2, 0x43, 0xb8, 0x4b, 0xe7, 0x24, 0x56, 0xd1, 0xea, 0x5a, 0xab, 0xe9, 0x80, 0x5e,
+ 0xdb, 0xb7, 0xaf, 0x28, 0x7d, 0x53, 0xf5, 0x82, 0xd2, 0x88, 0x61, 0x2d, 0xdc, 0x29, 0x47, 0x56,
+ 0x55, 0x74, 0x02, 0x61, 0xc2, 0x04, 0x8d, 0x15, 0x9b, 0x51, 0x69, 0xe9, 0x4e, 0xbd, 0xd7, 0xf6,
+ 0x9f, 0xdc, 0x78, 0xe9, 0xd1, 0x1a, 0x0f, 0xb7, 0x26, 0xed, 0x9f, 0x00, 0x1a, 0x9b, 0x0e, 0x3a,
+ 0x80, 0x75, 0x32, 0x51, 0xa5, 0x71, 0x6d, 0xbf, 0x73, 0xd3, 0xba, 0x61, 0x2d, 0x2c, 0x50, 0xf4,
+ 0x02, 0x36, 0x69, 0xaa, 0xc4, 0xa2, 0x72, 0x0a, 0x3f, 0x5c, 0xe2, 0x8e, 0xb0, 0x4d, 0xe0, 0xdf,
+ 0xfd, 0xf4, 0xe1, 0xa0, 0xff, 0x92, 0xf4, 0xbf, 0x1e, 0xf6, 0xdf, 0x47, 0xe7, 0xfd, 0x73, 0xd7,
+ 0xfb, 0xf6, 0xea, 0xe3, 0xb3, 0xc7, 0xc3, 0x5a, 0x58, 0xf1, 0xd8, 0x84, 0xc6, 0x46, 0x06, 0xaa,
+ 0xff, 0xc3, 0xa0, 0xfb, 0x14, 0xea, 0xd5, 0x6f, 0x20, 0x03, 0x36, 0xdf, 0x1d, 0x9d, 0x8d, 0x02,
+ 0xb3, 0x86, 0x5a, 0xb0, 0x31, 0x1c, 0x8d, 0x02, 0x13, 0x14, 0xaf, 0x93, 0xb7, 0xa7, 0xc7, 0xa6,
+ 0x86, 0xef, 0xc1, 0xfd, 0x2b, 0x31, 0x89, 0x64, 0x46, 0x63, 0xf6, 0x99, 0x51, 0x81, 0x5f, 0xff,
+ 0xfe, 0xfe, 0xe7, 0xaf, 0xae, 0xb5, 0x00, 0xbc, 0x1f, 0xf3, 0xa9, 0xfb, 0x85, 0xa9, 0x71, 0x7e,
+ 0xe1, 0x16, 0xf9, 0xdb, 0x3e, 0x06, 0xdf, 0xb9, 0x76, 0x4d, 0x50, 0xa4, 0x29, 0x00, 0x17, 0x7a,
+ 0x19, 0xab, 0xc1, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0x46, 0x23, 0x9e, 0xd8, 0xe3, 0x02, 0x00,
+ 0x00,
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.validate.go
new file mode 100644
index 000000000..87769ee3e
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.validate.go
@@ -0,0 +1,258 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/core/v3/resource_locator.proto
+
+package xds_core_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// define the regex for a UUID once up-front
+var _resource_locator_uuidPattern = regexp.MustCompile("^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$")
+
+// Validate checks the field values on ResourceLocator with the rules defined
+// in the proto definition for this message. If any rules are violated, an
+// error is returned.
+func (m *ResourceLocator) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if _, ok := ResourceLocator_Scheme_name[int32(m.GetScheme())]; !ok {
+ return ResourceLocatorValidationError{
+ field: "Scheme",
+ reason: "value must be one of the defined enum values",
+ }
+ }
+
+ // no validation rules for Id
+
+ // no validation rules for Authority
+
+ if utf8.RuneCountInString(m.GetResourceType()) < 1 {
+ return ResourceLocatorValidationError{
+ field: "ResourceType",
+ reason: "value length must be at least 1 runes",
+ }
+ }
+
+ for idx, item := range m.GetDirectives() {
+ _, _ = idx, item
+
+ if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ResourceLocatorValidationError{
+ field: fmt.Sprintf("Directives[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ switch m.ContextParamSpecifier.(type) {
+
+ case *ResourceLocator_ExactContext:
+
+ if v, ok := interface{}(m.GetExactContext()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ResourceLocatorValidationError{
+ field: "ExactContext",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// ResourceLocatorValidationError is the validation error returned by
+// ResourceLocator.Validate if the designated constraints aren't met.
+type ResourceLocatorValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ResourceLocatorValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ResourceLocatorValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ResourceLocatorValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ResourceLocatorValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ResourceLocatorValidationError) ErrorName() string { return "ResourceLocatorValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ResourceLocatorValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sResourceLocator.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ResourceLocatorValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ResourceLocatorValidationError{}
+
+// Validate checks the field values on ResourceLocator_Directive with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *ResourceLocator_Directive) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ switch m.Directive.(type) {
+
+ case *ResourceLocator_Directive_Alt:
+
+ if v, ok := interface{}(m.GetAlt()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ResourceLocator_DirectiveValidationError{
+ field: "Alt",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *ResourceLocator_Directive_Entry:
+
+ if utf8.RuneCountInString(m.GetEntry()) < 1 {
+ return ResourceLocator_DirectiveValidationError{
+ field: "Entry",
+ reason: "value length must be at least 1 runes",
+ }
+ }
+
+ if !_ResourceLocator_Directive_Entry_Pattern.MatchString(m.GetEntry()) {
+ return ResourceLocator_DirectiveValidationError{
+ field: "Entry",
+ reason: "value does not match regex pattern \"^[0-9a-zA-Z_\\\\-\\\\./~:]+$\"",
+ }
+ }
+
+ default:
+ return ResourceLocator_DirectiveValidationError{
+ field: "Directive",
+ reason: "value is required",
+ }
+
+ }
+
+ return nil
+}
+
+// ResourceLocator_DirectiveValidationError is the validation error returned by
+// ResourceLocator_Directive.Validate if the designated constraints aren't met.
+type ResourceLocator_DirectiveValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ResourceLocator_DirectiveValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ResourceLocator_DirectiveValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ResourceLocator_DirectiveValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ResourceLocator_DirectiveValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ResourceLocator_DirectiveValidationError) ErrorName() string {
+ return "ResourceLocator_DirectiveValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ResourceLocator_DirectiveValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sResourceLocator_Directive.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ResourceLocator_DirectiveValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ResourceLocator_DirectiveValidationError{}
+
+var _ResourceLocator_Directive_Entry_Pattern = regexp.MustCompile("^[0-9a-zA-Z_\\-\\./~:]+$")
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.go
new file mode 100644
index 000000000..a935a997f
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.go
@@ -0,0 +1,113 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: xds/core/v3/resource_name.proto
+
+package xds_core_v3
+
+import (
+ fmt "fmt"
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type ResourceName struct {
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ Authority string `protobuf:"bytes,2,opt,name=authority,proto3" json:"authority,omitempty"`
+ ResourceType string `protobuf:"bytes,3,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"`
+ Context *ContextParams `protobuf:"bytes,4,opt,name=context,proto3" json:"context,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ResourceName) Reset() { *m = ResourceName{} }
+func (m *ResourceName) String() string { return proto.CompactTextString(m) }
+func (*ResourceName) ProtoMessage() {}
+func (*ResourceName) Descriptor() ([]byte, []int) {
+ return fileDescriptor_142e5d243416c11e, []int{0}
+}
+
+func (m *ResourceName) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ResourceName.Unmarshal(m, b)
+}
+func (m *ResourceName) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ResourceName.Marshal(b, m, deterministic)
+}
+func (m *ResourceName) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ResourceName.Merge(m, src)
+}
+func (m *ResourceName) XXX_Size() int {
+ return xxx_messageInfo_ResourceName.Size(m)
+}
+func (m *ResourceName) XXX_DiscardUnknown() {
+ xxx_messageInfo_ResourceName.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceName proto.InternalMessageInfo
+
+func (m *ResourceName) GetId() string {
+ if m != nil {
+ return m.Id
+ }
+ return ""
+}
+
+func (m *ResourceName) GetAuthority() string {
+ if m != nil {
+ return m.Authority
+ }
+ return ""
+}
+
+func (m *ResourceName) GetResourceType() string {
+ if m != nil {
+ return m.ResourceType
+ }
+ return ""
+}
+
+func (m *ResourceName) GetContext() *ContextParams {
+ if m != nil {
+ return m.Context
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*ResourceName)(nil), "xds.core.v3.ResourceName")
+}
+
+func init() { proto.RegisterFile("xds/core/v3/resource_name.proto", fileDescriptor_142e5d243416c11e) }
+
+var fileDescriptor_142e5d243416c11e = []byte{
+ // 271 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x90, 0x31, 0x4e, 0xc3, 0x30,
+ 0x14, 0x86, 0xe5, 0x50, 0xb5, 0xd4, 0x2d, 0x08, 0xb2, 0x10, 0x05, 0x10, 0x11, 0x53, 0x07, 0x64,
+ 0x4b, 0x84, 0x89, 0x31, 0xec, 0x28, 0x8a, 0xd8, 0xab, 0xd7, 0xd8, 0xa2, 0x96, 0x48, 0x1c, 0xd9,
+ 0xcf, 0x51, 0xb2, 0x71, 0x14, 0xce, 0xc1, 0x09, 0x58, 0xb9, 0x0e, 0x13, 0x4a, 0xd2, 0x40, 0x36,
+ 0xeb, 0xfd, 0x9f, 0xed, 0xff, 0x7b, 0xf4, 0xa6, 0x11, 0x96, 0xe7, 0xda, 0x48, 0x5e, 0xc7, 0xdc,
+ 0x48, 0xab, 0x9d, 0xc9, 0xe5, 0xb6, 0x84, 0x42, 0xb2, 0xca, 0x68, 0xd4, 0xfe, 0xaa, 0x11, 0x96,
+ 0x75, 0x00, 0xab, 0xe3, 0xf0, 0xda, 0x89, 0x0a, 0x38, 0x94, 0xa5, 0x46, 0x40, 0xa5, 0x4b, 0xcb,
+ 0x2d, 0x02, 0x3a, 0x3b, 0xb0, 0x61, 0x34, 0x7d, 0x2c, 0xd7, 0x25, 0xca, 0x06, 0xb7, 0x15, 0x18,
+ 0x28, 0x46, 0xe2, 0xa2, 0x86, 0x37, 0x25, 0x00, 0x25, 0x1f, 0x0f, 0x43, 0x70, 0xfb, 0x41, 0xe8,
+ 0x3a, 0x3b, 0x7c, 0xff, 0x0c, 0x85, 0xf4, 0x4f, 0xa9, 0xa7, 0x44, 0x40, 0x22, 0xb2, 0x59, 0x66,
+ 0x9e, 0x12, 0xfe, 0x15, 0x5d, 0x82, 0xc3, 0xbd, 0x36, 0x0a, 0xdb, 0xc0, 0xeb, 0xc7, 0xff, 0x03,
+ 0xff, 0x8e, 0x9e, 0xfc, 0x95, 0xc7, 0xb6, 0x92, 0xc1, 0x51, 0x47, 0x24, 0x8b, 0x9f, 0x64, 0x66,
+ 0xbc, 0x33, 0x92, 0xad, 0xc7, 0xf4, 0xa5, 0xad, 0xa4, 0xff, 0x40, 0x17, 0x87, 0x76, 0xc1, 0x2c,
+ 0x22, 0x9b, 0xd5, 0x7d, 0xc8, 0x26, 0x96, 0xec, 0x69, 0xc8, 0xd2, 0xbe, 0x78, 0x36, 0xa2, 0xc9,
+ 0xe3, 0xe7, 0xfb, 0xd7, 0xf7, 0xdc, 0x3b, 0x26, 0xf4, 0x32, 0xd7, 0x05, 0x7b, 0x55, 0xb8, 0x77,
+ 0x3b, 0xd6, 0x6d, 0x64, 0x7a, 0x3b, 0x39, 0x9f, 0x6a, 0xa4, 0x9d, 0x5c, 0x4a, 0x76, 0xf3, 0xde,
+ 0x32, 0xfe, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x8a, 0x99, 0xd1, 0xf6, 0x6f, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.validate.go
new file mode 100644
index 000000000..6ae4ab240
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.validate.go
@@ -0,0 +1,123 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/core/v3/resource_name.proto
+
+package xds_core_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// define the regex for a UUID once up-front
+var _resource_name_uuidPattern = regexp.MustCompile("^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$")
+
+// Validate checks the field values on ResourceName with the rules defined in
+// the proto definition for this message. If any rules are violated, an error
+// is returned.
+func (m *ResourceName) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ // no validation rules for Id
+
+ // no validation rules for Authority
+
+ if utf8.RuneCountInString(m.GetResourceType()) < 1 {
+ return ResourceNameValidationError{
+ field: "ResourceType",
+ reason: "value length must be at least 1 runes",
+ }
+ }
+
+ if v, ok := interface{}(m.GetContext()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ResourceNameValidationError{
+ field: "Context",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ return nil
+}
+
+// ResourceNameValidationError is the validation error returned by
+// ResourceName.Validate if the designated constraints aren't met.
+type ResourceNameValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ResourceNameValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ResourceNameValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ResourceNameValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ResourceNameValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ResourceNameValidationError) ErrorName() string { return "ResourceNameValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ResourceNameValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sResourceName.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ResourceNameValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ResourceNameValidationError{}
diff --git a/vendor/github.com/containerd/containerd/errdefs/errors.go b/vendor/github.com/containerd/containerd/errdefs/errors.go
index b5200afc0..05a35228c 100644
--- a/vendor/github.com/containerd/containerd/errdefs/errors.go
+++ b/vendor/github.com/containerd/containerd/errdefs/errors.go
@@ -51,43 +51,43 @@ var (
// IsInvalidArgument returns true if the error is due to an invalid argument
func IsInvalidArgument(err error) bool {
- return errors.Cause(err) == ErrInvalidArgument
+ return errors.Is(err, ErrInvalidArgument)
}
// IsNotFound returns true if the error is due to a missing object
func IsNotFound(err error) bool {
- return errors.Cause(err) == ErrNotFound
+ return errors.Is(err, ErrNotFound)
}
// IsAlreadyExists returns true if the error is due to an already existing
// metadata item
func IsAlreadyExists(err error) bool {
- return errors.Cause(err) == ErrAlreadyExists
+ return errors.Is(err, ErrAlreadyExists)
}
// IsFailedPrecondition returns true if an operation could not proceed to the
// lack of a particular condition
func IsFailedPrecondition(err error) bool {
- return errors.Cause(err) == ErrFailedPrecondition
+ return errors.Is(err, ErrFailedPrecondition)
}
// IsUnavailable returns true if the error is due to a resource being unavailable
func IsUnavailable(err error) bool {
- return errors.Cause(err) == ErrUnavailable
+ return errors.Is(err, ErrUnavailable)
}
// IsNotImplemented returns true if the error is due to not being implemented
func IsNotImplemented(err error) bool {
- return errors.Cause(err) == ErrNotImplemented
+ return errors.Is(err, ErrNotImplemented)
}
// IsCanceled returns true if the error is due to `context.Canceled`.
func IsCanceled(err error) bool {
- return errors.Cause(err) == context.Canceled
+ return errors.Is(err, context.Canceled)
}
// IsDeadlineExceeded returns true if the error is due to
// `context.DeadlineExceeded`.
func IsDeadlineExceeded(err error) bool {
- return errors.Cause(err) == context.DeadlineExceeded
+ return errors.Is(err, context.DeadlineExceeded)
}
diff --git a/vendor/github.com/containerd/containerd/log/context.go b/vendor/github.com/containerd/containerd/log/context.go
index 31f1a3ac0..37b6a7d1c 100644
--- a/vendor/github.com/containerd/containerd/log/context.go
+++ b/vendor/github.com/containerd/containerd/log/context.go
@@ -18,7 +18,6 @@ package log
import (
"context"
- "sync/atomic"
"github.com/sirupsen/logrus"
)
@@ -38,22 +37,17 @@ type (
loggerKey struct{}
)
-// TraceLevel is the log level for tracing. Trace level is lower than debug level,
-// and is usually used to trace detailed behavior of the program.
-const TraceLevel = logrus.Level(uint32(logrus.DebugLevel + 1))
+const (
+ // RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to
+ // ensure the formatted time is always the same number of characters.
+ RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
-// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to
-// ensure the formatted time is always the same number of characters.
-const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
+ // TextFormat represents the text logging format
+ TextFormat = "text"
-// ParseLevel takes a string level and returns the Logrus log level constant.
-// It supports trace level.
-func ParseLevel(lvl string) (logrus.Level, error) {
- if lvl == "trace" {
- return TraceLevel, nil
- }
- return logrus.ParseLevel(lvl)
-}
+ // JSONFormat represents the JSON logging format
+ JSONFormat = "json"
+)
// WithLogger returns a new context with the provided logger. Use in
// combination with logger.WithField(s) for great effect.
@@ -72,19 +66,3 @@ func GetLogger(ctx context.Context) *logrus.Entry {
return logger.(*logrus.Entry)
}
-
-// Trace logs a message at level Trace with the log entry passed-in.
-func Trace(e *logrus.Entry, args ...interface{}) {
- level := logrus.Level(atomic.LoadUint32((*uint32)(&e.Logger.Level)))
- if level >= TraceLevel {
- e.Debug(args...)
- }
-}
-
-// Tracef logs a message at level Trace with the log entry passed-in.
-func Tracef(e *logrus.Entry, format string, args ...interface{}) {
- level := logrus.Level(atomic.LoadUint32((*uint32)(&e.Logger.Level)))
- if level >= TraceLevel {
- e.Debugf(format, args...)
- }
-}
diff --git a/vendor/github.com/containerd/containerd/platforms/compare.go b/vendor/github.com/containerd/containerd/platforms/compare.go
index 3ad22a10d..c7657e186 100644
--- a/vendor/github.com/containerd/containerd/platforms/compare.go
+++ b/vendor/github.com/containerd/containerd/platforms/compare.go
@@ -16,7 +16,12 @@
package platforms
-import specs "github.com/opencontainers/image-spec/specs-go/v1"
+import (
+ "strconv"
+ "strings"
+
+ specs "github.com/opencontainers/image-spec/specs-go/v1"
+)
// MatchComparer is able to match and compare platforms to
// filter and sort platforms.
@@ -26,103 +31,70 @@ type MatchComparer interface {
Less(specs.Platform, specs.Platform) bool
}
-// Only returns a match comparer for a single platform
-// using default resolution logic for the platform.
-//
-// For ARMv8, will also match ARMv7, ARMv6 and ARMv5 (for 32bit runtimes)
-// For ARMv7, will also match ARMv6 and ARMv5
-// For ARMv6, will also match ARMv5
-func Only(platform specs.Platform) MatchComparer {
- platform = Normalize(platform)
- if platform.Architecture == "arm" {
- if platform.Variant == "v8" {
- return orderedPlatformComparer{
- matchers: []Matcher{
- &matcher{
- Platform: platform,
- },
- &matcher{
- Platform: specs.Platform{
- Architecture: platform.Architecture,
- OS: platform.OS,
- OSVersion: platform.OSVersion,
- OSFeatures: platform.OSFeatures,
- Variant: "v7",
- },
- },
- &matcher{
- Platform: specs.Platform{
- Architecture: platform.Architecture,
- OS: platform.OS,
- OSVersion: platform.OSVersion,
- OSFeatures: platform.OSFeatures,
- Variant: "v6",
- },
- },
- &matcher{
- Platform: specs.Platform{
- Architecture: platform.Architecture,
- OS: platform.OS,
- OSVersion: platform.OSVersion,
- OSFeatures: platform.OSFeatures,
- Variant: "v5",
- },
- },
- },
- }
- }
- if platform.Variant == "v7" {
- return orderedPlatformComparer{
- matchers: []Matcher{
- &matcher{
- Platform: platform,
- },
- &matcher{
- Platform: specs.Platform{
- Architecture: platform.Architecture,
- OS: platform.OS,
- OSVersion: platform.OSVersion,
- OSFeatures: platform.OSFeatures,
- Variant: "v6",
- },
- },
- &matcher{
- Platform: specs.Platform{
- Architecture: platform.Architecture,
- OS: platform.OS,
- OSVersion: platform.OSVersion,
- OSFeatures: platform.OSFeatures,
- Variant: "v5",
- },
- },
- },
+// platformVector returns an (ordered) vector of appropriate specs.Platform
+// objects to try matching for the given platform object (see platforms.Only).
+func platformVector(platform specs.Platform) []specs.Platform {
+ vector := []specs.Platform{platform}
+
+ switch platform.Architecture {
+ case "amd64":
+ vector = append(vector, specs.Platform{
+ Architecture: "386",
+ OS: platform.OS,
+ OSVersion: platform.OSVersion,
+ OSFeatures: platform.OSFeatures,
+ Variant: platform.Variant,
+ })
+ case "arm":
+ if armVersion, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && armVersion > 5 {
+ for armVersion--; armVersion >= 5; armVersion-- {
+ vector = append(vector, specs.Platform{
+ Architecture: platform.Architecture,
+ OS: platform.OS,
+ OSVersion: platform.OSVersion,
+ OSFeatures: platform.OSFeatures,
+ Variant: "v" + strconv.Itoa(armVersion),
+ })
}
}
- if platform.Variant == "v6" {
- return orderedPlatformComparer{
- matchers: []Matcher{
- &matcher{
- Platform: platform,
- },
- &matcher{
- Platform: specs.Platform{
- Architecture: platform.Architecture,
- OS: platform.OS,
- OSVersion: platform.OSVersion,
- OSFeatures: platform.OSFeatures,
- Variant: "v5",
- },
- },
- },
- }
+ case "arm64":
+ variant := platform.Variant
+ if variant == "" {
+ variant = "v8"
}
+ vector = append(vector, platformVector(specs.Platform{
+ Architecture: "arm",
+ OS: platform.OS,
+ OSVersion: platform.OSVersion,
+ OSFeatures: platform.OSFeatures,
+ Variant: variant,
+ })...)
}
- return singlePlatformComparer{
- Matcher: &matcher{
- Platform: platform,
- },
- }
+ return vector
+}
+
+// Only returns a match comparer for a single platform
+// using default resolution logic for the platform.
+//
+// For arm/v8, will also match arm/v7, arm/v6 and arm/v5
+// For arm/v7, will also match arm/v6 and arm/v5
+// For arm/v6, will also match arm/v5
+// For amd64, will also match 386
+func Only(platform specs.Platform) MatchComparer {
+ return Ordered(platformVector(Normalize(platform))...)
+}
+
+// OnlyStrict returns a match comparer for a single platform.
+//
+// Unlike Only, OnlyStrict does not match sub platforms.
+// So, "arm/vN" will not match "arm/vM" where M < N,
+// and "amd64" will not also match "386".
+//
+// OnlyStrict matches non-canonical forms.
+// So, "arm64" matches "arm/64/v8".
+func OnlyStrict(platform specs.Platform) MatchComparer {
+ return Ordered(Normalize(platform))
}
// Ordered returns a platform MatchComparer which matches any of the platforms
@@ -153,14 +125,6 @@ func Any(platforms ...specs.Platform) MatchComparer {
// with preference for ordering.
var All MatchComparer = allPlatformComparer{}
-type singlePlatformComparer struct {
- Matcher
-}
-
-func (c singlePlatformComparer) Less(p1, p2 specs.Platform) bool {
- return c.Match(p1) && !c.Match(p2)
-}
-
type orderedPlatformComparer struct {
matchers []Matcher
}
diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go
index 69b336d67..4a7177e31 100644
--- a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go
+++ b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go
@@ -21,6 +21,7 @@ import (
"os"
"runtime"
"strings"
+ "sync"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/log"
@@ -28,14 +29,18 @@ import (
)
// Present the ARM instruction set architecture, eg: v7, v8
-var cpuVariant string
+// Don't use this value directly; call cpuVariant() instead.
+var cpuVariantValue string
-func init() {
- if isArmArch(runtime.GOARCH) {
- cpuVariant = getCPUVariant()
- } else {
- cpuVariant = ""
- }
+var cpuVariantOnce sync.Once
+
+func cpuVariant() string {
+ cpuVariantOnce.Do(func() {
+ if isArmArch(runtime.GOARCH) {
+ cpuVariantValue = getCPUVariant()
+ }
+ })
+ return cpuVariantValue
}
// For Linux, the kernel has already detected the ABI, ISA and Features.
@@ -74,8 +79,8 @@ func getCPUInfo(pattern string) (info string, err error) {
}
func getCPUVariant() string {
- if runtime.GOOS == "windows" {
- // Windows only supports v7 for ARM32 and v8 for ARM64 and so we can use
+ if runtime.GOOS == "windows" || runtime.GOOS == "darwin" {
+ // Windows/Darwin only supports v7 for ARM32 and v8 for ARM64 and so we can use
// runtime.GOARCH to determine the variants
var variant string
switch runtime.GOARCH {
@@ -96,16 +101,25 @@ func getCPUVariant() string {
return ""
}
- switch variant {
- case "8", "AArch64":
+ // handle edge case for Raspberry Pi ARMv6 devices (which due to a kernel quirk, report "CPU architecture: 7")
+ // https://www.raspberrypi.org/forums/viewtopic.php?t=12614
+ if runtime.GOARCH == "arm" && variant == "7" {
+ model, err := getCPUInfo("model name")
+ if err == nil && strings.HasPrefix(strings.ToLower(model), "armv6-compatible") {
+ variant = "6"
+ }
+ }
+
+ switch strings.ToLower(variant) {
+ case "8", "aarch64":
variant = "v8"
- case "7", "7M", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)":
+ case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)":
variant = "v7"
- case "6", "6TEJ":
+ case "6", "6tej":
variant = "v6"
- case "5", "5T", "5TE", "5TEJ":
+ case "5", "5t", "5te", "5tej":
variant = "v5"
- case "4", "4T":
+ case "4", "4t":
variant = "v4"
case "3":
variant = "v3"
diff --git a/vendor/github.com/containerd/containerd/platforms/defaults.go b/vendor/github.com/containerd/containerd/platforms/defaults.go
index a14d80e58..cb77fbc9f 100644
--- a/vendor/github.com/containerd/containerd/platforms/defaults.go
+++ b/vendor/github.com/containerd/containerd/platforms/defaults.go
@@ -33,6 +33,11 @@ func DefaultSpec() specs.Platform {
OS: runtime.GOOS,
Architecture: runtime.GOARCH,
// The Variant field will be empty if arch != ARM.
- Variant: cpuVariant,
+ Variant: cpuVariant(),
}
}
+
+// DefaultStrict returns strict form of Default.
+func DefaultStrict() MatchComparer {
+ return OnlyStrict(DefaultSpec())
+}
diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go
index 0defbd36c..0c380e3b7 100644
--- a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go
+++ b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go
@@ -19,13 +19,63 @@
package platforms
import (
+ "fmt"
+ "runtime"
+ "strconv"
+ "strings"
+
+ imagespec "github.com/opencontainers/image-spec/specs-go/v1"
specs "github.com/opencontainers/image-spec/specs-go/v1"
+ "golang.org/x/sys/windows"
)
-// Default returns the default matcher for the platform.
+type matchComparer struct {
+ defaults Matcher
+ osVersionPrefix string
+}
+
+// Match matches platform with the same windows major, minor
+// and build version.
+func (m matchComparer) Match(p imagespec.Platform) bool {
+ if m.defaults.Match(p) {
+ // TODO(windows): Figure out whether OSVersion is deprecated.
+ return strings.HasPrefix(p.OSVersion, m.osVersionPrefix)
+ }
+ return false
+}
+
+// Less sorts matched platforms in front of other platforms.
+// For matched platforms, it puts platforms with larger revision
+// number in front.
+func (m matchComparer) Less(p1, p2 imagespec.Platform) bool {
+ m1, m2 := m.Match(p1), m.Match(p2)
+ if m1 && m2 {
+ r1, r2 := revision(p1.OSVersion), revision(p2.OSVersion)
+ return r1 > r2
+ }
+ return m1 && !m2
+}
+
+func revision(v string) int {
+ parts := strings.Split(v, ".")
+ if len(parts) < 4 {
+ return 0
+ }
+ r, err := strconv.Atoi(parts[3])
+ if err != nil {
+ return 0
+ }
+ return r
+}
+
+// Default returns the current platform's default platform specification.
func Default() MatchComparer {
- return Ordered(DefaultSpec(), specs.Platform{
- OS: "linux",
- Architecture: "amd64",
- })
+ major, minor, build := windows.RtlGetNtVersionNumbers()
+ return matchComparer{
+ defaults: Ordered(DefaultSpec(), specs.Platform{
+ OS: "linux",
+ Architecture: runtime.GOARCH,
+ }),
+ osVersionPrefix: fmt.Sprintf("%d.%d.%d", major, minor, build),
+ }
}
diff --git a/vendor/github.com/containerd/containerd/platforms/platforms.go b/vendor/github.com/containerd/containerd/platforms/platforms.go
index 77d3f184e..088bdea05 100644
--- a/vendor/github.com/containerd/containerd/platforms/platforms.go
+++ b/vendor/github.com/containerd/containerd/platforms/platforms.go
@@ -189,8 +189,8 @@ func Parse(specifier string) (specs.Platform, error) {
if isKnownOS(p.OS) {
// picks a default architecture
p.Architecture = runtime.GOARCH
- if p.Architecture == "arm" && cpuVariant != "v7" {
- p.Variant = cpuVariant
+ if p.Architecture == "arm" && cpuVariant() != "v7" {
+ p.Variant = cpuVariant()
}
return p, nil
diff --git a/vendor/github.com/dennwc/varint/.gitignore b/vendor/github.com/dennwc/varint/.gitignore
new file mode 100644
index 000000000..9385b6db1
--- /dev/null
+++ b/vendor/github.com/dennwc/varint/.gitignore
@@ -0,0 +1,2 @@
+*.o
+*.txt
\ No newline at end of file
diff --git a/vendor/github.com/dennwc/varint/.travis.yml b/vendor/github.com/dennwc/varint/.travis.yml
new file mode 100644
index 000000000..b3da258f5
--- /dev/null
+++ b/vendor/github.com/dennwc/varint/.travis.yml
@@ -0,0 +1,7 @@
+language: go
+
+go:
+ - 1.12.x
+
+env:
+ - GO111MODULE=on
\ No newline at end of file
diff --git a/vendor/github.com/dennwc/varint/LICENSE b/vendor/github.com/dennwc/varint/LICENSE
new file mode 100644
index 000000000..8b3f68715
--- /dev/null
+++ b/vendor/github.com/dennwc/varint/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2019 Denys Smirnov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/dennwc/varint/README.md b/vendor/github.com/dennwc/varint/README.md
new file mode 100644
index 000000000..fe15b3b50
--- /dev/null
+++ b/vendor/github.com/dennwc/varint/README.md
@@ -0,0 +1,47 @@
+# varint
+
+This package provides an optimized implementation of protobuf's varint encoding/decoding.
+It has no dependencies.
+
+Benchmarks comparing to a `binary.Uvarint`:
+
+```
+benchmark old ns/op new ns/op delta
+BenchmarkUvarint/1-8 4.13 2.85 -30.99%
+BenchmarkUvarint/1_large-8 4.01 2.28 -43.14%
+BenchmarkUvarint/2-8 6.23 2.87 -53.93%
+BenchmarkUvarint/2_large-8 5.60 2.86 -48.93%
+BenchmarkUvarint/3-8 6.55 3.44 -47.48%
+BenchmarkUvarint/3_large-8 6.54 2.86 -56.27%
+BenchmarkUvarint/4-8 7.30 3.71 -49.18%
+BenchmarkUvarint/4_large-8 7.46 3.10 -58.45%
+BenchmarkUvarint/5-8 8.31 4.12 -50.42%
+BenchmarkUvarint/5_large-8 8.56 3.48 -59.35%
+BenchmarkUvarint/6-8 9.42 4.66 -50.53%
+BenchmarkUvarint/6_large-8 9.91 4.07 -58.93%
+BenchmarkUvarint/7-8 10.6 5.28 -50.19%
+BenchmarkUvarint/7_large-8 11.0 4.70 -57.27%
+BenchmarkUvarint/8-8 11.7 6.02 -48.55%
+BenchmarkUvarint/8_large-8 12.1 5.19 -57.11%
+BenchmarkUvarint/9-8 12.9 6.83 -47.05%
+BenchmarkUvarint/9_large-8 13.1 5.71 -56.41%
+```
+
+It also provides additional functionality like `UvarintSize` (similar to `sov*` in `gogo/protobuf`):
+
+```
+benchmark old ns/op new ns/op delta
+BenchmarkUvarintSize/1-8 1.71 0.43 -74.85%
+BenchmarkUvarintSize/2-8 2.56 0.57 -77.73%
+BenchmarkUvarintSize/3-8 3.22 0.72 -77.64%
+BenchmarkUvarintSize/4-8 3.74 0.72 -80.75%
+BenchmarkUvarintSize/5-8 4.29 0.57 -86.71%
+BenchmarkUvarintSize/6-8 4.85 0.58 -88.04%
+BenchmarkUvarintSize/7-8 5.43 0.71 -86.92%
+BenchmarkUvarintSize/8-8 6.01 0.86 -85.69%
+BenchmarkUvarintSize/9-8 6.64 1.00 -84.94%
+```
+
+# License
+
+MIT
\ No newline at end of file
diff --git a/vendor/github.com/dennwc/varint/go.mod b/vendor/github.com/dennwc/varint/go.mod
new file mode 100644
index 000000000..f7883d4fe
--- /dev/null
+++ b/vendor/github.com/dennwc/varint/go.mod
@@ -0,0 +1,3 @@
+module github.com/dennwc/varint
+
+go 1.12
diff --git a/vendor/github.com/dennwc/varint/proto.go b/vendor/github.com/dennwc/varint/proto.go
new file mode 100644
index 000000000..e3b458547
--- /dev/null
+++ b/vendor/github.com/dennwc/varint/proto.go
@@ -0,0 +1,244 @@
+package varint
+
+// ProtoTag decodes a protobuf's field number and wire type pair
+// from buf and returns that value and the number of bytes read (> 0).
+// If an error occurred, n = 0 is returned.
+func ProtoTag(buf []byte) (num int, typ byte, n int) {
+ // Same unrolled implementation as in Uvarint.
+ //
+ // But this time we can check if the wire type and field num
+ // are valid when reading the first byte.
+ //
+ // Also, the shifts are now different, because first 3 bits
+ // are for the wire type.
+ //
+ // The implementation will stop at 9 bytes, returning an error.
+ sz := len(buf)
+ if sz == 0 {
+ return 0, 0, 0
+ }
+ const (
+ bit = 1 << 7
+ mask = bit - 1
+ step = 7
+
+ // protobuf
+ typBits = 3
+ typMask = 1<<3 - 1
+ )
+ if sz >= 9 { // no bound checks
+ // i == 0
+ b := buf[0]
+ if b == 0 {
+ return 0, 0, 0
+ }
+ typ = b & typMask
+ if typ > 5 {
+ return 0, 0, 0
+ }
+ if b < bit {
+ num = int(b >> typBits)
+ if num == 0 {
+ return 0, 0, 0
+ }
+ n = 1
+ return
+ }
+ num = int((b & mask) >> typBits)
+ var s uint = step - typBits
+
+ // i == 1
+ b = buf[1]
+ if b < bit {
+ num |= int(b) << s
+ n = 2
+ return
+ }
+ num |= int(b&mask) << s
+ s += step
+
+ // i == 2
+ b = buf[2]
+ if b < bit {
+ num |= int(b) << s
+ n = 3
+ return
+ }
+ num |= int(b&mask) << s
+ s += step
+
+ // i == 3
+ b = buf[3]
+ if b < bit {
+ num |= int(b) << s
+ n = 4
+ return
+ }
+ num |= int(b&mask) << s
+ s += step
+
+ // i == 4
+ b = buf[4]
+ if b < bit {
+ num |= int(b) << s
+ n = 5
+ return
+ }
+ num |= int(b&mask) << s
+ s += step
+
+ // i == 5
+ b = buf[5]
+ if b < bit {
+ num |= int(b) << s
+ n = 6
+ return
+ }
+ num |= int(b&mask) << s
+ s += step
+
+ // i == 6
+ b = buf[6]
+ if b < bit {
+ num |= int(b) << s
+ n = 7
+ return
+ }
+ num |= int(b&mask) << s
+ s += step
+
+ // i == 7
+ b = buf[7]
+ if b < bit {
+ num |= int(b) << s
+ n = 8
+ return
+ }
+ num |= int(b&mask) << s
+ s += step
+
+ // i == 8
+ b = buf[8]
+ if b < bit {
+ num |= int(b) << s
+ n = 9
+ return
+ }
+ return 0, 0, 0 // too much
+ }
+
+ // i == 0
+ b := buf[0]
+ if b == 0 {
+ return 0, 0, 0
+ }
+ typ = b & typMask
+ if typ > 5 {
+ return 0, 0, 0
+ }
+ if b < bit {
+ num = int(b >> typBits)
+ if num == 0 {
+ return 0, 0, 0
+ }
+ n = 1
+ return
+ } else if sz == 1 {
+ return 0, 0, 0
+ }
+ num = int((b & mask) >> typBits)
+ var s uint = step - typBits
+
+ // i == 1
+ b = buf[1]
+ if b < bit {
+ num |= int(b) << s
+ n = 2
+ return
+ } else if sz == 2 {
+ return 0, 0, 0
+ }
+ num |= int(b&mask) << s
+ s += step
+
+ // i == 2
+ b = buf[2]
+ if b < bit {
+ num |= int(b) << s
+ n = 3
+ return
+ } else if sz == 3 {
+ return 0, 0, 0
+ }
+ num |= int(b&mask) << s
+ s += step
+
+ // i == 3
+ b = buf[3]
+ if b < bit {
+ num |= int(b) << s
+ n = 4
+ return
+ } else if sz == 4 {
+ return 0, 0, 0
+ }
+ num |= int(b&mask) << s
+ s += step
+
+ // i == 4
+ b = buf[4]
+ if b < bit {
+ num |= int(b) << s
+ n = 5
+ return
+ } else if sz == 5 {
+ return 0, 0, 0
+ }
+ num |= int(b&mask) << s
+ s += step
+
+ // i == 5
+ b = buf[5]
+ if b < bit {
+ num |= int(b) << s
+ n = 6
+ return
+ } else if sz == 6 {
+ return 0, 0, 0
+ }
+ num |= int(b&mask) << s
+ s += step
+
+ // i == 6
+ b = buf[6]
+ if b < bit {
+ num |= int(b) << s
+ n = 7
+ return
+ } else if sz == 7 {
+ return 0, 0, 0
+ }
+ num |= int(b&mask) << s
+ s += step
+
+ // i == 7
+ b = buf[7]
+ if b < bit {
+ num |= int(b) << s
+ n = 8
+ return
+ } else if sz == 8 {
+ return 0, 0, 0
+ }
+ num |= int(b&mask) << s
+ s += step
+
+ // i == 8
+ b = buf[8]
+ if b < bit {
+ num |= int(b) << s
+ n = 9
+ return
+ }
+ return 0, 0, 0 // too much
+}
diff --git a/vendor/github.com/dennwc/varint/varint.go b/vendor/github.com/dennwc/varint/varint.go
new file mode 100644
index 000000000..83278c2d7
--- /dev/null
+++ b/vendor/github.com/dennwc/varint/varint.go
@@ -0,0 +1,270 @@
+package varint
+
+const maxUint64 = uint64(1<<64 - 1)
+
+// MaxLenN is the maximum length of a varint-encoded N-bit integer.
+const (
+ MaxLen8 = 2
+ MaxLen16 = 3
+ MaxLen32 = 5
+ MaxLen64 = 10
+)
+
+// MaxValN is the maximum varint-encoded integer that fits in N bytes.
+const (
+ MaxVal9 = maxUint64 >> (1 + iota*7)
+ MaxVal8
+ MaxVal7
+ MaxVal6
+ MaxVal5
+ MaxVal4
+ MaxVal3
+ MaxVal2
+ MaxVal1
+)
+
+// UvarintSize returns the number of bytes necessary to encode a given uint.
+func UvarintSize(x uint64) int {
+ if x <= MaxVal4 {
+ if x <= MaxVal1 {
+ return 1
+ } else if x <= MaxVal2 {
+ return 2
+ } else if x <= MaxVal3 {
+ return 3
+ }
+ return 4
+ }
+ if x <= MaxVal5 {
+ return 5
+ } else if x <= MaxVal6 {
+ return 6
+ } else if x <= MaxVal7 {
+ return 7
+ } else if x <= MaxVal8 {
+ return 8
+ } else if x <= MaxVal9 {
+ return 9
+ }
+ return 10
+}
+
+// Uvarint decodes a uint64 from buf and returns that value and the
+// number of bytes read (> 0). If an error occurred, the value is 0
+// and the number of bytes n is <= 0 meaning:
+//
+// n == 0: buf too small
+// n < 0: value larger than 64 bits (overflow)
+// and -n is the number of bytes read
+//
+func Uvarint(buf []byte) (uint64, int) {
+ // Fully unrolled implementation of binary.Uvarint.
+ //
+ // It will also eliminate bound checks for buffers larger than 9 bytes.
+ sz := len(buf)
+ if sz == 0 {
+ return 0, 0
+ }
+ const (
+ step = 7
+ bit = 1 << 7
+ mask = bit - 1
+ )
+ if sz >= 10 { // no bound checks
+ // i == 0
+ b := buf[0]
+ if b < bit {
+ return uint64(b), 1
+ }
+ x := uint64(b & mask)
+ var s uint = step
+
+ // i == 1
+ b = buf[1]
+ if b < bit {
+ return x | uint64(b)< 1 {
+ return 0, -10 // overflow
+ }
+ return x | uint64(b)< 1 {
+ return 0, -10 // overflow
+ }
+ return x | uint64(b)<= 200 && c <= 299 {
return nil
@@ -414,6 +452,10 @@ func CheckResponse(r *http.Response) error {
}
}
+ if errorResponse.RequestID == "" {
+ errorResponse.RequestID = r.Header.Get("x-request-id")
+ }
+
return errorResponse
}
diff --git a/vendor/github.com/digitalocean/godo/image_actions.go b/vendor/github.com/digitalocean/godo/image_actions.go
index 976f7c687..08953f0ba 100644
--- a/vendor/github.com/digitalocean/godo/image_actions.go
+++ b/vendor/github.com/digitalocean/godo/image_actions.go
@@ -8,7 +8,7 @@ import (
// ImageActionsService is an interface for interfacing with the image actions
// endpoints of the DigitalOcean API
-// See: https://developers.digitalocean.com/documentation/v2#image-actions
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Image-Actions
type ImageActionsService interface {
Get(context.Context, int, int) (*Action, *Response, error)
Transfer(context.Context, int, *ActionRequest) (*Action, *Response, error)
diff --git a/vendor/github.com/digitalocean/godo/images.go b/vendor/github.com/digitalocean/godo/images.go
index cf57a3d52..5db374718 100644
--- a/vendor/github.com/digitalocean/godo/images.go
+++ b/vendor/github.com/digitalocean/godo/images.go
@@ -10,7 +10,7 @@ const imageBasePath = "v2/images"
// ImagesService is an interface for interfacing with the images
// endpoints of the DigitalOcean API
-// See: https://developers.digitalocean.com/documentation/v2#images
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Images
type ImagesService interface {
List(context.Context, *ListOptions) ([]Image, *Response, error)
ListDistribution(ctx context.Context, opt *ListOptions) ([]Image, *Response, error)
diff --git a/vendor/github.com/digitalocean/godo/invoices.go b/vendor/github.com/digitalocean/godo/invoices.go
index c8d7f4083..39bffbc5e 100644
--- a/vendor/github.com/digitalocean/godo/invoices.go
+++ b/vendor/github.com/digitalocean/godo/invoices.go
@@ -12,7 +12,7 @@ const invoicesBasePath = "v2/customers/my/invoices"
// InvoicesService is an interface for interfacing with the Invoice
// endpoints of the DigitalOcean API
-// See: https://developers.digitalocean.com/documentation/v2/#invoices
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Billing
type InvoicesService interface {
Get(context.Context, string, *ListOptions) (*Invoice, *Response, error)
GetPDF(context.Context, string) ([]byte, *Response, error)
diff --git a/vendor/github.com/digitalocean/godo/keys.go b/vendor/github.com/digitalocean/godo/keys.go
index b97554d14..dec62a7bd 100644
--- a/vendor/github.com/digitalocean/godo/keys.go
+++ b/vendor/github.com/digitalocean/godo/keys.go
@@ -10,7 +10,7 @@ const keysBasePath = "v2/account/keys"
// KeysService is an interface for interfacing with the keys
// endpoints of the DigitalOcean API
-// See: https://developers.digitalocean.com/documentation/v2#keys
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/SSH-Keys
type KeysService interface {
List(context.Context, *ListOptions) ([]Key, *Response, error)
GetByID(context.Context, int) (*Key, *Response, error)
diff --git a/vendor/github.com/digitalocean/godo/kubernetes.go b/vendor/github.com/digitalocean/godo/kubernetes.go
index 93b402567..0c0d85149 100644
--- a/vendor/github.com/digitalocean/godo/kubernetes.go
+++ b/vendor/github.com/digitalocean/godo/kubernetes.go
@@ -21,7 +21,7 @@ const (
// KubernetesService is an interface for interfacing with the Kubernetes endpoints
// of the DigitalOcean API.
-// See: https://developers.digitalocean.com/documentation/v2#kubernetes
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Kubernetes
type KubernetesService interface {
Create(context.Context, *KubernetesClusterCreateRequest) (*KubernetesCluster, *Response, error)
Get(context.Context, string) (*KubernetesCluster, *Response, error)
@@ -34,6 +34,9 @@ type KubernetesService interface {
Update(context.Context, string, *KubernetesClusterUpdateRequest) (*KubernetesCluster, *Response, error)
Upgrade(context.Context, string, *KubernetesClusterUpgradeRequest) (*Response, error)
Delete(context.Context, string) (*Response, error)
+ DeleteSelective(context.Context, string, *KubernetesClusterDeleteSelectiveRequest) (*Response, error)
+ DeleteDangerous(context.Context, string) (*Response, error)
+ ListAssociatedResourcesForDeletion(context.Context, string) (*KubernetesAssociatedResources, *Response, error)
CreateNodePool(ctx context.Context, clusterID string, req *KubernetesNodePoolCreateRequest) (*KubernetesNodePool, *Response, error)
GetNodePool(ctx context.Context, clusterID, poolID string) (*KubernetesNodePool, *Response, error)
@@ -84,6 +87,13 @@ type KubernetesClusterUpdateRequest struct {
SurgeUpgrade bool `json:"surge_upgrade,omitempty"`
}
+// KubernetesClusterDeleteSelectiveRequest represents a delete selective request to delete a cluster and it's associated resources.
+type KubernetesClusterDeleteSelectiveRequest struct {
+ Volumes []string `json:"volumes"`
+ VolumeSnapshots []string `json:"volume_snapshots"`
+ LoadBalancers []string `json:"load_balancers"`
+}
+
// KubernetesClusterUpgradeRequest represents a request to upgrade a Kubernetes cluster.
type KubernetesClusterUpgradeRequest struct {
VersionSlug string `json:"version,omitempty"`
@@ -192,6 +202,11 @@ type KubernetesCluster struct {
UpdatedAt time.Time `json:"updated_at,omitempty"`
}
+// URN returns the Kubernetes cluster's ID in the format of DigitalOcean URN.
+func (kc KubernetesCluster) URN() string {
+ return ToURN("Kubernetes", kc.ID)
+}
+
// KubernetesClusterUser represents a Kubernetes cluster user.
type KubernetesClusterUser struct {
Username string `json:"username,omitempty"`
@@ -445,6 +460,19 @@ type ClusterlintOwner struct {
Name string `json:"name"`
}
+// KubernetesAssociatedResources represents a cluster's associated resources
+type KubernetesAssociatedResources struct {
+ Volumes []*AssociatedResource `json:"volumes"`
+ VolumeSnapshots []*AssociatedResource `json:"volume_snapshots"`
+ LoadBalancers []*AssociatedResource `json:"load_balancers"`
+}
+
+// AssociatedResource is the object to represent a Kubernetes cluster associated resource's Id and Name.
+type AssociatedResource struct {
+ ID string `json:"id"`
+ Name string `json:"name"`
+}
+
type kubernetesClustersRoot struct {
Clusters []*KubernetesCluster `json:"kubernetes_clusters,omitempty"`
Links *Links `json:"links,omitempty"`
@@ -548,6 +576,54 @@ func (svc *KubernetesServiceOp) Delete(ctx context.Context, clusterID string) (*
return resp, nil
}
+// DeleteSelective deletes a Kubernetes cluster and the specified associated resources.
+// Users can choose to delete specific volumes, volume snapshots or load balancers along with the cluster
+// There is no way to recover a cluster or the specified resources once destroyed.
+func (svc *KubernetesServiceOp) DeleteSelective(ctx context.Context, clusterID string, request *KubernetesClusterDeleteSelectiveRequest) (*Response, error) {
+ path := fmt.Sprintf("%s/%s/destroy_with_associated_resources/selective", kubernetesClustersPath, clusterID)
+ req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, request)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := svc.client.Do(ctx, req, nil)
+ if err != nil {
+ return resp, err
+ }
+ return resp, nil
+}
+
+// DeleteDangerous deletes a Kubernetes cluster and all its associated resources. There is no way to recover a cluster
+// or it's associated resources once destroyed.
+func (svc *KubernetesServiceOp) DeleteDangerous(ctx context.Context, clusterID string) (*Response, error) {
+ path := fmt.Sprintf("%s/%s/destroy_with_associated_resources/dangerous", kubernetesClustersPath, clusterID)
+ req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := svc.client.Do(ctx, req, nil)
+ if err != nil {
+ return resp, err
+ }
+ return resp, nil
+}
+
+// ListAssociatedResourcesForDeletion lists a Kubernetes cluster's resources that can be selected
+// for deletion along with the cluster. See DeleteSelective
+// Associated resources include volumes, volume snapshots and load balancers.
+func (svc *KubernetesServiceOp) ListAssociatedResourcesForDeletion(ctx context.Context, clusterID string) (*KubernetesAssociatedResources, *Response, error) {
+ path := fmt.Sprintf("%s/%s/destroy_with_associated_resources", kubernetesClustersPath, clusterID)
+ req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ root := new(KubernetesAssociatedResources)
+ resp, err := svc.client.Do(ctx, req, root)
+ if err != nil {
+ return nil, resp, err
+ }
+ return root, resp, nil
+}
+
// List returns a list of the Kubernetes clusters visible with the caller's API token.
func (svc *KubernetesServiceOp) List(ctx context.Context, opts *ListOptions) ([]*KubernetesCluster, *Response, error) {
path := kubernetesClustersPath
diff --git a/vendor/github.com/digitalocean/godo/load_balancers.go b/vendor/github.com/digitalocean/godo/load_balancers.go
index b7debfe15..6dd438775 100644
--- a/vendor/github.com/digitalocean/godo/load_balancers.go
+++ b/vendor/github.com/digitalocean/godo/load_balancers.go
@@ -12,7 +12,7 @@ const forwardingRulesPath = "forwarding_rules"
const dropletsPath = "droplets"
// LoadBalancersService is an interface for managing load balancers with the DigitalOcean API.
-// See: https://developers.digitalocean.com/documentation/v2#load-balancers
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Load-Balancers
type LoadBalancersService interface {
Get(context.Context, string) (*LoadBalancer, *Response, error)
List(context.Context, *ListOptions) ([]LoadBalancer, *Response, error)
diff --git a/vendor/github.com/digitalocean/godo/monitoring.go b/vendor/github.com/digitalocean/godo/monitoring.go
new file mode 100644
index 000000000..2b87dc859
--- /dev/null
+++ b/vendor/github.com/digitalocean/godo/monitoring.go
@@ -0,0 +1,220 @@
+package godo
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+)
+
+const (
+ monitoringBasePath = "v2/monitoring"
+ alertPolicyBasePath = monitoringBasePath + "/alerts"
+
+ DropletCPUUtilizationPercent = "v1/insights/droplet/cpu"
+ DropletMemoryUtilizationPercent = "v1/insights/droplet/memory_utilization_percent"
+ DropletDiskUtilizationPercent = "v1/insights/droplet/disk_utilization_percent"
+ DropletPublicOutboundBandwidthRate = "v1/insights/droplet/public_outbound_bandwidth"
+ DropletDiskReadRate = "v1/insights/droplet/disk_read"
+ DropletDiskWriteRate = "v1/insights/droplet/disk_write"
+ DropletOneMinuteLoadAverage = "v1/insights/droplet/load_1"
+ DropletFiveMinuteLoadAverage = "v1/insights/droplet/load_5"
+ DropletFifteenMinuteLoadAverage = "v1/insights/droplet/load_15"
+)
+
+// MonitoringService is an interface for interfacing with the
+// monitoring endpoints of the DigitalOcean API
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Monitoring
+type MonitoringService interface {
+ ListAlertPolicies(context.Context, *ListOptions) ([]AlertPolicy, *Response, error)
+ GetAlertPolicy(context.Context, string) (*AlertPolicy, *Response, error)
+ CreateAlertPolicy(context.Context, *AlertPolicyCreateRequest) (*AlertPolicy, *Response, error)
+ UpdateAlertPolicy(context.Context, string, *AlertPolicyUpdateRequest) (*AlertPolicy, *Response, error)
+ DeleteAlertPolicy(context.Context, string) (*Response, error)
+}
+
+// MonitoringServiceOp handles communication with monitoring related methods of the
+// DigitalOcean API.
+type MonitoringServiceOp struct {
+ client *Client
+}
+
+var _ MonitoringService = &MonitoringServiceOp{}
+
+// AlertPolicy represents a DigitalOcean alert policy
+type AlertPolicy struct {
+ UUID string `json:"uuid"`
+ Type string `json:"type"`
+ Description string `json:"description"`
+ Compare AlertPolicyComp `json:"compare"`
+ Value float32 `json:"value"`
+ Window string `json:"window"`
+ Entities []string `json:"entities"`
+ Tags []string `json:"tags"`
+ Alerts Alerts `json:"alerts"`
+ Enabled bool `json:"enabled"`
+}
+
+// Alerts represents the alerts section of an alert policy
+type Alerts struct {
+ Slack []SlackDetails `json:"slack"`
+ Email []string `json:"email"`
+}
+
+// SlackDetails represents the details required to send a slack alert
+type SlackDetails struct {
+ URL string `json:"url"`
+ Channel string `json:"channel"`
+}
+
+// AlertPolicyComp represents an alert policy comparison operation
+type AlertPolicyComp string
+
+const (
+ // GreaterThan is the comparison >
+ GreaterThan AlertPolicyComp = "GreaterThan"
+ // LessThan is the comparison <
+ LessThan AlertPolicyComp = "LessThan"
+)
+
+// AlertPolicyCreateRequest holds the info for creating a new alert policy
+type AlertPolicyCreateRequest struct {
+ Type string `json:"type"`
+ Description string `json:"description"`
+ Compare AlertPolicyComp `json:"compare"`
+ Value float32 `json:"value"`
+ Window string `json:"window"`
+ Entities []string `json:"entities"`
+ Tags []string `json:"tags"`
+ Alerts Alerts `json:"alerts"`
+ Enabled *bool `json:"enabled"`
+}
+
+// AlertPolicyUpdateRequest holds the info for updating an existing alert policy
+type AlertPolicyUpdateRequest struct {
+ Type string `json:"type"`
+ Description string `json:"description"`
+ Compare AlertPolicyComp `json:"compare"`
+ Value float32 `json:"value"`
+ Window string `json:"window"`
+ Entities []string `json:"entities"`
+ Tags []string `json:"tags"`
+ Alerts Alerts `json:"alerts"`
+ Enabled *bool `json:"enabled"`
+}
+
+type alertPoliciesRoot struct {
+ AlertPolicies []AlertPolicy `json:"policies"`
+ Links *Links `json:"links"`
+ Meta *Meta `json:"meta"`
+}
+
+type alertPolicyRoot struct {
+ AlertPolicy *AlertPolicy `json:"policy,omitempty"`
+}
+
+// ListAlertPolicies all alert policies
+func (s *MonitoringServiceOp) ListAlertPolicies(ctx context.Context, opt *ListOptions) ([]AlertPolicy, *Response, error) {
+ path := alertPolicyBasePath
+ path, err := addOptions(path, opt)
+
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ root := new(alertPoliciesRoot)
+ resp, err := s.client.Do(ctx, req, root)
+ if err != nil {
+ return nil, resp, err
+ }
+ if l := root.Links; l != nil {
+ resp.Links = l
+ }
+ if m := root.Meta; m != nil {
+ resp.Meta = m
+ }
+ return root.AlertPolicies, resp, err
+}
+
+// GetAlertPolicy gets a single alert policy
+func (s *MonitoringServiceOp) GetAlertPolicy(ctx context.Context, uuid string) (*AlertPolicy, *Response, error) {
+ path := fmt.Sprintf("%s/%s", alertPolicyBasePath, uuid)
+
+ req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ root := new(alertPolicyRoot)
+ resp, err := s.client.Do(ctx, req, root)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return root.AlertPolicy, resp, err
+}
+
+// CreateAlertPolicy creates a new alert policy
+func (s *MonitoringServiceOp) CreateAlertPolicy(ctx context.Context, createRequest *AlertPolicyCreateRequest) (*AlertPolicy, *Response, error) {
+ if createRequest == nil {
+ return nil, nil, NewArgError("createRequest", "cannot be nil")
+ }
+
+ req, err := s.client.NewRequest(ctx, http.MethodPost, alertPolicyBasePath, createRequest)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ root := new(alertPolicyRoot)
+ resp, err := s.client.Do(ctx, req, root)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return root.AlertPolicy, resp, err
+}
+
+// UpdateAlertPolicy updates an existing alert policy
+func (s *MonitoringServiceOp) UpdateAlertPolicy(ctx context.Context, uuid string, updateRequest *AlertPolicyUpdateRequest) (*AlertPolicy, *Response, error) {
+ if uuid == "" {
+ return nil, nil, NewArgError("uuid", "cannot be empty")
+ }
+ if updateRequest == nil {
+ return nil, nil, NewArgError("updateRequest", "cannot be nil")
+ }
+
+ path := fmt.Sprintf("%s/%s", alertPolicyBasePath, uuid)
+ req, err := s.client.NewRequest(ctx, http.MethodPut, path, updateRequest)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ root := new(alertPolicyRoot)
+ resp, err := s.client.Do(ctx, req, root)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return root.AlertPolicy, resp, err
+}
+
+// DeleteAlertPolicy deletes an existing alert policy
+func (s *MonitoringServiceOp) DeleteAlertPolicy(ctx context.Context, uuid string) (*Response, error) {
+ if uuid == "" {
+ return nil, NewArgError("uuid", "cannot be empty")
+ }
+
+ path := fmt.Sprintf("%s/%s", alertPolicyBasePath, uuid)
+ req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := s.client.Do(ctx, req, nil)
+
+ return resp, err
+}
diff --git a/vendor/github.com/digitalocean/godo/projects.go b/vendor/github.com/digitalocean/godo/projects.go
index c31573b29..b59134ba1 100644
--- a/vendor/github.com/digitalocean/godo/projects.go
+++ b/vendor/github.com/digitalocean/godo/projects.go
@@ -17,7 +17,7 @@ const (
)
// ProjectsService is an interface for creating and managing Projects with the DigitalOcean API.
-// See: https://developers.digitalocean.com/documentation/v2/#projects
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Projects
type ProjectsService interface {
List(context.Context, *ListOptions) ([]Project, *Response, error)
GetDefault(context.Context) (*Project, *Response, error)
diff --git a/vendor/github.com/digitalocean/godo/regions.go b/vendor/github.com/digitalocean/godo/regions.go
index b07175e8a..ea82f2f1c 100644
--- a/vendor/github.com/digitalocean/godo/regions.go
+++ b/vendor/github.com/digitalocean/godo/regions.go
@@ -7,7 +7,7 @@ import (
// RegionsService is an interface for interfacing with the regions
// endpoints of the DigitalOcean API
-// See: https://developers.digitalocean.com/documentation/v2#regions
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Regions
type RegionsService interface {
List(context.Context, *ListOptions) ([]Region, *Response, error)
}
diff --git a/vendor/github.com/digitalocean/godo/registry.go b/vendor/github.com/digitalocean/godo/registry.go
index edcfe4f83..dcd4fc755 100644
--- a/vendor/github.com/digitalocean/godo/registry.go
+++ b/vendor/github.com/digitalocean/godo/registry.go
@@ -18,7 +18,7 @@ const (
// RegistryService is an interface for interfacing with the Registry endpoints
// of the DigitalOcean API.
-// See: https://developers.digitalocean.com/documentation/v2#registry
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Container-Registry
type RegistryService interface {
Create(context.Context, *RegistryCreateRequest) (*Registry, *Response, error)
Get(context.Context) (*Registry, *Response, error)
@@ -59,8 +59,10 @@ type RegistryDockerCredentialsRequest struct {
// Registry represents a registry.
type Registry struct {
- Name string `json:"name,omitempty"`
- CreatedAt time.Time `json:"created_at,omitempty"`
+ Name string `json:"name,omitempty"`
+ StorageUsageBytes uint64 `json:"storage_usage_bytes,omitempty"`
+ StorageUsageBytesUpdatedAt time.Time `json:"storage_usage_bytes_updated_at,omitempty"`
+ CreatedAt time.Time `json:"created_at,omitempty"`
}
// Repository represents a repository
diff --git a/vendor/github.com/digitalocean/godo/sizes.go b/vendor/github.com/digitalocean/godo/sizes.go
index d2b93ea7f..a3cb74523 100644
--- a/vendor/github.com/digitalocean/godo/sizes.go
+++ b/vendor/github.com/digitalocean/godo/sizes.go
@@ -7,7 +7,7 @@ import (
// SizesService is an interface for interfacing with the size
// endpoints of the DigitalOcean API
-// See: https://developers.digitalocean.com/documentation/v2#sizes
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Sizes
type SizesService interface {
List(context.Context, *ListOptions) ([]Size, *Response, error)
}
@@ -31,6 +31,7 @@ type Size struct {
Regions []string `json:"regions,omitempty"`
Available bool `json:"available,omitempty"`
Transfer float64 `json:"transfer,omitempty"`
+ Description string `json:"description,omitempty"`
}
func (s Size) String() string {
diff --git a/vendor/github.com/digitalocean/godo/snapshots.go b/vendor/github.com/digitalocean/godo/snapshots.go
index cf95ccc00..bb1b99b0c 100644
--- a/vendor/github.com/digitalocean/godo/snapshots.go
+++ b/vendor/github.com/digitalocean/godo/snapshots.go
@@ -10,7 +10,7 @@ const snapshotBasePath = "v2/snapshots"
// SnapshotsService is an interface for interfacing with the snapshots
// endpoints of the DigitalOcean API
-// See: https://developers.digitalocean.com/documentation/v2#snapshots
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Snapshots
type SnapshotsService interface {
List(context.Context, *ListOptions) ([]Snapshot, *Response, error)
ListVolume(context.Context, *ListOptions) ([]Snapshot, *Response, error)
diff --git a/vendor/github.com/digitalocean/godo/storage.go b/vendor/github.com/digitalocean/godo/storage.go
index 43856e38e..7700ffa08 100644
--- a/vendor/github.com/digitalocean/godo/storage.go
+++ b/vendor/github.com/digitalocean/godo/storage.go
@@ -15,7 +15,7 @@ const (
// StorageService is an interface for interfacing with the storage
// endpoints of the Digital Ocean API.
-// See: https://developers.digitalocean.com/documentation/v2/#block-storage
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Block-Storage
type StorageService interface {
ListVolumes(context.Context, *ListVolumeParams) ([]Volume, *Response, error)
GetVolume(context.Context, string) (*Volume, *Response, error)
diff --git a/vendor/github.com/digitalocean/godo/storage_actions.go b/vendor/github.com/digitalocean/godo/storage_actions.go
index 234aba906..b88b18fe1 100644
--- a/vendor/github.com/digitalocean/godo/storage_actions.go
+++ b/vendor/github.com/digitalocean/godo/storage_actions.go
@@ -8,7 +8,7 @@ import (
// StorageActionsService is an interface for interfacing with the
// storage actions endpoints of the Digital Ocean API.
-// See: https://developers.digitalocean.com/documentation/v2#storage-actions
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Block-Storage-Actions
type StorageActionsService interface {
Attach(ctx context.Context, volumeID string, dropletID int) (*Action, *Response, error)
DetachByDropletID(ctx context.Context, volumeID string, dropletID int) (*Action, *Response, error)
diff --git a/vendor/github.com/digitalocean/godo/tags.go b/vendor/github.com/digitalocean/godo/tags.go
index 6301e15f1..8715f1482 100644
--- a/vendor/github.com/digitalocean/godo/tags.go
+++ b/vendor/github.com/digitalocean/godo/tags.go
@@ -10,7 +10,7 @@ const tagsBasePath = "v2/tags"
// TagsService is an interface for interfacing with the tags
// endpoints of the DigitalOcean API
-// See: https://developers.digitalocean.com/documentation/v2#tags
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Tags
type TagsService interface {
List(context.Context, *ListOptions) ([]Tag, *Response, error)
Get(context.Context, string) (*Tag, *Response, error)
diff --git a/vendor/github.com/digitalocean/godo/vpcs.go b/vendor/github.com/digitalocean/godo/vpcs.go
index 6c7b141c9..f4f22e18e 100644
--- a/vendor/github.com/digitalocean/godo/vpcs.go
+++ b/vendor/github.com/digitalocean/godo/vpcs.go
@@ -10,11 +10,12 @@ const vpcsBasePath = "/v2/vpcs"
// VPCsService is an interface for managing Virtual Private Cloud configurations with the
// DigitalOcean API.
-// See: https://developers.digitalocean.com/documentation/v2#vpcs
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/VPCs
type VPCsService interface {
Create(context.Context, *VPCCreateRequest) (*VPC, *Response, error)
Get(context.Context, string) (*VPC, *Response, error)
List(context.Context, *ListOptions) ([]*VPC, *Response, error)
+ ListMembers(context.Context, string, *VPCListMembersRequest, *ListOptions) ([]*VPCMember, *Response, error)
Update(context.Context, string, *VPCUpdateRequest) (*VPC, *Response, error)
Set(context.Context, string, ...VPCSetField) (*VPC, *Response, error)
Delete(context.Context, string) (*Response, error)
@@ -77,6 +78,16 @@ type VPC struct {
Default bool `json:"default,omitempty"`
}
+type VPCListMembersRequest struct {
+ ResourceType string `url:"resource_type,omitempty"`
+}
+
+type VPCMember struct {
+ URN string `json:"urn,omitempty"`
+ Name string `json:"name,omitempty"`
+ CreatedAt time.Time `json:"created_at,omitempty"`
+}
+
type vpcRoot struct {
VPC *VPC `json:"vpc"`
}
@@ -87,6 +98,12 @@ type vpcsRoot struct {
Meta *Meta `json:"meta"`
}
+type vpcMembersRoot struct {
+ Members []*VPCMember `json:"members"`
+ Links *Links `json:"links"`
+ Meta *Meta `json:"meta"`
+}
+
// Get returns the details of a Virtual Private Cloud.
func (v *VPCsServiceOp) Get(ctx context.Context, id string) (*VPC, *Response, error) {
path := vpcsBasePath + "/" + id
@@ -214,3 +231,35 @@ func (v *VPCsServiceOp) Delete(ctx context.Context, id string) (*Response, error
return resp, nil
}
+
+func (v *VPCsServiceOp) ListMembers(ctx context.Context, id string, request *VPCListMembersRequest, opt *ListOptions) ([]*VPCMember, *Response, error) {
+ path := vpcsBasePath + "/" + id + "/members"
+ pathWithResourceType, err := addOptions(path, request)
+ if err != nil {
+ return nil, nil, err
+ }
+ pathWithOpts, err := addOptions(pathWithResourceType, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := v.client.NewRequest(ctx, http.MethodGet, pathWithOpts, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ root := new(vpcMembersRoot)
+ resp, err := v.client.Do(ctx, req, root)
+ if err != nil {
+ return nil, resp, err
+ }
+ if l := root.Links; l != nil {
+ resp.Links = l
+ }
+ if m := root.Meta; m != nil {
+ resp.Meta = m
+ }
+
+ return root.Members, resp, nil
+
+}
diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml
index 9f1019681..bada4a8e3 100644
--- a/vendor/github.com/docker/docker/api/swagger.yaml
+++ b/vendor/github.com/docker/docker/api/swagger.yaml
@@ -560,7 +560,7 @@ definitions:
format: "int64"
minimum: 0
maximum: 100
- NanoCPUs:
+ NanoCpus:
description: "CPU quota in units of 10-9 CPUs."
type: "integer"
format: "int64"
@@ -5466,7 +5466,7 @@ paths:
MemorySwap: 0
MemoryReservation: 0
KernelMemory: 0
- NanoCPUs: 500000
+ NanoCpus: 500000
CpuPercent: 80
CpuShares: 512
CpuPeriod: 100000
@@ -5583,12 +5583,12 @@ paths:
schema:
$ref: "#/definitions/ErrorResponse"
404:
- description: "no such container"
+ description: "no such image"
schema:
$ref: "#/definitions/ErrorResponse"
examples:
application/json:
- message: "No such container: c2ada9df5af8"
+ message: "No such image: c2ada9df5af8"
409:
description: "conflict"
schema:
@@ -7310,7 +7310,7 @@ paths:
For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the
- the query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded.
+ query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded.
[Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg)
diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go
index 68064ca9c..21edf1fa1 100644
--- a/vendor/github.com/docker/docker/client/client.go
+++ b/vendor/github.com/docker/docker/client/client.go
@@ -2,7 +2,7 @@
Package client is a Go client for the Docker Engine API.
For more information about the Engine API, see the documentation:
-https://docs.docker.com/engine/reference/api/
+https://docs.docker.com/engine/api/
Usage
diff --git a/vendor/github.com/envoyproxy/go-control-plane/LICENSE b/vendor/github.com/envoyproxy/go-control-plane/LICENSE
new file mode 100644
index 000000000..8dada3eda
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.go
new file mode 100644
index 000000000..338685830
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.go
@@ -0,0 +1,160 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/annotations/deprecation.proto
+
+package envoy_annotations
+
+import (
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+var file_envoy_annotations_deprecation_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.FieldOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 189503207,
+ Name: "envoy.annotations.disallowed_by_default",
+ Tag: "varint,189503207,opt,name=disallowed_by_default",
+ Filename: "envoy/annotations/deprecation.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.FieldOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 157299826,
+ Name: "envoy.annotations.deprecated_at_minor_version",
+ Tag: "bytes,157299826,opt,name=deprecated_at_minor_version",
+ Filename: "envoy/annotations/deprecation.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.EnumValueOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 70100853,
+ Name: "envoy.annotations.disallowed_by_default_enum",
+ Tag: "varint,70100853,opt,name=disallowed_by_default_enum",
+ Filename: "envoy/annotations/deprecation.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.EnumValueOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 181198657,
+ Name: "envoy.annotations.deprecated_at_minor_version_enum",
+ Tag: "bytes,181198657,opt,name=deprecated_at_minor_version_enum",
+ Filename: "envoy/annotations/deprecation.proto",
+ },
+}
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+ // optional bool disallowed_by_default = 189503207;
+ E_DisallowedByDefault = &file_envoy_annotations_deprecation_proto_extTypes[0]
+ // The API major and minor version on which the field was deprecated
+ // (e.g., "3.5" for major version 3 and minor version 5).
+ //
+ // optional string deprecated_at_minor_version = 157299826;
+ E_DeprecatedAtMinorVersion = &file_envoy_annotations_deprecation_proto_extTypes[1]
+)
+
+// Extension fields to descriptorpb.EnumValueOptions.
+var (
+ // optional bool disallowed_by_default_enum = 70100853;
+ E_DisallowedByDefaultEnum = &file_envoy_annotations_deprecation_proto_extTypes[2]
+ // The API major and minor version on which the enum value was deprecated
+ // (e.g., "3.5" for major version 3 and minor version 5).
+ //
+ // optional string deprecated_at_minor_version_enum = 181198657;
+ E_DeprecatedAtMinorVersionEnum = &file_envoy_annotations_deprecation_proto_extTypes[3]
+)
+
+var File_envoy_annotations_deprecation_proto protoreflect.FileDescriptor
+
+var file_envoy_annotations_deprecation_proto_rawDesc = []byte{
+ 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x54, 0x0a, 0x15, 0x64, 0x69,
+ 0x73, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x64, 0x65, 0x66, 0x61,
+ 0x75, 0x6c, 0x74, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0xe7, 0xad, 0xae, 0x5a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x64, 0x69, 0x73,
+ 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x42, 0x79, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
+ 0x3a, 0x5f, 0x0a, 0x1b, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61,
+ 0x74, 0x5f, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12,
+ 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf2,
+ 0xe8, 0x80, 0x4b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
+ 0x74, 0x65, 0x64, 0x41, 0x74, 0x4d, 0x69, 0x6e, 0x6f, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x3a, 0x61, 0x0a, 0x1a, 0x64, 0x69, 0x73, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f,
+ 0x62, 0x79, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x12,
+ 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0xf5, 0xce, 0xb6, 0x21, 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, 0x64, 0x69, 0x73,
+ 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x42, 0x79, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
+ 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x6c, 0x0a, 0x20, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74,
+ 0x65, 0x64, 0x5f, 0x61, 0x74, 0x5f, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xbe, 0xb3, 0x56,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x1c, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
+ 0x41, 0x74, 0x4d, 0x69, 0x6e, 0x6f, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x45, 0x6e,
+ 0x75, 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var file_envoy_annotations_deprecation_proto_goTypes = []interface{}{
+ (*descriptorpb.FieldOptions)(nil), // 0: google.protobuf.FieldOptions
+ (*descriptorpb.EnumValueOptions)(nil), // 1: google.protobuf.EnumValueOptions
+}
+var file_envoy_annotations_deprecation_proto_depIdxs = []int32{
+ 0, // 0: envoy.annotations.disallowed_by_default:extendee -> google.protobuf.FieldOptions
+ 0, // 1: envoy.annotations.deprecated_at_minor_version:extendee -> google.protobuf.FieldOptions
+ 1, // 2: envoy.annotations.disallowed_by_default_enum:extendee -> google.protobuf.EnumValueOptions
+ 1, // 3: envoy.annotations.deprecated_at_minor_version_enum:extendee -> google.protobuf.EnumValueOptions
+ 4, // [4:4] is the sub-list for method output_type
+ 4, // [4:4] is the sub-list for method input_type
+ 4, // [4:4] is the sub-list for extension type_name
+ 0, // [0:4] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_envoy_annotations_deprecation_proto_init() }
+func file_envoy_annotations_deprecation_proto_init() {
+ if File_envoy_annotations_deprecation_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_annotations_deprecation_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 4,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_annotations_deprecation_proto_goTypes,
+ DependencyIndexes: file_envoy_annotations_deprecation_proto_depIdxs,
+ ExtensionInfos: file_envoy_annotations_deprecation_proto_extTypes,
+ }.Build()
+ File_envoy_annotations_deprecation_proto = out.File
+ file_envoy_annotations_deprecation_proto_rawDesc = nil
+ file_envoy_annotations_deprecation_proto_goTypes = nil
+ file_envoy_annotations_deprecation_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.validate.go
new file mode 100644
index 000000000..4cf2a75e4
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.validate.go
@@ -0,0 +1,34 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/annotations/deprecation.proto
+
+package envoy_annotations
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.go
new file mode 100644
index 000000000..49f67dc97
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.go
@@ -0,0 +1,180 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/annotations/resource.proto
+
+package envoy_annotations
+
+import (
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+type ResourceAnnotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Annotation for xDS services that indicates the fully-qualified Protobuf type for the resource
+ // type.
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+}
+
+func (x *ResourceAnnotation) Reset() {
+ *x = ResourceAnnotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_annotations_resource_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ResourceAnnotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResourceAnnotation) ProtoMessage() {}
+
+func (x *ResourceAnnotation) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_annotations_resource_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResourceAnnotation.ProtoReflect.Descriptor instead.
+func (*ResourceAnnotation) Descriptor() ([]byte, []int) {
+ return file_envoy_annotations_resource_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ResourceAnnotation) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+
+var file_envoy_annotations_resource_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.ServiceOptions)(nil),
+ ExtensionType: (*ResourceAnnotation)(nil),
+ Field: 265073217,
+ Name: "envoy.annotations.resource",
+ Tag: "bytes,265073217,opt,name=resource",
+ Filename: "envoy/annotations/resource.proto",
+ },
+}
+
+// Extension fields to descriptorpb.ServiceOptions.
+var (
+ // optional envoy.annotations.ResourceAnnotation resource = 265073217;
+ E_Resource = &file_envoy_annotations_resource_proto_extTypes[0]
+)
+
+var File_envoy_annotations_resource_proto protoreflect.FileDescriptor
+
+var file_envoy_annotations_resource_proto_rawDesc = []byte{
+ 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x12, 0x11, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
+ 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x28, 0x0a, 0x12, 0x52, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70,
+ 0x65, 0x3a, 0x65, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1f, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1,
+ 0xe4, 0xb2, 0x7e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08,
+ 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_annotations_resource_proto_rawDescOnce sync.Once
+ file_envoy_annotations_resource_proto_rawDescData = file_envoy_annotations_resource_proto_rawDesc
+)
+
+func file_envoy_annotations_resource_proto_rawDescGZIP() []byte {
+ file_envoy_annotations_resource_proto_rawDescOnce.Do(func() {
+ file_envoy_annotations_resource_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_annotations_resource_proto_rawDescData)
+ })
+ return file_envoy_annotations_resource_proto_rawDescData
+}
+
+var file_envoy_annotations_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_envoy_annotations_resource_proto_goTypes = []interface{}{
+ (*ResourceAnnotation)(nil), // 0: envoy.annotations.ResourceAnnotation
+ (*descriptorpb.ServiceOptions)(nil), // 1: google.protobuf.ServiceOptions
+}
+var file_envoy_annotations_resource_proto_depIdxs = []int32{
+ 1, // 0: envoy.annotations.resource:extendee -> google.protobuf.ServiceOptions
+ 0, // 1: envoy.annotations.resource:type_name -> envoy.annotations.ResourceAnnotation
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 1, // [1:2] is the sub-list for extension type_name
+ 0, // [0:1] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_envoy_annotations_resource_proto_init() }
+func file_envoy_annotations_resource_proto_init() {
+ if File_envoy_annotations_resource_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_annotations_resource_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ResourceAnnotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_annotations_resource_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 1,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_annotations_resource_proto_goTypes,
+ DependencyIndexes: file_envoy_annotations_resource_proto_depIdxs,
+ MessageInfos: file_envoy_annotations_resource_proto_msgTypes,
+ ExtensionInfos: file_envoy_annotations_resource_proto_extTypes,
+ }.Build()
+ File_envoy_annotations_resource_proto = out.File
+ file_envoy_annotations_resource_proto_rawDesc = nil
+ file_envoy_annotations_resource_proto_goTypes = nil
+ file_envoy_annotations_resource_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.validate.go
new file mode 100644
index 000000000..eddbddff7
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.validate.go
@@ -0,0 +1,103 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/annotations/resource.proto
+
+package envoy_annotations
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// Validate checks the field values on ResourceAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *ResourceAnnotation) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ // no validation rules for Type
+
+ return nil
+}
+
+// ResourceAnnotationValidationError is the validation error returned by
+// ResourceAnnotation.Validate if the designated constraints aren't met.
+type ResourceAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ResourceAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ResourceAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ResourceAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ResourceAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ResourceAnnotationValidationError) ErrorName() string {
+ return "ResourceAnnotationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ResourceAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sResourceAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ResourceAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ResourceAnnotationValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.go
new file mode 100644
index 000000000..0501de5fb
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.go
@@ -0,0 +1,936 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/config/core/v3/address.proto
+
+package envoy_config_core_v3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ wrappers "github.com/golang/protobuf/ptypes/wrappers"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+type SocketAddress_Protocol int32
+
+const (
+ SocketAddress_TCP SocketAddress_Protocol = 0
+ SocketAddress_UDP SocketAddress_Protocol = 1
+)
+
+// Enum value maps for SocketAddress_Protocol.
+var (
+ SocketAddress_Protocol_name = map[int32]string{
+ 0: "TCP",
+ 1: "UDP",
+ }
+ SocketAddress_Protocol_value = map[string]int32{
+ "TCP": 0,
+ "UDP": 1,
+ }
+)
+
+func (x SocketAddress_Protocol) Enum() *SocketAddress_Protocol {
+ p := new(SocketAddress_Protocol)
+ *p = x
+ return p
+}
+
+func (x SocketAddress_Protocol) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SocketAddress_Protocol) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_core_v3_address_proto_enumTypes[0].Descriptor()
+}
+
+func (SocketAddress_Protocol) Type() protoreflect.EnumType {
+ return &file_envoy_config_core_v3_address_proto_enumTypes[0]
+}
+
+func (x SocketAddress_Protocol) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use SocketAddress_Protocol.Descriptor instead.
+func (SocketAddress_Protocol) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{2, 0}
+}
+
+type Pipe struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Unix Domain Socket path. On Linux, paths starting with '@' will use the
+ // abstract namespace. The starting '@' is replaced by a null byte by Envoy.
+ // Paths starting with '@' will result in an error in environments other than
+ // Linux.
+ Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+ // The mode for the Pipe. Not applicable for abstract sockets.
+ Mode uint32 `protobuf:"varint,2,opt,name=mode,proto3" json:"mode,omitempty"`
+}
+
+func (x *Pipe) Reset() {
+ *x = Pipe{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_address_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Pipe) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Pipe) ProtoMessage() {}
+
+func (x *Pipe) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_address_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Pipe.ProtoReflect.Descriptor instead.
+func (*Pipe) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Pipe) GetPath() string {
+ if x != nil {
+ return x.Path
+ }
+ return ""
+}
+
+func (x *Pipe) GetMode() uint32 {
+ if x != nil {
+ return x.Mode
+ }
+ return 0
+}
+
+// [#not-implemented-hide:] The address represents an envoy internal listener.
+// TODO(lambdai): Make this address available for listener and endpoint.
+// TODO(asraa): When address available, remove workaround from test/server/server_fuzz_test.cc:30.
+type EnvoyInternalAddress struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to AddressNameSpecifier:
+ // *EnvoyInternalAddress_ServerListenerName
+ AddressNameSpecifier isEnvoyInternalAddress_AddressNameSpecifier `protobuf_oneof:"address_name_specifier"`
+}
+
+func (x *EnvoyInternalAddress) Reset() {
+ *x = EnvoyInternalAddress{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_address_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EnvoyInternalAddress) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EnvoyInternalAddress) ProtoMessage() {}
+
+func (x *EnvoyInternalAddress) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_address_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EnvoyInternalAddress.ProtoReflect.Descriptor instead.
+func (*EnvoyInternalAddress) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{1}
+}
+
+func (m *EnvoyInternalAddress) GetAddressNameSpecifier() isEnvoyInternalAddress_AddressNameSpecifier {
+ if m != nil {
+ return m.AddressNameSpecifier
+ }
+ return nil
+}
+
+func (x *EnvoyInternalAddress) GetServerListenerName() string {
+ if x, ok := x.GetAddressNameSpecifier().(*EnvoyInternalAddress_ServerListenerName); ok {
+ return x.ServerListenerName
+ }
+ return ""
+}
+
+type isEnvoyInternalAddress_AddressNameSpecifier interface {
+ isEnvoyInternalAddress_AddressNameSpecifier()
+}
+
+type EnvoyInternalAddress_ServerListenerName struct {
+ // [#not-implemented-hide:] The :ref:`listener name ` of the destination internal listener.
+ ServerListenerName string `protobuf:"bytes,1,opt,name=server_listener_name,json=serverListenerName,proto3,oneof"`
+}
+
+func (*EnvoyInternalAddress_ServerListenerName) isEnvoyInternalAddress_AddressNameSpecifier() {}
+
+// [#next-free-field: 7]
+type SocketAddress struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Protocol SocketAddress_Protocol `protobuf:"varint,1,opt,name=protocol,proto3,enum=envoy.config.core.v3.SocketAddress_Protocol" json:"protocol,omitempty"`
+ // The address for this socket. :ref:`Listeners ` will bind
+ // to the address. An empty address is not allowed. Specify ``0.0.0.0`` or ``::``
+ // to bind to any address. [#comment:TODO(zuercher) reinstate when implemented:
+ // It is possible to distinguish a Listener address via the prefix/suffix matching
+ // in :ref:`FilterChainMatch `.] When used
+ // within an upstream :ref:`BindConfig `, the address
+ // controls the source address of outbound connections. For :ref:`clusters
+ // `, the cluster type determines whether the
+ // address must be an IP (*STATIC* or *EDS* clusters) or a hostname resolved by DNS
+ // (*STRICT_DNS* or *LOGICAL_DNS* clusters). Address resolution can be customized
+ // via :ref:`resolver_name `.
+ Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
+ // Types that are assignable to PortSpecifier:
+ // *SocketAddress_PortValue
+ // *SocketAddress_NamedPort
+ PortSpecifier isSocketAddress_PortSpecifier `protobuf_oneof:"port_specifier"`
+ // The name of the custom resolver. This must have been registered with Envoy. If
+ // this is empty, a context dependent default applies. If the address is a concrete
+ // IP address, no resolution will occur. If address is a hostname this
+ // should be set for resolution other than DNS. Specifying a custom resolver with
+ // *STRICT_DNS* or *LOGICAL_DNS* will generate an error at runtime.
+ ResolverName string `protobuf:"bytes,5,opt,name=resolver_name,json=resolverName,proto3" json:"resolver_name,omitempty"`
+ // When binding to an IPv6 address above, this enables `IPv4 compatibility
+ // `_. Binding to ``::`` will
+ // allow both IPv4 and IPv6 connections, with peer IPv4 addresses mapped into
+ // IPv6 space as ``::FFFF:``.
+ Ipv4Compat bool `protobuf:"varint,6,opt,name=ipv4_compat,json=ipv4Compat,proto3" json:"ipv4_compat,omitempty"`
+}
+
+func (x *SocketAddress) Reset() {
+ *x = SocketAddress{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_address_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SocketAddress) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SocketAddress) ProtoMessage() {}
+
+func (x *SocketAddress) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_address_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SocketAddress.ProtoReflect.Descriptor instead.
+func (*SocketAddress) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *SocketAddress) GetProtocol() SocketAddress_Protocol {
+ if x != nil {
+ return x.Protocol
+ }
+ return SocketAddress_TCP
+}
+
+func (x *SocketAddress) GetAddress() string {
+ if x != nil {
+ return x.Address
+ }
+ return ""
+}
+
+func (m *SocketAddress) GetPortSpecifier() isSocketAddress_PortSpecifier {
+ if m != nil {
+ return m.PortSpecifier
+ }
+ return nil
+}
+
+func (x *SocketAddress) GetPortValue() uint32 {
+ if x, ok := x.GetPortSpecifier().(*SocketAddress_PortValue); ok {
+ return x.PortValue
+ }
+ return 0
+}
+
+func (x *SocketAddress) GetNamedPort() string {
+ if x, ok := x.GetPortSpecifier().(*SocketAddress_NamedPort); ok {
+ return x.NamedPort
+ }
+ return ""
+}
+
+func (x *SocketAddress) GetResolverName() string {
+ if x != nil {
+ return x.ResolverName
+ }
+ return ""
+}
+
+func (x *SocketAddress) GetIpv4Compat() bool {
+ if x != nil {
+ return x.Ipv4Compat
+ }
+ return false
+}
+
+type isSocketAddress_PortSpecifier interface {
+ isSocketAddress_PortSpecifier()
+}
+
+type SocketAddress_PortValue struct {
+ PortValue uint32 `protobuf:"varint,3,opt,name=port_value,json=portValue,proto3,oneof"`
+}
+
+type SocketAddress_NamedPort struct {
+ // This is only valid if :ref:`resolver_name
+ // ` is specified below and the
+ // named resolver is capable of named port resolution.
+ NamedPort string `protobuf:"bytes,4,opt,name=named_port,json=namedPort,proto3,oneof"`
+}
+
+func (*SocketAddress_PortValue) isSocketAddress_PortSpecifier() {}
+
+func (*SocketAddress_NamedPort) isSocketAddress_PortSpecifier() {}
+
+type TcpKeepalive struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Maximum number of keepalive probes to send without response before deciding
+ // the connection is dead. Default is to use the OS level configuration (unless
+ // overridden, Linux defaults to 9.)
+ KeepaliveProbes *wrappers.UInt32Value `protobuf:"bytes,1,opt,name=keepalive_probes,json=keepaliveProbes,proto3" json:"keepalive_probes,omitempty"`
+ // The number of seconds a connection needs to be idle before keep-alive probes
+ // start being sent. Default is to use the OS level configuration (unless
+ // overridden, Linux defaults to 7200s (i.e., 2 hours.)
+ KeepaliveTime *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=keepalive_time,json=keepaliveTime,proto3" json:"keepalive_time,omitempty"`
+ // The number of seconds between keep-alive probes. Default is to use the OS
+ // level configuration (unless overridden, Linux defaults to 75s.)
+ KeepaliveInterval *wrappers.UInt32Value `protobuf:"bytes,3,opt,name=keepalive_interval,json=keepaliveInterval,proto3" json:"keepalive_interval,omitempty"`
+}
+
+func (x *TcpKeepalive) Reset() {
+ *x = TcpKeepalive{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_address_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TcpKeepalive) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TcpKeepalive) ProtoMessage() {}
+
+func (x *TcpKeepalive) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_address_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TcpKeepalive.ProtoReflect.Descriptor instead.
+func (*TcpKeepalive) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *TcpKeepalive) GetKeepaliveProbes() *wrappers.UInt32Value {
+ if x != nil {
+ return x.KeepaliveProbes
+ }
+ return nil
+}
+
+func (x *TcpKeepalive) GetKeepaliveTime() *wrappers.UInt32Value {
+ if x != nil {
+ return x.KeepaliveTime
+ }
+ return nil
+}
+
+func (x *TcpKeepalive) GetKeepaliveInterval() *wrappers.UInt32Value {
+ if x != nil {
+ return x.KeepaliveInterval
+ }
+ return nil
+}
+
+type BindConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The address to bind to when creating a socket.
+ SourceAddress *SocketAddress `protobuf:"bytes,1,opt,name=source_address,json=sourceAddress,proto3" json:"source_address,omitempty"`
+ // Whether to set the *IP_FREEBIND* option when creating the socket. When this
+ // flag is set to true, allows the :ref:`source_address
+ // ` to be an IP address
+ // that is not configured on the system running Envoy. When this flag is set
+ // to false, the option *IP_FREEBIND* is disabled on the socket. When this
+ // flag is not set (default), the socket is not modified, i.e. the option is
+ // neither enabled nor disabled.
+ Freebind *wrappers.BoolValue `protobuf:"bytes,2,opt,name=freebind,proto3" json:"freebind,omitempty"`
+ // Additional socket options that may not be present in Envoy source code or
+ // precompiled binaries.
+ SocketOptions []*SocketOption `protobuf:"bytes,3,rep,name=socket_options,json=socketOptions,proto3" json:"socket_options,omitempty"`
+}
+
+func (x *BindConfig) Reset() {
+ *x = BindConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_address_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *BindConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BindConfig) ProtoMessage() {}
+
+func (x *BindConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_address_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BindConfig.ProtoReflect.Descriptor instead.
+func (*BindConfig) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *BindConfig) GetSourceAddress() *SocketAddress {
+ if x != nil {
+ return x.SourceAddress
+ }
+ return nil
+}
+
+func (x *BindConfig) GetFreebind() *wrappers.BoolValue {
+ if x != nil {
+ return x.Freebind
+ }
+ return nil
+}
+
+func (x *BindConfig) GetSocketOptions() []*SocketOption {
+ if x != nil {
+ return x.SocketOptions
+ }
+ return nil
+}
+
+// Addresses specify either a logical or physical address and port, which are
+// used to tell Envoy where to bind/listen, connect to upstream and find
+// management servers.
+type Address struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Address:
+ // *Address_SocketAddress
+ // *Address_Pipe
+ // *Address_EnvoyInternalAddress
+ Address isAddress_Address `protobuf_oneof:"address"`
+}
+
+func (x *Address) Reset() {
+ *x = Address{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_address_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Address) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Address) ProtoMessage() {}
+
+func (x *Address) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_address_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Address.ProtoReflect.Descriptor instead.
+func (*Address) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{5}
+}
+
+func (m *Address) GetAddress() isAddress_Address {
+ if m != nil {
+ return m.Address
+ }
+ return nil
+}
+
+func (x *Address) GetSocketAddress() *SocketAddress {
+ if x, ok := x.GetAddress().(*Address_SocketAddress); ok {
+ return x.SocketAddress
+ }
+ return nil
+}
+
+func (x *Address) GetPipe() *Pipe {
+ if x, ok := x.GetAddress().(*Address_Pipe); ok {
+ return x.Pipe
+ }
+ return nil
+}
+
+func (x *Address) GetEnvoyInternalAddress() *EnvoyInternalAddress {
+ if x, ok := x.GetAddress().(*Address_EnvoyInternalAddress); ok {
+ return x.EnvoyInternalAddress
+ }
+ return nil
+}
+
+type isAddress_Address interface {
+ isAddress_Address()
+}
+
+type Address_SocketAddress struct {
+ SocketAddress *SocketAddress `protobuf:"bytes,1,opt,name=socket_address,json=socketAddress,proto3,oneof"`
+}
+
+type Address_Pipe struct {
+ Pipe *Pipe `protobuf:"bytes,2,opt,name=pipe,proto3,oneof"`
+}
+
+type Address_EnvoyInternalAddress struct {
+ // [#not-implemented-hide:]
+ EnvoyInternalAddress *EnvoyInternalAddress `protobuf:"bytes,3,opt,name=envoy_internal_address,json=envoyInternalAddress,proto3,oneof"`
+}
+
+func (*Address_SocketAddress) isAddress_Address() {}
+
+func (*Address_Pipe) isAddress_Address() {}
+
+func (*Address_EnvoyInternalAddress) isAddress_Address() {}
+
+// CidrRange specifies an IP Address and a prefix length to construct
+// the subnet mask for a `CIDR `_ range.
+type CidrRange struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // IPv4 or IPv6 address, e.g. ``192.0.0.0`` or ``2001:db8::``.
+ AddressPrefix string `protobuf:"bytes,1,opt,name=address_prefix,json=addressPrefix,proto3" json:"address_prefix,omitempty"`
+ // Length of prefix, e.g. 0, 32. Defaults to 0 when unset.
+ PrefixLen *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=prefix_len,json=prefixLen,proto3" json:"prefix_len,omitempty"`
+}
+
+func (x *CidrRange) Reset() {
+ *x = CidrRange{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_address_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CidrRange) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CidrRange) ProtoMessage() {}
+
+func (x *CidrRange) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_address_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CidrRange.ProtoReflect.Descriptor instead.
+func (*CidrRange) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *CidrRange) GetAddressPrefix() string {
+ if x != nil {
+ return x.AddressPrefix
+ }
+ return ""
+}
+
+func (x *CidrRange) GetPrefixLen() *wrappers.UInt32Value {
+ if x != nil {
+ return x.PrefixLen
+ }
+ return nil
+}
+
+var File_envoy_config_core_v3_address_proto protoreflect.FileDescriptor
+
+var file_envoy_config_core_v3_address_proto_rawDesc = []byte{
+ 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x28, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33,
+ 0x2f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
+ 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22,
+ 0x60, 0x0a, 0x04, 0x50, 0x69, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04,
+ 0x70, 0x61, 0x74, 0x68, 0x12, 0x1c, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0d, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x2a, 0x03, 0x18, 0xff, 0x03, 0x52, 0x04, 0x6d, 0x6f,
+ 0x64, 0x65, 0x3a, 0x1d, 0x9a, 0xc5, 0x88, 0x1e, 0x18, 0x0a, 0x16, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x50, 0x69, 0x70,
+ 0x65, 0x22, 0x69, 0x0a, 0x14, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e,
+ 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x73, 0x65, 0x72,
+ 0x76, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, 0x65,
+ 0x72, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x1d, 0x0a,
+ 0x16, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x70,
+ 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xf6, 0x02, 0x0a,
+ 0x0d, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x52,
+ 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e,
+ 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64,
+ 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x42, 0x08,
+ 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+ 0x6f, 0x6c, 0x12, 0x21, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x07, 0x61, 0x64,
+ 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x2a, 0x0a, 0x0a, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x2a, 0x04,
+ 0x18, 0xff, 0xff, 0x03, 0x48, 0x00, 0x52, 0x09, 0x70, 0x6f, 0x72, 0x74, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x6f,
+ 0x72, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x5f, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x6c,
+ 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x70, 0x76, 0x34, 0x5f,
+ 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x70,
+ 0x76, 0x34, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x22, 0x1c, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x00, 0x12, 0x07, 0x0a,
+ 0x03, 0x55, 0x44, 0x50, 0x10, 0x01, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65,
+ 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x42, 0x15,
+ 0x0a, 0x0e, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72,
+ 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x90, 0x02, 0x0a, 0x0c, 0x54, 0x63, 0x70, 0x4b, 0x65, 0x65,
+ 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x12, 0x47, 0x0a, 0x10, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c,
+ 0x69, 0x76, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f,
+ 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x50, 0x72, 0x6f, 0x62, 0x65, 0x73, 0x12,
+ 0x43, 0x0a, 0x0e, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0d, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65,
+ 0x54, 0x69, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x12, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76,
+ 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x11,
+ 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61,
+ 0x6c, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x63, 0x70, 0x4b,
+ 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x22, 0x8a, 0x02, 0x0a, 0x0a, 0x42, 0x69, 0x6e,
+ 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x54, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63,
+ 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64,
+ 0x72, 0x65, 0x73, 0x73, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0d,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x36, 0x0a,
+ 0x08, 0x66, 0x72, 0x65, 0x65, 0x62, 0x69, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x66, 0x72, 0x65,
+ 0x65, 0x62, 0x69, 0x6e, 0x64, 0x12, 0x49, 0x0a, 0x0e, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f,
+ 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72,
+ 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x52, 0x0d, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x3a, 0x23, 0x9a, 0xc5, 0x88, 0x1e, 0x1e, 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61,
+ 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x9f, 0x02, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73,
+ 0x73, 0x12, 0x4c, 0x0a, 0x0e, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72,
+ 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33,
+ 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x48, 0x00,
+ 0x52, 0x0d, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12,
+ 0x30, 0x0a, 0x04, 0x70, 0x69, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72,
+ 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x69, 0x70, 0x65, 0x48, 0x00, 0x52, 0x04, 0x70, 0x69, 0x70,
+ 0x65, 0x12, 0x62, 0x0a, 0x16, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72,
+ 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x49, 0x6e,
+ 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x48, 0x00, 0x52,
+ 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x64,
+ 0x64, 0x72, 0x65, 0x73, 0x73, 0x3a, 0x20, 0x9a, 0xc5, 0x88, 0x1e, 0x1b, 0x0a, 0x19, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e,
+ 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x42, 0x0e, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65,
+ 0x73, 0x73, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xa6, 0x01, 0x0a, 0x09, 0x43, 0x69, 0x64, 0x72,
+ 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x0e, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
+ 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa,
+ 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0d, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x50,
+ 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x45, 0x0a, 0x0a, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f,
+ 0x6c, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74,
+ 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x2a, 0x03, 0x18, 0x80,
+ 0x01, 0x52, 0x09, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x4c, 0x65, 0x6e, 0x3a, 0x22, 0x9a, 0xc5,
+ 0x88, 0x1e, 0x1d, 0x0a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76,
+ 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65,
+ 0x42, 0x3c, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78,
+ 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63,
+ 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_core_v3_address_proto_rawDescOnce sync.Once
+ file_envoy_config_core_v3_address_proto_rawDescData = file_envoy_config_core_v3_address_proto_rawDesc
+)
+
+func file_envoy_config_core_v3_address_proto_rawDescGZIP() []byte {
+ file_envoy_config_core_v3_address_proto_rawDescOnce.Do(func() {
+ file_envoy_config_core_v3_address_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_address_proto_rawDescData)
+ })
+ return file_envoy_config_core_v3_address_proto_rawDescData
+}
+
+var file_envoy_config_core_v3_address_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_envoy_config_core_v3_address_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
+var file_envoy_config_core_v3_address_proto_goTypes = []interface{}{
+ (SocketAddress_Protocol)(0), // 0: envoy.config.core.v3.SocketAddress.Protocol
+ (*Pipe)(nil), // 1: envoy.config.core.v3.Pipe
+ (*EnvoyInternalAddress)(nil), // 2: envoy.config.core.v3.EnvoyInternalAddress
+ (*SocketAddress)(nil), // 3: envoy.config.core.v3.SocketAddress
+ (*TcpKeepalive)(nil), // 4: envoy.config.core.v3.TcpKeepalive
+ (*BindConfig)(nil), // 5: envoy.config.core.v3.BindConfig
+ (*Address)(nil), // 6: envoy.config.core.v3.Address
+ (*CidrRange)(nil), // 7: envoy.config.core.v3.CidrRange
+ (*wrappers.UInt32Value)(nil), // 8: google.protobuf.UInt32Value
+ (*wrappers.BoolValue)(nil), // 9: google.protobuf.BoolValue
+ (*SocketOption)(nil), // 10: envoy.config.core.v3.SocketOption
+}
+var file_envoy_config_core_v3_address_proto_depIdxs = []int32{
+ 0, // 0: envoy.config.core.v3.SocketAddress.protocol:type_name -> envoy.config.core.v3.SocketAddress.Protocol
+ 8, // 1: envoy.config.core.v3.TcpKeepalive.keepalive_probes:type_name -> google.protobuf.UInt32Value
+ 8, // 2: envoy.config.core.v3.TcpKeepalive.keepalive_time:type_name -> google.protobuf.UInt32Value
+ 8, // 3: envoy.config.core.v3.TcpKeepalive.keepalive_interval:type_name -> google.protobuf.UInt32Value
+ 3, // 4: envoy.config.core.v3.BindConfig.source_address:type_name -> envoy.config.core.v3.SocketAddress
+ 9, // 5: envoy.config.core.v3.BindConfig.freebind:type_name -> google.protobuf.BoolValue
+ 10, // 6: envoy.config.core.v3.BindConfig.socket_options:type_name -> envoy.config.core.v3.SocketOption
+ 3, // 7: envoy.config.core.v3.Address.socket_address:type_name -> envoy.config.core.v3.SocketAddress
+ 1, // 8: envoy.config.core.v3.Address.pipe:type_name -> envoy.config.core.v3.Pipe
+ 2, // 9: envoy.config.core.v3.Address.envoy_internal_address:type_name -> envoy.config.core.v3.EnvoyInternalAddress
+ 8, // 10: envoy.config.core.v3.CidrRange.prefix_len:type_name -> google.protobuf.UInt32Value
+ 11, // [11:11] is the sub-list for method output_type
+ 11, // [11:11] is the sub-list for method input_type
+ 11, // [11:11] is the sub-list for extension type_name
+ 11, // [11:11] is the sub-list for extension extendee
+ 0, // [0:11] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_core_v3_address_proto_init() }
+func file_envoy_config_core_v3_address_proto_init() {
+ if File_envoy_config_core_v3_address_proto != nil {
+ return
+ }
+ file_envoy_config_core_v3_socket_option_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_core_v3_address_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Pipe); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_address_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EnvoyInternalAddress); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_address_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SocketAddress); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_address_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TcpKeepalive); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_address_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BindConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_address_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Address); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_address_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CidrRange); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_envoy_config_core_v3_address_proto_msgTypes[1].OneofWrappers = []interface{}{
+ (*EnvoyInternalAddress_ServerListenerName)(nil),
+ }
+ file_envoy_config_core_v3_address_proto_msgTypes[2].OneofWrappers = []interface{}{
+ (*SocketAddress_PortValue)(nil),
+ (*SocketAddress_NamedPort)(nil),
+ }
+ file_envoy_config_core_v3_address_proto_msgTypes[5].OneofWrappers = []interface{}{
+ (*Address_SocketAddress)(nil),
+ (*Address_Pipe)(nil),
+ (*Address_EnvoyInternalAddress)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_core_v3_address_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 7,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_core_v3_address_proto_goTypes,
+ DependencyIndexes: file_envoy_config_core_v3_address_proto_depIdxs,
+ EnumInfos: file_envoy_config_core_v3_address_proto_enumTypes,
+ MessageInfos: file_envoy_config_core_v3_address_proto_msgTypes,
+ }.Build()
+ File_envoy_config_core_v3_address_proto = out.File
+ file_envoy_config_core_v3_address_proto_rawDesc = nil
+ file_envoy_config_core_v3_address_proto_goTypes = nil
+ file_envoy_config_core_v3_address_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.validate.go
new file mode 100644
index 000000000..fad807ad4
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.validate.go
@@ -0,0 +1,690 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/core/v3/address.proto
+
+package envoy_config_core_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// Validate checks the field values on Pipe with the rules defined in the proto
+// definition for this message. If any rules are violated, an error is returned.
+func (m *Pipe) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if utf8.RuneCountInString(m.GetPath()) < 1 {
+ return PipeValidationError{
+ field: "Path",
+ reason: "value length must be at least 1 runes",
+ }
+ }
+
+ if m.GetMode() > 511 {
+ return PipeValidationError{
+ field: "Mode",
+ reason: "value must be less than or equal to 511",
+ }
+ }
+
+ return nil
+}
+
+// PipeValidationError is the validation error returned by Pipe.Validate if the
+// designated constraints aren't met.
+type PipeValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e PipeValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e PipeValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e PipeValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e PipeValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e PipeValidationError) ErrorName() string { return "PipeValidationError" }
+
+// Error satisfies the builtin error interface
+func (e PipeValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sPipe.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = PipeValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = PipeValidationError{}
+
+// Validate checks the field values on EnvoyInternalAddress with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *EnvoyInternalAddress) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ switch m.AddressNameSpecifier.(type) {
+
+ case *EnvoyInternalAddress_ServerListenerName:
+ // no validation rules for ServerListenerName
+
+ default:
+ return EnvoyInternalAddressValidationError{
+ field: "AddressNameSpecifier",
+ reason: "value is required",
+ }
+
+ }
+
+ return nil
+}
+
+// EnvoyInternalAddressValidationError is the validation error returned by
+// EnvoyInternalAddress.Validate if the designated constraints aren't met.
+type EnvoyInternalAddressValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e EnvoyInternalAddressValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e EnvoyInternalAddressValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e EnvoyInternalAddressValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e EnvoyInternalAddressValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e EnvoyInternalAddressValidationError) ErrorName() string {
+ return "EnvoyInternalAddressValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e EnvoyInternalAddressValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sEnvoyInternalAddress.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = EnvoyInternalAddressValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = EnvoyInternalAddressValidationError{}
+
+// Validate checks the field values on SocketAddress with the rules defined in
+// the proto definition for this message. If any rules are violated, an error
+// is returned.
+func (m *SocketAddress) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if _, ok := SocketAddress_Protocol_name[int32(m.GetProtocol())]; !ok {
+ return SocketAddressValidationError{
+ field: "Protocol",
+ reason: "value must be one of the defined enum values",
+ }
+ }
+
+ if utf8.RuneCountInString(m.GetAddress()) < 1 {
+ return SocketAddressValidationError{
+ field: "Address",
+ reason: "value length must be at least 1 runes",
+ }
+ }
+
+ // no validation rules for ResolverName
+
+ // no validation rules for Ipv4Compat
+
+ switch m.PortSpecifier.(type) {
+
+ case *SocketAddress_PortValue:
+
+ if m.GetPortValue() > 65535 {
+ return SocketAddressValidationError{
+ field: "PortValue",
+ reason: "value must be less than or equal to 65535",
+ }
+ }
+
+ case *SocketAddress_NamedPort:
+ // no validation rules for NamedPort
+
+ default:
+ return SocketAddressValidationError{
+ field: "PortSpecifier",
+ reason: "value is required",
+ }
+
+ }
+
+ return nil
+}
+
+// SocketAddressValidationError is the validation error returned by
+// SocketAddress.Validate if the designated constraints aren't met.
+type SocketAddressValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e SocketAddressValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e SocketAddressValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e SocketAddressValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e SocketAddressValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e SocketAddressValidationError) ErrorName() string { return "SocketAddressValidationError" }
+
+// Error satisfies the builtin error interface
+func (e SocketAddressValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sSocketAddress.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = SocketAddressValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = SocketAddressValidationError{}
+
+// Validate checks the field values on TcpKeepalive with the rules defined in
+// the proto definition for this message. If any rules are violated, an error
+// is returned.
+func (m *TcpKeepalive) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if v, ok := interface{}(m.GetKeepaliveProbes()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return TcpKeepaliveValidationError{
+ field: "KeepaliveProbes",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if v, ok := interface{}(m.GetKeepaliveTime()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return TcpKeepaliveValidationError{
+ field: "KeepaliveTime",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if v, ok := interface{}(m.GetKeepaliveInterval()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return TcpKeepaliveValidationError{
+ field: "KeepaliveInterval",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ return nil
+}
+
+// TcpKeepaliveValidationError is the validation error returned by
+// TcpKeepalive.Validate if the designated constraints aren't met.
+type TcpKeepaliveValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e TcpKeepaliveValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e TcpKeepaliveValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e TcpKeepaliveValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e TcpKeepaliveValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e TcpKeepaliveValidationError) ErrorName() string { return "TcpKeepaliveValidationError" }
+
+// Error satisfies the builtin error interface
+func (e TcpKeepaliveValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sTcpKeepalive.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = TcpKeepaliveValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = TcpKeepaliveValidationError{}
+
+// Validate checks the field values on BindConfig with the rules defined in the
+// proto definition for this message. If any rules are violated, an error is returned.
+func (m *BindConfig) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if m.GetSourceAddress() == nil {
+ return BindConfigValidationError{
+ field: "SourceAddress",
+ reason: "value is required",
+ }
+ }
+
+ if v, ok := interface{}(m.GetSourceAddress()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BindConfigValidationError{
+ field: "SourceAddress",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if v, ok := interface{}(m.GetFreebind()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BindConfigValidationError{
+ field: "Freebind",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ for idx, item := range m.GetSocketOptions() {
+ _, _ = idx, item
+
+ if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BindConfigValidationError{
+ field: fmt.Sprintf("SocketOptions[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// BindConfigValidationError is the validation error returned by
+// BindConfig.Validate if the designated constraints aren't met.
+type BindConfigValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e BindConfigValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e BindConfigValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e BindConfigValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e BindConfigValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e BindConfigValidationError) ErrorName() string { return "BindConfigValidationError" }
+
+// Error satisfies the builtin error interface
+func (e BindConfigValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sBindConfig.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = BindConfigValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = BindConfigValidationError{}
+
+// Validate checks the field values on Address with the rules defined in the
+// proto definition for this message. If any rules are violated, an error is returned.
+func (m *Address) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ switch m.Address.(type) {
+
+ case *Address_SocketAddress:
+
+ if v, ok := interface{}(m.GetSocketAddress()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AddressValidationError{
+ field: "SocketAddress",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *Address_Pipe:
+
+ if v, ok := interface{}(m.GetPipe()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AddressValidationError{
+ field: "Pipe",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *Address_EnvoyInternalAddress:
+
+ if v, ok := interface{}(m.GetEnvoyInternalAddress()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AddressValidationError{
+ field: "EnvoyInternalAddress",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ return AddressValidationError{
+ field: "Address",
+ reason: "value is required",
+ }
+
+ }
+
+ return nil
+}
+
+// AddressValidationError is the validation error returned by Address.Validate
+// if the designated constraints aren't met.
+type AddressValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e AddressValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e AddressValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e AddressValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e AddressValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e AddressValidationError) ErrorName() string { return "AddressValidationError" }
+
+// Error satisfies the builtin error interface
+func (e AddressValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sAddress.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = AddressValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = AddressValidationError{}
+
+// Validate checks the field values on CidrRange with the rules defined in the
+// proto definition for this message. If any rules are violated, an error is returned.
+func (m *CidrRange) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if utf8.RuneCountInString(m.GetAddressPrefix()) < 1 {
+ return CidrRangeValidationError{
+ field: "AddressPrefix",
+ reason: "value length must be at least 1 runes",
+ }
+ }
+
+ if wrapper := m.GetPrefixLen(); wrapper != nil {
+
+ if wrapper.GetValue() > 128 {
+ return CidrRangeValidationError{
+ field: "PrefixLen",
+ reason: "value must be less than or equal to 128",
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// CidrRangeValidationError is the validation error returned by
+// CidrRange.Validate if the designated constraints aren't met.
+type CidrRangeValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e CidrRangeValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e CidrRangeValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e CidrRangeValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e CidrRangeValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e CidrRangeValidationError) ErrorName() string { return "CidrRangeValidationError" }
+
+// Error satisfies the builtin error interface
+func (e CidrRangeValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sCidrRange.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = CidrRangeValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = CidrRangeValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff.pb.go
new file mode 100644
index 000000000..c68dd65a5
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff.pb.go
@@ -0,0 +1,194 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/config/core/v3/backoff.proto
+
+package envoy_config_core_v3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ duration "github.com/golang/protobuf/ptypes/duration"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// Configuration defining a jittered exponential back off strategy.
+type BackoffStrategy struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The base interval to be used for the next back off computation. It should
+ // be greater than zero and less than or equal to :ref:`max_interval
+ // `.
+ BaseInterval *duration.Duration `protobuf:"bytes,1,opt,name=base_interval,json=baseInterval,proto3" json:"base_interval,omitempty"`
+ // Specifies the maximum interval between retries. This parameter is optional,
+ // but must be greater than or equal to the :ref:`base_interval
+ // ` if set. The default
+ // is 10 times the :ref:`base_interval
+ // `.
+ MaxInterval *duration.Duration `protobuf:"bytes,2,opt,name=max_interval,json=maxInterval,proto3" json:"max_interval,omitempty"`
+}
+
+func (x *BackoffStrategy) Reset() {
+ *x = BackoffStrategy{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_backoff_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *BackoffStrategy) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BackoffStrategy) ProtoMessage() {}
+
+func (x *BackoffStrategy) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_backoff_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BackoffStrategy.ProtoReflect.Descriptor instead.
+func (*BackoffStrategy) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_backoff_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *BackoffStrategy) GetBaseInterval() *duration.Duration {
+ if x != nil {
+ return x.BaseInterval
+ }
+ return nil
+}
+
+func (x *BackoffStrategy) GetMaxInterval() *duration.Duration {
+ if x != nil {
+ return x.MaxInterval
+ }
+ return nil
+}
+
+var File_envoy_config_core_v3_backoff_proto protoreflect.FileDescriptor
+
+var file_envoy_config_core_v3_backoff_proto_rawDesc = []byte{
+ 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61,
+ 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61,
+ 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f,
+ 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61,
+ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd3, 0x01, 0x0a, 0x0f, 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66,
+ 0x66, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x4e, 0x0a, 0x0d, 0x62, 0x61, 0x73,
+ 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0e, 0xfa, 0x42, 0x0b,
+ 0xaa, 0x01, 0x08, 0x08, 0x01, 0x32, 0x04, 0x10, 0xc0, 0x84, 0x3d, 0x52, 0x0c, 0x62, 0x61, 0x73,
+ 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x46, 0x0a, 0x0c, 0x6d, 0x61, 0x78,
+ 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa,
+ 0x01, 0x02, 0x2a, 0x00, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61,
+ 0x6c, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x42, 0x61, 0x63, 0x6b,
+ 0x6f, 0x66, 0x66, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x42, 0x3c, 0x0a, 0x22, 0x69,
+ 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76,
+ 0x33, 0x42, 0x0c, 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
+ 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+}
+
+var (
+ file_envoy_config_core_v3_backoff_proto_rawDescOnce sync.Once
+ file_envoy_config_core_v3_backoff_proto_rawDescData = file_envoy_config_core_v3_backoff_proto_rawDesc
+)
+
+func file_envoy_config_core_v3_backoff_proto_rawDescGZIP() []byte {
+ file_envoy_config_core_v3_backoff_proto_rawDescOnce.Do(func() {
+ file_envoy_config_core_v3_backoff_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_backoff_proto_rawDescData)
+ })
+ return file_envoy_config_core_v3_backoff_proto_rawDescData
+}
+
+var file_envoy_config_core_v3_backoff_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_envoy_config_core_v3_backoff_proto_goTypes = []interface{}{
+ (*BackoffStrategy)(nil), // 0: envoy.config.core.v3.BackoffStrategy
+ (*duration.Duration)(nil), // 1: google.protobuf.Duration
+}
+var file_envoy_config_core_v3_backoff_proto_depIdxs = []int32{
+ 1, // 0: envoy.config.core.v3.BackoffStrategy.base_interval:type_name -> google.protobuf.Duration
+ 1, // 1: envoy.config.core.v3.BackoffStrategy.max_interval:type_name -> google.protobuf.Duration
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_core_v3_backoff_proto_init() }
+func file_envoy_config_core_v3_backoff_proto_init() {
+ if File_envoy_config_core_v3_backoff_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_core_v3_backoff_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BackoffStrategy); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_core_v3_backoff_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_core_v3_backoff_proto_goTypes,
+ DependencyIndexes: file_envoy_config_core_v3_backoff_proto_depIdxs,
+ MessageInfos: file_envoy_config_core_v3_backoff_proto_msgTypes,
+ }.Build()
+ File_envoy_config_core_v3_backoff_proto = out.File
+ file_envoy_config_core_v3_backoff_proto_rawDesc = nil
+ file_envoy_config_core_v3_backoff_proto_goTypes = nil
+ file_envoy_config_core_v3_backoff_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff.pb.validate.go
new file mode 100644
index 000000000..6ca23aaa6
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff.pb.validate.go
@@ -0,0 +1,148 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/core/v3/backoff.proto
+
+package envoy_config_core_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// Validate checks the field values on BackoffStrategy with the rules defined
+// in the proto definition for this message. If any rules are violated, an
+// error is returned.
+func (m *BackoffStrategy) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if m.GetBaseInterval() == nil {
+ return BackoffStrategyValidationError{
+ field: "BaseInterval",
+ reason: "value is required",
+ }
+ }
+
+ if d := m.GetBaseInterval(); d != nil {
+ dur, err := ptypes.Duration(d)
+ if err != nil {
+ return BackoffStrategyValidationError{
+ field: "BaseInterval",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ }
+
+ gte := time.Duration(0*time.Second + 1000000*time.Nanosecond)
+
+ if dur < gte {
+ return BackoffStrategyValidationError{
+ field: "BaseInterval",
+ reason: "value must be greater than or equal to 1ms",
+ }
+ }
+
+ }
+
+ if d := m.GetMaxInterval(); d != nil {
+ dur, err := ptypes.Duration(d)
+ if err != nil {
+ return BackoffStrategyValidationError{
+ field: "MaxInterval",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ }
+
+ gt := time.Duration(0*time.Second + 0*time.Nanosecond)
+
+ if dur <= gt {
+ return BackoffStrategyValidationError{
+ field: "MaxInterval",
+ reason: "value must be greater than 0s",
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// BackoffStrategyValidationError is the validation error returned by
+// BackoffStrategy.Validate if the designated constraints aren't met.
+type BackoffStrategyValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e BackoffStrategyValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e BackoffStrategyValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e BackoffStrategyValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e BackoffStrategyValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e BackoffStrategyValidationError) ErrorName() string { return "BackoffStrategyValidationError" }
+
+// Error satisfies the builtin error interface
+func (e BackoffStrategyValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sBackoffStrategy.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = BackoffStrategyValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = BackoffStrategyValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.go
new file mode 100644
index 000000000..8641d7ade
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.go
@@ -0,0 +1,2423 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/config/core/v3/base.proto
+
+package envoy_config_core_v3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ v31 "github.com/cncf/xds/go/xds/core/v3"
+ _ "github.com/envoyproxy/go-control-plane/envoy/annotations"
+ v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ any "github.com/golang/protobuf/ptypes/any"
+ _struct "github.com/golang/protobuf/ptypes/struct"
+ wrappers "github.com/golang/protobuf/ptypes/wrappers"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// Envoy supports :ref:`upstream priority routing
+// ` both at the route and the virtual
+// cluster level. The current priority implementation uses different connection
+// pool and circuit breaking settings for each priority level. This means that
+// even for HTTP/2 requests, two physical connections will be used to an
+// upstream host. In the future Envoy will likely support true HTTP/2 priority
+// over a single upstream connection.
+type RoutingPriority int32
+
+const (
+ RoutingPriority_DEFAULT RoutingPriority = 0
+ RoutingPriority_HIGH RoutingPriority = 1
+)
+
+// Enum value maps for RoutingPriority.
+var (
+ RoutingPriority_name = map[int32]string{
+ 0: "DEFAULT",
+ 1: "HIGH",
+ }
+ RoutingPriority_value = map[string]int32{
+ "DEFAULT": 0,
+ "HIGH": 1,
+ }
+)
+
+func (x RoutingPriority) Enum() *RoutingPriority {
+ p := new(RoutingPriority)
+ *p = x
+ return p
+}
+
+func (x RoutingPriority) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (RoutingPriority) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_core_v3_base_proto_enumTypes[0].Descriptor()
+}
+
+func (RoutingPriority) Type() protoreflect.EnumType {
+ return &file_envoy_config_core_v3_base_proto_enumTypes[0]
+}
+
+func (x RoutingPriority) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use RoutingPriority.Descriptor instead.
+func (RoutingPriority) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{0}
+}
+
+// HTTP request method.
+type RequestMethod int32
+
+const (
+ RequestMethod_METHOD_UNSPECIFIED RequestMethod = 0
+ RequestMethod_GET RequestMethod = 1
+ RequestMethod_HEAD RequestMethod = 2
+ RequestMethod_POST RequestMethod = 3
+ RequestMethod_PUT RequestMethod = 4
+ RequestMethod_DELETE RequestMethod = 5
+ RequestMethod_CONNECT RequestMethod = 6
+ RequestMethod_OPTIONS RequestMethod = 7
+ RequestMethod_TRACE RequestMethod = 8
+ RequestMethod_PATCH RequestMethod = 9
+)
+
+// Enum value maps for RequestMethod.
+var (
+ RequestMethod_name = map[int32]string{
+ 0: "METHOD_UNSPECIFIED",
+ 1: "GET",
+ 2: "HEAD",
+ 3: "POST",
+ 4: "PUT",
+ 5: "DELETE",
+ 6: "CONNECT",
+ 7: "OPTIONS",
+ 8: "TRACE",
+ 9: "PATCH",
+ }
+ RequestMethod_value = map[string]int32{
+ "METHOD_UNSPECIFIED": 0,
+ "GET": 1,
+ "HEAD": 2,
+ "POST": 3,
+ "PUT": 4,
+ "DELETE": 5,
+ "CONNECT": 6,
+ "OPTIONS": 7,
+ "TRACE": 8,
+ "PATCH": 9,
+ }
+)
+
+func (x RequestMethod) Enum() *RequestMethod {
+ p := new(RequestMethod)
+ *p = x
+ return p
+}
+
+func (x RequestMethod) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (RequestMethod) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_core_v3_base_proto_enumTypes[1].Descriptor()
+}
+
+func (RequestMethod) Type() protoreflect.EnumType {
+ return &file_envoy_config_core_v3_base_proto_enumTypes[1]
+}
+
+func (x RequestMethod) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use RequestMethod.Descriptor instead.
+func (RequestMethod) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{1}
+}
+
+// Identifies the direction of the traffic relative to the local Envoy.
+type TrafficDirection int32
+
+const (
+ // Default option is unspecified.
+ TrafficDirection_UNSPECIFIED TrafficDirection = 0
+ // The transport is used for incoming traffic.
+ TrafficDirection_INBOUND TrafficDirection = 1
+ // The transport is used for outgoing traffic.
+ TrafficDirection_OUTBOUND TrafficDirection = 2
+)
+
+// Enum value maps for TrafficDirection.
+var (
+ TrafficDirection_name = map[int32]string{
+ 0: "UNSPECIFIED",
+ 1: "INBOUND",
+ 2: "OUTBOUND",
+ }
+ TrafficDirection_value = map[string]int32{
+ "UNSPECIFIED": 0,
+ "INBOUND": 1,
+ "OUTBOUND": 2,
+ }
+)
+
+func (x TrafficDirection) Enum() *TrafficDirection {
+ p := new(TrafficDirection)
+ *p = x
+ return p
+}
+
+func (x TrafficDirection) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (TrafficDirection) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_core_v3_base_proto_enumTypes[2].Descriptor()
+}
+
+func (TrafficDirection) Type() protoreflect.EnumType {
+ return &file_envoy_config_core_v3_base_proto_enumTypes[2]
+}
+
+func (x TrafficDirection) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use TrafficDirection.Descriptor instead.
+func (TrafficDirection) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{2}
+}
+
+// Identifies location of where either Envoy runs or where upstream hosts run.
+type Locality struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Region this :ref:`zone ` belongs to.
+ Region string `protobuf:"bytes,1,opt,name=region,proto3" json:"region,omitempty"`
+ // Defines the local service zone where Envoy is running. Though optional, it
+ // should be set if discovery service routing is used and the discovery
+ // service exposes :ref:`zone data `,
+ // either in this message or via :option:`--service-zone`. The meaning of zone
+ // is context dependent, e.g. `Availability Zone (AZ)
+ // `_
+ // on AWS, `Zone `_ on
+ // GCP, etc.
+ Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"`
+ // When used for locality of upstream hosts, this field further splits zone
+ // into smaller chunks of sub-zones so they can be load balanced
+ // independently.
+ SubZone string `protobuf:"bytes,3,opt,name=sub_zone,json=subZone,proto3" json:"sub_zone,omitempty"`
+}
+
+func (x *Locality) Reset() {
+ *x = Locality{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Locality) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Locality) ProtoMessage() {}
+
+func (x *Locality) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Locality.ProtoReflect.Descriptor instead.
+func (*Locality) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Locality) GetRegion() string {
+ if x != nil {
+ return x.Region
+ }
+ return ""
+}
+
+func (x *Locality) GetZone() string {
+ if x != nil {
+ return x.Zone
+ }
+ return ""
+}
+
+func (x *Locality) GetSubZone() string {
+ if x != nil {
+ return x.SubZone
+ }
+ return ""
+}
+
+// BuildVersion combines SemVer version of extension with free-form build information
+// (i.e. 'alpha', 'private-build') as a set of strings.
+type BuildVersion struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // SemVer version of extension.
+ Version *v3.SemanticVersion `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
+ // Free-form build information.
+ // Envoy defines several well known keys in the source/common/version/version.h file
+ Metadata *_struct.Struct `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"`
+}
+
+func (x *BuildVersion) Reset() {
+ *x = BuildVersion{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *BuildVersion) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BuildVersion) ProtoMessage() {}
+
+func (x *BuildVersion) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BuildVersion.ProtoReflect.Descriptor instead.
+func (*BuildVersion) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *BuildVersion) GetVersion() *v3.SemanticVersion {
+ if x != nil {
+ return x.Version
+ }
+ return nil
+}
+
+func (x *BuildVersion) GetMetadata() *_struct.Struct {
+ if x != nil {
+ return x.Metadata
+ }
+ return nil
+}
+
+// Version and identification for an Envoy extension.
+// [#next-free-field: 6]
+type Extension struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // This is the name of the Envoy filter as specified in the Envoy
+ // configuration, e.g. envoy.filters.http.router, com.acme.widget.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Category of the extension.
+ // Extension category names use reverse DNS notation. For instance "envoy.filters.listener"
+ // for Envoy's built-in listener filters or "com.acme.filters.http" for HTTP filters from
+ // acme.com vendor.
+ // [#comment:TODO(yanavlasov): Link to the doc with existing envoy category names.]
+ Category string `protobuf:"bytes,2,opt,name=category,proto3" json:"category,omitempty"`
+ // [#not-implemented-hide:] Type descriptor of extension configuration proto.
+ // [#comment:TODO(yanavlasov): Link to the doc with existing configuration protos.]
+ // [#comment:TODO(yanavlasov): Add tests when PR #9391 lands.]
+ TypeDescriptor string `protobuf:"bytes,3,opt,name=type_descriptor,json=typeDescriptor,proto3" json:"type_descriptor,omitempty"`
+ // The version is a property of the extension and maintained independently
+ // of other extensions and the Envoy API.
+ // This field is not set when extension did not provide version information.
+ Version *BuildVersion `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"`
+ // Indicates that the extension is present but was disabled via dynamic configuration.
+ Disabled bool `protobuf:"varint,5,opt,name=disabled,proto3" json:"disabled,omitempty"`
+}
+
+func (x *Extension) Reset() {
+ *x = Extension{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Extension) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Extension) ProtoMessage() {}
+
+func (x *Extension) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Extension.ProtoReflect.Descriptor instead.
+func (*Extension) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *Extension) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Extension) GetCategory() string {
+ if x != nil {
+ return x.Category
+ }
+ return ""
+}
+
+func (x *Extension) GetTypeDescriptor() string {
+ if x != nil {
+ return x.TypeDescriptor
+ }
+ return ""
+}
+
+func (x *Extension) GetVersion() *BuildVersion {
+ if x != nil {
+ return x.Version
+ }
+ return nil
+}
+
+func (x *Extension) GetDisabled() bool {
+ if x != nil {
+ return x.Disabled
+ }
+ return false
+}
+
+// Identifies a specific Envoy instance. The node identifier is presented to the
+// management server, which may use this identifier to distinguish per Envoy
+// configuration for serving.
+// [#next-free-field: 13]
+type Node struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // An opaque node identifier for the Envoy node. This also provides the local
+ // service node name. It should be set if any of the following features are
+ // used: :ref:`statsd `, :ref:`CDS
+ // `, and :ref:`HTTP tracing
+ // `, either in this message or via
+ // :option:`--service-node`.
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ // Defines the local service cluster name where Envoy is running. Though
+ // optional, it should be set if any of the following features are used:
+ // :ref:`statsd `, :ref:`health check cluster
+ // verification
+ // `,
+ // :ref:`runtime override directory `,
+ // :ref:`user agent addition
+ // `,
+ // :ref:`HTTP global rate limiting `,
+ // :ref:`CDS `, and :ref:`HTTP tracing
+ // `, either in this message or via
+ // :option:`--service-cluster`.
+ Cluster string `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"`
+ // Opaque metadata extending the node identifier. Envoy will pass this
+ // directly to the management server.
+ Metadata *_struct.Struct `protobuf:"bytes,3,opt,name=metadata,proto3" json:"metadata,omitempty"`
+ // Map from xDS resource type URL to dynamic context parameters. These may vary at runtime (unlike
+ // other fields in this message). For example, the xDS client may have a shard identifier that
+ // changes during the lifetime of the xDS client. In Envoy, this would be achieved by updating the
+ // dynamic context on the Server::Instance's LocalInfo context provider. The shard ID dynamic
+ // parameter then appears in this field during future discovery requests.
+ DynamicParameters map[string]*v31.ContextParams `protobuf:"bytes,12,rep,name=dynamic_parameters,json=dynamicParameters,proto3" json:"dynamic_parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // Locality specifying where the Envoy instance is running.
+ Locality *Locality `protobuf:"bytes,4,opt,name=locality,proto3" json:"locality,omitempty"`
+ // Free-form string that identifies the entity requesting config.
+ // E.g. "envoy" or "grpc"
+ UserAgentName string `protobuf:"bytes,6,opt,name=user_agent_name,json=userAgentName,proto3" json:"user_agent_name,omitempty"`
+ // Types that are assignable to UserAgentVersionType:
+ // *Node_UserAgentVersion
+ // *Node_UserAgentBuildVersion
+ UserAgentVersionType isNode_UserAgentVersionType `protobuf_oneof:"user_agent_version_type"`
+ // List of extensions and their versions supported by the node.
+ Extensions []*Extension `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty"`
+ // Client feature support list. These are well known features described
+ // in the Envoy API repository for a given major version of an API. Client features
+ // use reverse DNS naming scheme, for example `com.acme.feature`.
+ // See :ref:`the list of features ` that xDS client may
+ // support.
+ ClientFeatures []string `protobuf:"bytes,10,rep,name=client_features,json=clientFeatures,proto3" json:"client_features,omitempty"`
+ // Known listening ports on the node as a generic hint to the management server
+ // for filtering :ref:`listeners ` to be returned. For example,
+ // if there is a listener bound to port 80, the list can optionally contain the
+ // SocketAddress `(0.0.0.0,80)`. The field is optional and just a hint.
+ //
+ // Deprecated: Do not use.
+ ListeningAddresses []*Address `protobuf:"bytes,11,rep,name=listening_addresses,json=listeningAddresses,proto3" json:"listening_addresses,omitempty"`
+ // Deprecated: Do not use.
+ HiddenEnvoyDeprecatedBuildVersion string `protobuf:"bytes,5,opt,name=hidden_envoy_deprecated_build_version,json=hiddenEnvoyDeprecatedBuildVersion,proto3" json:"hidden_envoy_deprecated_build_version,omitempty"`
+}
+
+func (x *Node) Reset() {
+ *x = Node{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Node) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Node) ProtoMessage() {}
+
+func (x *Node) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Node.ProtoReflect.Descriptor instead.
+func (*Node) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Node) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+func (x *Node) GetCluster() string {
+ if x != nil {
+ return x.Cluster
+ }
+ return ""
+}
+
+func (x *Node) GetMetadata() *_struct.Struct {
+ if x != nil {
+ return x.Metadata
+ }
+ return nil
+}
+
+func (x *Node) GetDynamicParameters() map[string]*v31.ContextParams {
+ if x != nil {
+ return x.DynamicParameters
+ }
+ return nil
+}
+
+func (x *Node) GetLocality() *Locality {
+ if x != nil {
+ return x.Locality
+ }
+ return nil
+}
+
+func (x *Node) GetUserAgentName() string {
+ if x != nil {
+ return x.UserAgentName
+ }
+ return ""
+}
+
+func (m *Node) GetUserAgentVersionType() isNode_UserAgentVersionType {
+ if m != nil {
+ return m.UserAgentVersionType
+ }
+ return nil
+}
+
+func (x *Node) GetUserAgentVersion() string {
+ if x, ok := x.GetUserAgentVersionType().(*Node_UserAgentVersion); ok {
+ return x.UserAgentVersion
+ }
+ return ""
+}
+
+func (x *Node) GetUserAgentBuildVersion() *BuildVersion {
+ if x, ok := x.GetUserAgentVersionType().(*Node_UserAgentBuildVersion); ok {
+ return x.UserAgentBuildVersion
+ }
+ return nil
+}
+
+func (x *Node) GetExtensions() []*Extension {
+ if x != nil {
+ return x.Extensions
+ }
+ return nil
+}
+
+func (x *Node) GetClientFeatures() []string {
+ if x != nil {
+ return x.ClientFeatures
+ }
+ return nil
+}
+
+// Deprecated: Do not use.
+func (x *Node) GetListeningAddresses() []*Address {
+ if x != nil {
+ return x.ListeningAddresses
+ }
+ return nil
+}
+
+// Deprecated: Do not use.
+func (x *Node) GetHiddenEnvoyDeprecatedBuildVersion() string {
+ if x != nil {
+ return x.HiddenEnvoyDeprecatedBuildVersion
+ }
+ return ""
+}
+
+type isNode_UserAgentVersionType interface {
+ isNode_UserAgentVersionType()
+}
+
+type Node_UserAgentVersion struct {
+ // Free-form string that identifies the version of the entity requesting config.
+ // E.g. "1.12.2" or "abcd1234", or "SpecialEnvoyBuild"
+ UserAgentVersion string `protobuf:"bytes,7,opt,name=user_agent_version,json=userAgentVersion,proto3,oneof"`
+}
+
+type Node_UserAgentBuildVersion struct {
+ // Structured version of the entity requesting config.
+ UserAgentBuildVersion *BuildVersion `protobuf:"bytes,8,opt,name=user_agent_build_version,json=userAgentBuildVersion,proto3,oneof"`
+}
+
+func (*Node_UserAgentVersion) isNode_UserAgentVersionType() {}
+
+func (*Node_UserAgentBuildVersion) isNode_UserAgentVersionType() {}
+
+// Metadata provides additional inputs to filters based on matched listeners,
+// filter chains, routes and endpoints. It is structured as a map, usually from
+// filter name (in reverse DNS format) to metadata specific to the filter. Metadata
+// key-values for a filter are merged as connection and request handling occurs,
+// with later values for the same key overriding earlier values.
+//
+// An example use of metadata is providing additional values to
+// http_connection_manager in the envoy.http_connection_manager.access_log
+// namespace.
+//
+// Another example use of metadata is to per service config info in cluster metadata, which may get
+// consumed by multiple filters.
+//
+// For load balancing, Metadata provides a means to subset cluster endpoints.
+// Endpoints have a Metadata object associated and routes contain a Metadata
+// object to match against. There are some well defined metadata used today for
+// this purpose:
+//
+// * ``{"envoy.lb": {"canary": }}`` This indicates the canary status of an
+// endpoint and is also used during header processing
+// (x-envoy-upstream-canary) and for stats purposes.
+// [#next-major-version: move to type/metadata/v2]
+type Metadata struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.*
+ // namespace is reserved for Envoy's built-in filters.
+ // If both *filter_metadata* and
+ // :ref:`typed_filter_metadata `
+ // fields are present in the metadata with same keys,
+ // only *typed_filter_metadata* field will be parsed.
+ FilterMetadata map[string]*_struct.Struct `protobuf:"bytes,1,rep,name=filter_metadata,json=filterMetadata,proto3" json:"filter_metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.*
+ // namespace is reserved for Envoy's built-in filters.
+ // The value is encoded as google.protobuf.Any.
+ // If both :ref:`filter_metadata `
+ // and *typed_filter_metadata* fields are present in the metadata with same keys,
+ // only *typed_filter_metadata* field will be parsed.
+ TypedFilterMetadata map[string]*any.Any `protobuf:"bytes,2,rep,name=typed_filter_metadata,json=typedFilterMetadata,proto3" json:"typed_filter_metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *Metadata) Reset() {
+ *x = Metadata{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Metadata) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Metadata) ProtoMessage() {}
+
+func (x *Metadata) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Metadata.ProtoReflect.Descriptor instead.
+func (*Metadata) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *Metadata) GetFilterMetadata() map[string]*_struct.Struct {
+ if x != nil {
+ return x.FilterMetadata
+ }
+ return nil
+}
+
+func (x *Metadata) GetTypedFilterMetadata() map[string]*any.Any {
+ if x != nil {
+ return x.TypedFilterMetadata
+ }
+ return nil
+}
+
+// Runtime derived uint32 with a default when not specified.
+type RuntimeUInt32 struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Default value if runtime value is not available.
+ DefaultValue uint32 `protobuf:"varint,2,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"`
+ // Runtime key to get value for comparison. This value is used if defined.
+ RuntimeKey string `protobuf:"bytes,3,opt,name=runtime_key,json=runtimeKey,proto3" json:"runtime_key,omitempty"`
+}
+
+func (x *RuntimeUInt32) Reset() {
+ *x = RuntimeUInt32{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RuntimeUInt32) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RuntimeUInt32) ProtoMessage() {}
+
+func (x *RuntimeUInt32) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RuntimeUInt32.ProtoReflect.Descriptor instead.
+func (*RuntimeUInt32) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *RuntimeUInt32) GetDefaultValue() uint32 {
+ if x != nil {
+ return x.DefaultValue
+ }
+ return 0
+}
+
+func (x *RuntimeUInt32) GetRuntimeKey() string {
+ if x != nil {
+ return x.RuntimeKey
+ }
+ return ""
+}
+
+// Runtime derived percentage with a default when not specified.
+type RuntimePercent struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Default value if runtime value is not available.
+ DefaultValue *v3.Percent `protobuf:"bytes,1,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"`
+ // Runtime key to get value for comparison. This value is used if defined.
+ RuntimeKey string `protobuf:"bytes,2,opt,name=runtime_key,json=runtimeKey,proto3" json:"runtime_key,omitempty"`
+}
+
+func (x *RuntimePercent) Reset() {
+ *x = RuntimePercent{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RuntimePercent) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RuntimePercent) ProtoMessage() {}
+
+func (x *RuntimePercent) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RuntimePercent.ProtoReflect.Descriptor instead.
+func (*RuntimePercent) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *RuntimePercent) GetDefaultValue() *v3.Percent {
+ if x != nil {
+ return x.DefaultValue
+ }
+ return nil
+}
+
+func (x *RuntimePercent) GetRuntimeKey() string {
+ if x != nil {
+ return x.RuntimeKey
+ }
+ return ""
+}
+
+// Runtime derived double with a default when not specified.
+type RuntimeDouble struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Default value if runtime value is not available.
+ DefaultValue float64 `protobuf:"fixed64,1,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"`
+ // Runtime key to get value for comparison. This value is used if defined.
+ RuntimeKey string `protobuf:"bytes,2,opt,name=runtime_key,json=runtimeKey,proto3" json:"runtime_key,omitempty"`
+}
+
+func (x *RuntimeDouble) Reset() {
+ *x = RuntimeDouble{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RuntimeDouble) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RuntimeDouble) ProtoMessage() {}
+
+func (x *RuntimeDouble) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RuntimeDouble.ProtoReflect.Descriptor instead.
+func (*RuntimeDouble) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *RuntimeDouble) GetDefaultValue() float64 {
+ if x != nil {
+ return x.DefaultValue
+ }
+ return 0
+}
+
+func (x *RuntimeDouble) GetRuntimeKey() string {
+ if x != nil {
+ return x.RuntimeKey
+ }
+ return ""
+}
+
+// Runtime derived bool with a default when not specified.
+type RuntimeFeatureFlag struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Default value if runtime value is not available.
+ DefaultValue *wrappers.BoolValue `protobuf:"bytes,1,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"`
+ // Runtime key to get value for comparison. This value is used if defined. The boolean value must
+ // be represented via its
+ // `canonical JSON encoding `_.
+ RuntimeKey string `protobuf:"bytes,2,opt,name=runtime_key,json=runtimeKey,proto3" json:"runtime_key,omitempty"`
+}
+
+func (x *RuntimeFeatureFlag) Reset() {
+ *x = RuntimeFeatureFlag{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RuntimeFeatureFlag) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RuntimeFeatureFlag) ProtoMessage() {}
+
+func (x *RuntimeFeatureFlag) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RuntimeFeatureFlag.ProtoReflect.Descriptor instead.
+func (*RuntimeFeatureFlag) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *RuntimeFeatureFlag) GetDefaultValue() *wrappers.BoolValue {
+ if x != nil {
+ return x.DefaultValue
+ }
+ return nil
+}
+
+func (x *RuntimeFeatureFlag) GetRuntimeKey() string {
+ if x != nil {
+ return x.RuntimeKey
+ }
+ return ""
+}
+
+// Header name/value pair.
+type HeaderValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Header name.
+ Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // Header value.
+ //
+ // The same :ref:`format specifier ` as used for
+ // :ref:`HTTP access logging ` applies here, however
+ // unknown header values are replaced with the empty string instead of `-`.
+ Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *HeaderValue) Reset() {
+ *x = HeaderValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HeaderValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HeaderValue) ProtoMessage() {}
+
+func (x *HeaderValue) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HeaderValue.ProtoReflect.Descriptor instead.
+func (*HeaderValue) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *HeaderValue) GetKey() string {
+ if x != nil {
+ return x.Key
+ }
+ return ""
+}
+
+func (x *HeaderValue) GetValue() string {
+ if x != nil {
+ return x.Value
+ }
+ return ""
+}
+
+// Header name/value pair plus option to control append behavior.
+type HeaderValueOption struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Header name/value pair that this option applies to.
+ Header *HeaderValue `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ // Should the value be appended? If true (default), the value is appended to
+ // existing values. Otherwise it replaces any existing values.
+ Append *wrappers.BoolValue `protobuf:"bytes,2,opt,name=append,proto3" json:"append,omitempty"`
+}
+
+func (x *HeaderValueOption) Reset() {
+ *x = HeaderValueOption{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HeaderValueOption) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HeaderValueOption) ProtoMessage() {}
+
+func (x *HeaderValueOption) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HeaderValueOption.ProtoReflect.Descriptor instead.
+func (*HeaderValueOption) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *HeaderValueOption) GetHeader() *HeaderValue {
+ if x != nil {
+ return x.Header
+ }
+ return nil
+}
+
+func (x *HeaderValueOption) GetAppend() *wrappers.BoolValue {
+ if x != nil {
+ return x.Append
+ }
+ return nil
+}
+
+// Wrapper for a set of headers.
+type HeaderMap struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Headers []*HeaderValue `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"`
+}
+
+func (x *HeaderMap) Reset() {
+ *x = HeaderMap{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HeaderMap) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HeaderMap) ProtoMessage() {}
+
+func (x *HeaderMap) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HeaderMap.ProtoReflect.Descriptor instead.
+func (*HeaderMap) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *HeaderMap) GetHeaders() []*HeaderValue {
+ if x != nil {
+ return x.Headers
+ }
+ return nil
+}
+
+// A directory that is watched for changes, e.g. by inotify on Linux. Move/rename
+// events inside this directory trigger the watch.
+type WatchedDirectory struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Directory path to watch.
+ Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+}
+
+func (x *WatchedDirectory) Reset() {
+ *x = WatchedDirectory{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *WatchedDirectory) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*WatchedDirectory) ProtoMessage() {}
+
+func (x *WatchedDirectory) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use WatchedDirectory.ProtoReflect.Descriptor instead.
+func (*WatchedDirectory) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *WatchedDirectory) GetPath() string {
+ if x != nil {
+ return x.Path
+ }
+ return ""
+}
+
+// Data source consisting of either a file or an inline value.
+type DataSource struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Specifier:
+ // *DataSource_Filename
+ // *DataSource_InlineBytes
+ // *DataSource_InlineString
+ Specifier isDataSource_Specifier `protobuf_oneof:"specifier"`
+}
+
+func (x *DataSource) Reset() {
+ *x = DataSource{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DataSource) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DataSource) ProtoMessage() {}
+
+func (x *DataSource) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DataSource.ProtoReflect.Descriptor instead.
+func (*DataSource) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{13}
+}
+
+func (m *DataSource) GetSpecifier() isDataSource_Specifier {
+ if m != nil {
+ return m.Specifier
+ }
+ return nil
+}
+
+func (x *DataSource) GetFilename() string {
+ if x, ok := x.GetSpecifier().(*DataSource_Filename); ok {
+ return x.Filename
+ }
+ return ""
+}
+
+func (x *DataSource) GetInlineBytes() []byte {
+ if x, ok := x.GetSpecifier().(*DataSource_InlineBytes); ok {
+ return x.InlineBytes
+ }
+ return nil
+}
+
+func (x *DataSource) GetInlineString() string {
+ if x, ok := x.GetSpecifier().(*DataSource_InlineString); ok {
+ return x.InlineString
+ }
+ return ""
+}
+
+type isDataSource_Specifier interface {
+ isDataSource_Specifier()
+}
+
+type DataSource_Filename struct {
+ // Local filesystem data source.
+ Filename string `protobuf:"bytes,1,opt,name=filename,proto3,oneof"`
+}
+
+type DataSource_InlineBytes struct {
+ // Bytes inlined in the configuration.
+ InlineBytes []byte `protobuf:"bytes,2,opt,name=inline_bytes,json=inlineBytes,proto3,oneof"`
+}
+
+type DataSource_InlineString struct {
+ // String inlined in the configuration.
+ InlineString string `protobuf:"bytes,3,opt,name=inline_string,json=inlineString,proto3,oneof"`
+}
+
+func (*DataSource_Filename) isDataSource_Specifier() {}
+
+func (*DataSource_InlineBytes) isDataSource_Specifier() {}
+
+func (*DataSource_InlineString) isDataSource_Specifier() {}
+
+// The message specifies the retry policy of remote data source when fetching fails.
+type RetryPolicy struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Specifies parameters that control :ref:`retry backoff strategy `.
+ // This parameter is optional, in which case the default base interval is 1000 milliseconds. The
+ // default maximum interval is 10 times the base interval.
+ RetryBackOff *BackoffStrategy `protobuf:"bytes,1,opt,name=retry_back_off,json=retryBackOff,proto3" json:"retry_back_off,omitempty"`
+ // Specifies the allowed number of retries. This parameter is optional and
+ // defaults to 1.
+ NumRetries *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=num_retries,json=numRetries,proto3" json:"num_retries,omitempty"`
+}
+
+func (x *RetryPolicy) Reset() {
+ *x = RetryPolicy{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RetryPolicy) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RetryPolicy) ProtoMessage() {}
+
+func (x *RetryPolicy) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RetryPolicy.ProtoReflect.Descriptor instead.
+func (*RetryPolicy) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{14}
+}
+
+func (x *RetryPolicy) GetRetryBackOff() *BackoffStrategy {
+ if x != nil {
+ return x.RetryBackOff
+ }
+ return nil
+}
+
+func (x *RetryPolicy) GetNumRetries() *wrappers.UInt32Value {
+ if x != nil {
+ return x.NumRetries
+ }
+ return nil
+}
+
+// The message specifies how to fetch data from remote and how to verify it.
+type RemoteDataSource struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The HTTP URI to fetch the remote data.
+ HttpUri *HttpUri `protobuf:"bytes,1,opt,name=http_uri,json=httpUri,proto3" json:"http_uri,omitempty"`
+ // SHA256 string for verifying data.
+ Sha256 string `protobuf:"bytes,2,opt,name=sha256,proto3" json:"sha256,omitempty"`
+ // Retry policy for fetching remote data.
+ RetryPolicy *RetryPolicy `protobuf:"bytes,3,opt,name=retry_policy,json=retryPolicy,proto3" json:"retry_policy,omitempty"`
+}
+
+func (x *RemoteDataSource) Reset() {
+ *x = RemoteDataSource{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RemoteDataSource) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RemoteDataSource) ProtoMessage() {}
+
+func (x *RemoteDataSource) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RemoteDataSource.ProtoReflect.Descriptor instead.
+func (*RemoteDataSource) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{15}
+}
+
+func (x *RemoteDataSource) GetHttpUri() *HttpUri {
+ if x != nil {
+ return x.HttpUri
+ }
+ return nil
+}
+
+func (x *RemoteDataSource) GetSha256() string {
+ if x != nil {
+ return x.Sha256
+ }
+ return ""
+}
+
+func (x *RemoteDataSource) GetRetryPolicy() *RetryPolicy {
+ if x != nil {
+ return x.RetryPolicy
+ }
+ return nil
+}
+
+// Async data source which support async data fetch.
+type AsyncDataSource struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Specifier:
+ // *AsyncDataSource_Local
+ // *AsyncDataSource_Remote
+ Specifier isAsyncDataSource_Specifier `protobuf_oneof:"specifier"`
+}
+
+func (x *AsyncDataSource) Reset() {
+ *x = AsyncDataSource{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AsyncDataSource) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AsyncDataSource) ProtoMessage() {}
+
+func (x *AsyncDataSource) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[16]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AsyncDataSource.ProtoReflect.Descriptor instead.
+func (*AsyncDataSource) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{16}
+}
+
+func (m *AsyncDataSource) GetSpecifier() isAsyncDataSource_Specifier {
+ if m != nil {
+ return m.Specifier
+ }
+ return nil
+}
+
+func (x *AsyncDataSource) GetLocal() *DataSource {
+ if x, ok := x.GetSpecifier().(*AsyncDataSource_Local); ok {
+ return x.Local
+ }
+ return nil
+}
+
+func (x *AsyncDataSource) GetRemote() *RemoteDataSource {
+ if x, ok := x.GetSpecifier().(*AsyncDataSource_Remote); ok {
+ return x.Remote
+ }
+ return nil
+}
+
+type isAsyncDataSource_Specifier interface {
+ isAsyncDataSource_Specifier()
+}
+
+type AsyncDataSource_Local struct {
+ // Local async data source.
+ Local *DataSource `protobuf:"bytes,1,opt,name=local,proto3,oneof"`
+}
+
+type AsyncDataSource_Remote struct {
+ // Remote async data source.
+ Remote *RemoteDataSource `protobuf:"bytes,2,opt,name=remote,proto3,oneof"`
+}
+
+func (*AsyncDataSource_Local) isAsyncDataSource_Specifier() {}
+
+func (*AsyncDataSource_Remote) isAsyncDataSource_Specifier() {}
+
+// Configuration for transport socket in :ref:`listeners ` and
+// :ref:`clusters `. If the configuration is
+// empty, a default transport socket implementation and configuration will be
+// chosen based on the platform and existence of tls_context.
+type TransportSocket struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name of the transport socket to instantiate. The name must match a supported transport
+ // socket implementation.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Implementation specific configuration which depends on the implementation being instantiated.
+ // See the supported transport socket implementations for further documentation.
+ //
+ // Types that are assignable to ConfigType:
+ // *TransportSocket_TypedConfig
+ // *TransportSocket_HiddenEnvoyDeprecatedConfig
+ ConfigType isTransportSocket_ConfigType `protobuf_oneof:"config_type"`
+}
+
+func (x *TransportSocket) Reset() {
+ *x = TransportSocket{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TransportSocket) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TransportSocket) ProtoMessage() {}
+
+func (x *TransportSocket) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[17]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TransportSocket.ProtoReflect.Descriptor instead.
+func (*TransportSocket) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{17}
+}
+
+func (x *TransportSocket) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (m *TransportSocket) GetConfigType() isTransportSocket_ConfigType {
+ if m != nil {
+ return m.ConfigType
+ }
+ return nil
+}
+
+func (x *TransportSocket) GetTypedConfig() *any.Any {
+ if x, ok := x.GetConfigType().(*TransportSocket_TypedConfig); ok {
+ return x.TypedConfig
+ }
+ return nil
+}
+
+// Deprecated: Do not use.
+func (x *TransportSocket) GetHiddenEnvoyDeprecatedConfig() *_struct.Struct {
+ if x, ok := x.GetConfigType().(*TransportSocket_HiddenEnvoyDeprecatedConfig); ok {
+ return x.HiddenEnvoyDeprecatedConfig
+ }
+ return nil
+}
+
+type isTransportSocket_ConfigType interface {
+ isTransportSocket_ConfigType()
+}
+
+type TransportSocket_TypedConfig struct {
+ TypedConfig *any.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"`
+}
+
+type TransportSocket_HiddenEnvoyDeprecatedConfig struct {
+ // Deprecated: Do not use.
+ HiddenEnvoyDeprecatedConfig *_struct.Struct `protobuf:"bytes,2,opt,name=hidden_envoy_deprecated_config,json=hiddenEnvoyDeprecatedConfig,proto3,oneof"`
+}
+
+func (*TransportSocket_TypedConfig) isTransportSocket_ConfigType() {}
+
+func (*TransportSocket_HiddenEnvoyDeprecatedConfig) isTransportSocket_ConfigType() {}
+
+// Runtime derived FractionalPercent with defaults for when the numerator or denominator is not
+// specified via a runtime key.
+//
+// .. note::
+//
+// Parsing of the runtime key's data is implemented such that it may be represented as a
+// :ref:`FractionalPercent ` proto represented as JSON/YAML
+// and may also be represented as an integer with the assumption that the value is an integral
+// percentage out of 100. For instance, a runtime key lookup returning the value "42" would parse
+// as a `FractionalPercent` whose numerator is 42 and denominator is HUNDRED.
+type RuntimeFractionalPercent struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Default value if the runtime value's for the numerator/denominator keys are not available.
+ DefaultValue *v3.FractionalPercent `protobuf:"bytes,1,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"`
+ // Runtime key for a YAML representation of a FractionalPercent.
+ RuntimeKey string `protobuf:"bytes,2,opt,name=runtime_key,json=runtimeKey,proto3" json:"runtime_key,omitempty"`
+}
+
+func (x *RuntimeFractionalPercent) Reset() {
+ *x = RuntimeFractionalPercent{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RuntimeFractionalPercent) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RuntimeFractionalPercent) ProtoMessage() {}
+
+func (x *RuntimeFractionalPercent) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[18]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RuntimeFractionalPercent.ProtoReflect.Descriptor instead.
+func (*RuntimeFractionalPercent) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{18}
+}
+
+func (x *RuntimeFractionalPercent) GetDefaultValue() *v3.FractionalPercent {
+ if x != nil {
+ return x.DefaultValue
+ }
+ return nil
+}
+
+func (x *RuntimeFractionalPercent) GetRuntimeKey() string {
+ if x != nil {
+ return x.RuntimeKey
+ }
+ return ""
+}
+
+// Identifies a specific ControlPlane instance that Envoy is connected to.
+type ControlPlane struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // An opaque control plane identifier that uniquely identifies an instance
+ // of control plane. This can be used to identify which control plane instance,
+ // the Envoy is connected to.
+ Identifier string `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"`
+}
+
+func (x *ControlPlane) Reset() {
+ *x = ControlPlane{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ControlPlane) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ControlPlane) ProtoMessage() {}
+
+func (x *ControlPlane) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[19]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ControlPlane.ProtoReflect.Descriptor instead.
+func (*ControlPlane) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{19}
+}
+
+func (x *ControlPlane) GetIdentifier() string {
+ if x != nil {
+ return x.Identifier
+ }
+ return ""
+}
+
+var File_envoy_config_core_v3_base_proto protoreflect.FileDescriptor
+
+var file_envoy_config_core_v3_base_proto_rawDesc = []byte{
+ 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64,
+ 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76,
+ 0x33, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f,
+ 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x75, 0x72, 0x69, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65,
+ 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33,
+ 0x2f, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x20, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f,
+ 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
+ 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64,
+ 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x22, 0x74, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x16,
+ 0x0a, 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,
+ 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x75,
+ 0x62, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75,
+ 0x62, 0x5a, 0x6f, 0x6e, 0x65, 0x3a, 0x21, 0x9a, 0xc5, 0x88, 0x1e, 0x1c, 0x0a, 0x1a, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e,
+ 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x22, 0xa4, 0x01, 0x0a, 0x0c, 0x42, 0x75, 0x69,
+ 0x6c, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x07, 0x76, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e,
+ 0x74, 0x69, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x08,
+ 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a,
+ 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22,
+ 0xe2, 0x01, 0x0a, 0x09, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x27, 0x0a,
+ 0x0f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x79, 0x70, 0x65, 0x44, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x3c, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x42,
+ 0x75, 0x69, 0x6c, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64,
+ 0x3a, 0x22, 0x9a, 0xc5, 0x88, 0x1e, 0x1d, 0x0a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61,
+ 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e,
+ 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xfc, 0x06, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x0e, 0x0a,
+ 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x18, 0x0a,
+ 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
+ 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64,
+ 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75,
+ 0x63, 0x74, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x60, 0x0a, 0x12,
+ 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65,
+ 0x72, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e,
+ 0x4e, 0x6f, 0x64, 0x65, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x50, 0x61, 0x72, 0x61,
+ 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x64, 0x79, 0x6e,
+ 0x61, 0x6d, 0x69, 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x3a,
+ 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79,
+ 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x26, 0x0a, 0x0f, 0x75, 0x73,
+ 0x65, 0x72, 0x5f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0d, 0x75, 0x73, 0x65, 0x72, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x4e, 0x61,
+ 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x12, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x61, 0x67, 0x65, 0x6e, 0x74,
+ 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00,
+ 0x52, 0x10, 0x75, 0x73, 0x65, 0x72, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x12, 0x5d, 0x0a, 0x18, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x61, 0x67, 0x65, 0x6e, 0x74,
+ 0x5f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x08,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x75, 0x69, 0x6c,
+ 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x15, 0x75, 0x73, 0x65, 0x72,
+ 0x41, 0x67, 0x65, 0x6e, 0x74, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x12, 0x3f, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18,
+ 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x74,
+ 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
+ 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x66, 0x65, 0x61,
+ 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6c, 0x69,
+ 0x65, 0x6e, 0x74, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x5b, 0x0a, 0x13, 0x6c,
+ 0x69, 0x73, 0x74, 0x65, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
+ 0x65, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e,
+ 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04,
+ 0x03, 0x33, 0x2e, 0x30, 0x52, 0x12, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x69, 0x6e, 0x67, 0x41,
+ 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x25, 0x68, 0x69, 0x64, 0x64,
+ 0x65, 0x6e, 0x5f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
+ 0x74, 0x65, 0x64, 0x5f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04,
+ 0x03, 0x33, 0x2e, 0x30, 0x52, 0x21, 0x68, 0x69, 0x64, 0x64, 0x65, 0x6e, 0x45, 0x6e, 0x76, 0x6f,
+ 0x79, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x42, 0x75, 0x69, 0x6c, 0x64,
+ 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x60, 0x0a, 0x16, 0x44, 0x79, 0x6e, 0x61, 0x6d,
+ 0x69, 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
+ 0x6b, 0x65, 0x79, 0x12, 0x30, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33,
+ 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x1d, 0x9a, 0xc5, 0x88, 0x1e, 0x18,
+ 0x0a, 0x16, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63,
+ 0x6f, 0x72, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x19, 0x0a, 0x17, 0x75, 0x73, 0x65, 0x72,
+ 0x5f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74,
+ 0x79, 0x70, 0x65, 0x22, 0xb1, 0x03, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
+ 0x12, 0x5b, 0x0a, 0x0f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64,
+ 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33,
+ 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72,
+ 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x66,
+ 0x69, 0x6c, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x6b, 0x0a,
+ 0x15, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x65,
+ 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65,
+ 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x79, 0x70,
+ 0x65, 0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x74, 0x79, 0x70, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x74,
+ 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x5a, 0x0a, 0x13, 0x46, 0x69,
+ 0x6c, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
+ 0x6b, 0x65, 0x79, 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x5c, 0x0a, 0x18, 0x54, 0x79, 0x70, 0x65, 0x64, 0x46,
+ 0x69, 0x6c, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74,
+ 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x21, 0x9a, 0xc5, 0x88, 0x1e, 0x1c, 0x0a, 0x1a, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4d,
+ 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x86, 0x01, 0x0a, 0x0d, 0x52, 0x75, 0x6e, 0x74,
+ 0x69, 0x6d, 0x65, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66,
+ 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d,
+ 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x28,
+ 0x0a, 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x72, 0x75,
+ 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a,
+ 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32,
+ 0x22, 0x77, 0x0a, 0x0e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65,
+ 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e,
+ 0x74, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12,
+ 0x28, 0x0a, 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x72,
+ 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x22, 0x86, 0x01, 0x0a, 0x0d, 0x52, 0x75,
+ 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64,
+ 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x01, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x12, 0x28, 0x0a, 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a,
+ 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e,
+ 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x44, 0x6f, 0x75, 0x62,
+ 0x6c, 0x65, 0x22, 0xb6, 0x01, 0x0a, 0x12, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x65,
+ 0x61, 0x74, 0x75, 0x72, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x12, 0x49, 0x0a, 0x0d, 0x64, 0x65, 0x66,
+ 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42,
+ 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x12, 0x28, 0x0a, 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f,
+ 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02,
+ 0x10, 0x01, 0x52, 0x0a, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x3a, 0x2b,
+ 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69,
+ 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65,
+ 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x22, 0x7f, 0x0a, 0x0b, 0x48,
+ 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x03, 0x6b, 0x65,
+ 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x11, 0xfa, 0x42, 0x0e, 0x72, 0x0c, 0x10, 0x01,
+ 0x28, 0x80, 0x80, 0x01, 0xc0, 0x01, 0x01, 0xc8, 0x01, 0x00, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0f,
+ 0xfa, 0x42, 0x0c, 0x72, 0x0a, 0x28, 0x80, 0x80, 0x01, 0xc0, 0x01, 0x02, 0xc8, 0x01, 0x00, 0x52,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65,
+ 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xb8, 0x01, 0x0a,
+ 0x11, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x43, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52,
+ 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x32, 0x0a, 0x06, 0x61, 0x70, 0x70, 0x65, 0x6e,
+ 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x52, 0x06, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x3a, 0x2a, 0x9a, 0xc5, 0x88,
+ 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32,
+ 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6c, 0x0a, 0x09, 0x48, 0x65, 0x61, 0x64, 0x65,
+ 0x72, 0x4d, 0x61, 0x70, 0x12, 0x3b, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61,
+ 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72,
+ 0x73, 0x3a, 0x22, 0x9a, 0xc5, 0x88, 0x1e, 0x1d, 0x0a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64,
+ 0x65, 0x72, 0x4d, 0x61, 0x70, 0x22, 0x2f, 0x0a, 0x10, 0x57, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64,
+ 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x1b, 0x0a, 0x04, 0x70, 0x61, 0x74,
+ 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01,
+ 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0xb6, 0x01, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x53,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01,
+ 0x48, 0x00, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0c,
+ 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0b, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x42, 0x79, 0x74, 0x65,
+ 0x73, 0x12, 0x25, 0x0a, 0x0d, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x69,
+ 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x69, 0x6e, 0x6c, 0x69,
+ 0x6e, 0x65, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x23, 0x9a, 0xc5, 0x88, 0x1e, 0x1e, 0x0a,
+ 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x10, 0x0a,
+ 0x09, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22,
+ 0xd4, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12,
+ 0x4b, 0x0a, 0x0e, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x6f, 0x66,
+ 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x42,
+ 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x0c,
+ 0x72, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x12, 0x52, 0x0a, 0x0b,
+ 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42,
+ 0x13, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x0d, 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x74,
+ 0x72, 0x69, 0x65, 0x73, 0x52, 0x0a, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73,
+ 0x3a, 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61,
+ 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79,
+ 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xe8, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x6d, 0x6f, 0x74,
+ 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x42, 0x0a, 0x08, 0x68,
+ 0x74, 0x74, 0x70, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72,
+ 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x55, 0x72, 0x69, 0x42, 0x08, 0xfa, 0x42,
+ 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x68, 0x74, 0x74, 0x70, 0x55, 0x72, 0x69, 0x12,
+ 0x1f, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36,
+ 0x12, 0x44, 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65,
+ 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79,
+ 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65,
+ 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x22, 0xc9, 0x01, 0x0a, 0x0f, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x44, 0x61, 0x74, 0x61, 0x53,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x38, 0x0a, 0x05, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61,
+ 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, 0x05, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x12,
+ 0x40, 0x0a, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63,
+ 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x44, 0x61, 0x74,
+ 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x74,
+ 0x65, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x73, 0x79, 0x6e,
+ 0x63, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x10, 0x0a, 0x09, 0x73,
+ 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x8f, 0x02,
+ 0x0a, 0x0f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65,
+ 0x74, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39,
+ 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79,
+ 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6b, 0x0a, 0x1e, 0x68, 0x69, 0x64,
+ 0x64, 0x65, 0x6e, 0x5f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63,
+ 0x61, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7,
+ 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x48, 0x00, 0x52, 0x1b, 0x68, 0x69, 0x64, 0x64, 0x65,
+ 0x6e, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65,
+ 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74,
+ 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22,
+ 0xbf, 0x01, 0x0a, 0x18, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x4f, 0x0a, 0x0d,
+ 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65,
+ 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65,
+ 0x72, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52,
+ 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a,
+ 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0a, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x3a, 0x31,
+ 0x9a, 0xc5, 0x88, 0x1e, 0x2c, 0x0a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69,
+ 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65,
+ 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e,
+ 0x74, 0x22, 0x55, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x50, 0x6c, 0x61, 0x6e,
+ 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65,
+ 0x72, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x74,
+ 0x72, 0x6f, 0x6c, 0x50, 0x6c, 0x61, 0x6e, 0x65, 0x2a, 0x28, 0x0a, 0x0f, 0x52, 0x6f, 0x75, 0x74,
+ 0x69, 0x6e, 0x67, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x0b, 0x0a, 0x07, 0x44,
+ 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x49, 0x47, 0x48,
+ 0x10, 0x01, 0x2a, 0x89, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65,
+ 0x74, 0x68, 0x6f, 0x64, 0x12, 0x16, 0x0a, 0x12, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x55,
+ 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03,
+ 0x47, 0x45, 0x54, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x45, 0x41, 0x44, 0x10, 0x02, 0x12,
+ 0x08, 0x0a, 0x04, 0x50, 0x4f, 0x53, 0x54, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x50, 0x55, 0x54,
+ 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x05, 0x12, 0x0b,
+ 0x0a, 0x07, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x10, 0x06, 0x12, 0x0b, 0x0a, 0x07, 0x4f,
+ 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x10, 0x07, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x52, 0x41, 0x43,
+ 0x45, 0x10, 0x08, 0x12, 0x09, 0x0a, 0x05, 0x50, 0x41, 0x54, 0x43, 0x48, 0x10, 0x09, 0x2a, 0x3e,
+ 0x0a, 0x10, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
+ 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x42, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x01,
+ 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x55, 0x54, 0x42, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x02, 0x42, 0x39,
+ 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72,
+ 0x65, 0x2e, 0x76, 0x33, 0x42, 0x09, 0x42, 0x61, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
+ 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+}
+
+var (
+ file_envoy_config_core_v3_base_proto_rawDescOnce sync.Once
+ file_envoy_config_core_v3_base_proto_rawDescData = file_envoy_config_core_v3_base_proto_rawDesc
+)
+
+func file_envoy_config_core_v3_base_proto_rawDescGZIP() []byte {
+ file_envoy_config_core_v3_base_proto_rawDescOnce.Do(func() {
+ file_envoy_config_core_v3_base_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_base_proto_rawDescData)
+ })
+ return file_envoy_config_core_v3_base_proto_rawDescData
+}
+
+var file_envoy_config_core_v3_base_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
+var file_envoy_config_core_v3_base_proto_msgTypes = make([]protoimpl.MessageInfo, 23)
+var file_envoy_config_core_v3_base_proto_goTypes = []interface{}{
+ (RoutingPriority)(0), // 0: envoy.config.core.v3.RoutingPriority
+ (RequestMethod)(0), // 1: envoy.config.core.v3.RequestMethod
+ (TrafficDirection)(0), // 2: envoy.config.core.v3.TrafficDirection
+ (*Locality)(nil), // 3: envoy.config.core.v3.Locality
+ (*BuildVersion)(nil), // 4: envoy.config.core.v3.BuildVersion
+ (*Extension)(nil), // 5: envoy.config.core.v3.Extension
+ (*Node)(nil), // 6: envoy.config.core.v3.Node
+ (*Metadata)(nil), // 7: envoy.config.core.v3.Metadata
+ (*RuntimeUInt32)(nil), // 8: envoy.config.core.v3.RuntimeUInt32
+ (*RuntimePercent)(nil), // 9: envoy.config.core.v3.RuntimePercent
+ (*RuntimeDouble)(nil), // 10: envoy.config.core.v3.RuntimeDouble
+ (*RuntimeFeatureFlag)(nil), // 11: envoy.config.core.v3.RuntimeFeatureFlag
+ (*HeaderValue)(nil), // 12: envoy.config.core.v3.HeaderValue
+ (*HeaderValueOption)(nil), // 13: envoy.config.core.v3.HeaderValueOption
+ (*HeaderMap)(nil), // 14: envoy.config.core.v3.HeaderMap
+ (*WatchedDirectory)(nil), // 15: envoy.config.core.v3.WatchedDirectory
+ (*DataSource)(nil), // 16: envoy.config.core.v3.DataSource
+ (*RetryPolicy)(nil), // 17: envoy.config.core.v3.RetryPolicy
+ (*RemoteDataSource)(nil), // 18: envoy.config.core.v3.RemoteDataSource
+ (*AsyncDataSource)(nil), // 19: envoy.config.core.v3.AsyncDataSource
+ (*TransportSocket)(nil), // 20: envoy.config.core.v3.TransportSocket
+ (*RuntimeFractionalPercent)(nil), // 21: envoy.config.core.v3.RuntimeFractionalPercent
+ (*ControlPlane)(nil), // 22: envoy.config.core.v3.ControlPlane
+ nil, // 23: envoy.config.core.v3.Node.DynamicParametersEntry
+ nil, // 24: envoy.config.core.v3.Metadata.FilterMetadataEntry
+ nil, // 25: envoy.config.core.v3.Metadata.TypedFilterMetadataEntry
+ (*v3.SemanticVersion)(nil), // 26: envoy.type.v3.SemanticVersion
+ (*_struct.Struct)(nil), // 27: google.protobuf.Struct
+ (*Address)(nil), // 28: envoy.config.core.v3.Address
+ (*v3.Percent)(nil), // 29: envoy.type.v3.Percent
+ (*wrappers.BoolValue)(nil), // 30: google.protobuf.BoolValue
+ (*BackoffStrategy)(nil), // 31: envoy.config.core.v3.BackoffStrategy
+ (*wrappers.UInt32Value)(nil), // 32: google.protobuf.UInt32Value
+ (*HttpUri)(nil), // 33: envoy.config.core.v3.HttpUri
+ (*any.Any)(nil), // 34: google.protobuf.Any
+ (*v3.FractionalPercent)(nil), // 35: envoy.type.v3.FractionalPercent
+ (*v31.ContextParams)(nil), // 36: xds.core.v3.ContextParams
+}
+var file_envoy_config_core_v3_base_proto_depIdxs = []int32{
+ 26, // 0: envoy.config.core.v3.BuildVersion.version:type_name -> envoy.type.v3.SemanticVersion
+ 27, // 1: envoy.config.core.v3.BuildVersion.metadata:type_name -> google.protobuf.Struct
+ 4, // 2: envoy.config.core.v3.Extension.version:type_name -> envoy.config.core.v3.BuildVersion
+ 27, // 3: envoy.config.core.v3.Node.metadata:type_name -> google.protobuf.Struct
+ 23, // 4: envoy.config.core.v3.Node.dynamic_parameters:type_name -> envoy.config.core.v3.Node.DynamicParametersEntry
+ 3, // 5: envoy.config.core.v3.Node.locality:type_name -> envoy.config.core.v3.Locality
+ 4, // 6: envoy.config.core.v3.Node.user_agent_build_version:type_name -> envoy.config.core.v3.BuildVersion
+ 5, // 7: envoy.config.core.v3.Node.extensions:type_name -> envoy.config.core.v3.Extension
+ 28, // 8: envoy.config.core.v3.Node.listening_addresses:type_name -> envoy.config.core.v3.Address
+ 24, // 9: envoy.config.core.v3.Metadata.filter_metadata:type_name -> envoy.config.core.v3.Metadata.FilterMetadataEntry
+ 25, // 10: envoy.config.core.v3.Metadata.typed_filter_metadata:type_name -> envoy.config.core.v3.Metadata.TypedFilterMetadataEntry
+ 29, // 11: envoy.config.core.v3.RuntimePercent.default_value:type_name -> envoy.type.v3.Percent
+ 30, // 12: envoy.config.core.v3.RuntimeFeatureFlag.default_value:type_name -> google.protobuf.BoolValue
+ 12, // 13: envoy.config.core.v3.HeaderValueOption.header:type_name -> envoy.config.core.v3.HeaderValue
+ 30, // 14: envoy.config.core.v3.HeaderValueOption.append:type_name -> google.protobuf.BoolValue
+ 12, // 15: envoy.config.core.v3.HeaderMap.headers:type_name -> envoy.config.core.v3.HeaderValue
+ 31, // 16: envoy.config.core.v3.RetryPolicy.retry_back_off:type_name -> envoy.config.core.v3.BackoffStrategy
+ 32, // 17: envoy.config.core.v3.RetryPolicy.num_retries:type_name -> google.protobuf.UInt32Value
+ 33, // 18: envoy.config.core.v3.RemoteDataSource.http_uri:type_name -> envoy.config.core.v3.HttpUri
+ 17, // 19: envoy.config.core.v3.RemoteDataSource.retry_policy:type_name -> envoy.config.core.v3.RetryPolicy
+ 16, // 20: envoy.config.core.v3.AsyncDataSource.local:type_name -> envoy.config.core.v3.DataSource
+ 18, // 21: envoy.config.core.v3.AsyncDataSource.remote:type_name -> envoy.config.core.v3.RemoteDataSource
+ 34, // 22: envoy.config.core.v3.TransportSocket.typed_config:type_name -> google.protobuf.Any
+ 27, // 23: envoy.config.core.v3.TransportSocket.hidden_envoy_deprecated_config:type_name -> google.protobuf.Struct
+ 35, // 24: envoy.config.core.v3.RuntimeFractionalPercent.default_value:type_name -> envoy.type.v3.FractionalPercent
+ 36, // 25: envoy.config.core.v3.Node.DynamicParametersEntry.value:type_name -> xds.core.v3.ContextParams
+ 27, // 26: envoy.config.core.v3.Metadata.FilterMetadataEntry.value:type_name -> google.protobuf.Struct
+ 34, // 27: envoy.config.core.v3.Metadata.TypedFilterMetadataEntry.value:type_name -> google.protobuf.Any
+ 28, // [28:28] is the sub-list for method output_type
+ 28, // [28:28] is the sub-list for method input_type
+ 28, // [28:28] is the sub-list for extension type_name
+ 28, // [28:28] is the sub-list for extension extendee
+ 0, // [0:28] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_core_v3_base_proto_init() }
+func file_envoy_config_core_v3_base_proto_init() {
+ if File_envoy_config_core_v3_base_proto != nil {
+ return
+ }
+ file_envoy_config_core_v3_address_proto_init()
+ file_envoy_config_core_v3_backoff_proto_init()
+ file_envoy_config_core_v3_http_uri_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_core_v3_base_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Locality); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BuildVersion); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Extension); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Node); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Metadata); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RuntimeUInt32); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RuntimePercent); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RuntimeDouble); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RuntimeFeatureFlag); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HeaderValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HeaderValueOption); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HeaderMap); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*WatchedDirectory); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DataSource); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RetryPolicy); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RemoteDataSource); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AsyncDataSource); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TransportSocket); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RuntimeFractionalPercent); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ControlPlane); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[3].OneofWrappers = []interface{}{
+ (*Node_UserAgentVersion)(nil),
+ (*Node_UserAgentBuildVersion)(nil),
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[13].OneofWrappers = []interface{}{
+ (*DataSource_Filename)(nil),
+ (*DataSource_InlineBytes)(nil),
+ (*DataSource_InlineString)(nil),
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[16].OneofWrappers = []interface{}{
+ (*AsyncDataSource_Local)(nil),
+ (*AsyncDataSource_Remote)(nil),
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[17].OneofWrappers = []interface{}{
+ (*TransportSocket_TypedConfig)(nil),
+ (*TransportSocket_HiddenEnvoyDeprecatedConfig)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_core_v3_base_proto_rawDesc,
+ NumEnums: 3,
+ NumMessages: 23,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_core_v3_base_proto_goTypes,
+ DependencyIndexes: file_envoy_config_core_v3_base_proto_depIdxs,
+ EnumInfos: file_envoy_config_core_v3_base_proto_enumTypes,
+ MessageInfos: file_envoy_config_core_v3_base_proto_msgTypes,
+ }.Build()
+ File_envoy_config_core_v3_base_proto = out.File
+ file_envoy_config_core_v3_base_proto_rawDesc = nil
+ file_envoy_config_core_v3_base_proto_goTypes = nil
+ file_envoy_config_core_v3_base_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.validate.go
new file mode 100644
index 000000000..ec720aa5a
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.validate.go
@@ -0,0 +1,1822 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/core/v3/base.proto
+
+package envoy_config_core_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// Validate checks the field values on Locality with the rules defined in the
+// proto definition for this message. If any rules are violated, an error is returned.
+func (m *Locality) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ // no validation rules for Region
+
+ // no validation rules for Zone
+
+ // no validation rules for SubZone
+
+ return nil
+}
+
+// LocalityValidationError is the validation error returned by
+// Locality.Validate if the designated constraints aren't met.
+type LocalityValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e LocalityValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e LocalityValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e LocalityValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e LocalityValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e LocalityValidationError) ErrorName() string { return "LocalityValidationError" }
+
+// Error satisfies the builtin error interface
+func (e LocalityValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sLocality.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = LocalityValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = LocalityValidationError{}
+
+// Validate checks the field values on BuildVersion with the rules defined in
+// the proto definition for this message. If any rules are violated, an error
+// is returned.
+func (m *BuildVersion) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if v, ok := interface{}(m.GetVersion()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BuildVersionValidationError{
+ field: "Version",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BuildVersionValidationError{
+ field: "Metadata",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ return nil
+}
+
+// BuildVersionValidationError is the validation error returned by
+// BuildVersion.Validate if the designated constraints aren't met.
+type BuildVersionValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e BuildVersionValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e BuildVersionValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e BuildVersionValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e BuildVersionValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e BuildVersionValidationError) ErrorName() string { return "BuildVersionValidationError" }
+
+// Error satisfies the builtin error interface
+func (e BuildVersionValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sBuildVersion.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = BuildVersionValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = BuildVersionValidationError{}
+
+// Validate checks the field values on Extension with the rules defined in the
+// proto definition for this message. If any rules are violated, an error is returned.
+func (m *Extension) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ // no validation rules for Name
+
+ // no validation rules for Category
+
+ // no validation rules for TypeDescriptor
+
+ if v, ok := interface{}(m.GetVersion()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ExtensionValidationError{
+ field: "Version",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for Disabled
+
+ return nil
+}
+
+// ExtensionValidationError is the validation error returned by
+// Extension.Validate if the designated constraints aren't met.
+type ExtensionValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ExtensionValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ExtensionValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ExtensionValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ExtensionValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ExtensionValidationError) ErrorName() string { return "ExtensionValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ExtensionValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sExtension.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ExtensionValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ExtensionValidationError{}
+
+// Validate checks the field values on Node with the rules defined in the proto
+// definition for this message. If any rules are violated, an error is returned.
+func (m *Node) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ // no validation rules for Id
+
+ // no validation rules for Cluster
+
+ if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return NodeValidationError{
+ field: "Metadata",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ for key, val := range m.GetDynamicParameters() {
+ _ = val
+
+ // no validation rules for DynamicParameters[key]
+
+ if v, ok := interface{}(val).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return NodeValidationError{
+ field: fmt.Sprintf("DynamicParameters[%v]", key),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if v, ok := interface{}(m.GetLocality()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return NodeValidationError{
+ field: "Locality",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for UserAgentName
+
+ for idx, item := range m.GetExtensions() {
+ _, _ = idx, item
+
+ if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return NodeValidationError{
+ field: fmt.Sprintf("Extensions[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetListeningAddresses() {
+ _, _ = idx, item
+
+ if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return NodeValidationError{
+ field: fmt.Sprintf("ListeningAddresses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ // no validation rules for HiddenEnvoyDeprecatedBuildVersion
+
+ switch m.UserAgentVersionType.(type) {
+
+ case *Node_UserAgentVersion:
+ // no validation rules for UserAgentVersion
+
+ case *Node_UserAgentBuildVersion:
+
+ if v, ok := interface{}(m.GetUserAgentBuildVersion()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return NodeValidationError{
+ field: "UserAgentBuildVersion",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// NodeValidationError is the validation error returned by Node.Validate if the
+// designated constraints aren't met.
+type NodeValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e NodeValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e NodeValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e NodeValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e NodeValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e NodeValidationError) ErrorName() string { return "NodeValidationError" }
+
+// Error satisfies the builtin error interface
+func (e NodeValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sNode.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = NodeValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = NodeValidationError{}
+
+// Validate checks the field values on Metadata with the rules defined in the
+// proto definition for this message. If any rules are violated, an error is returned.
+func (m *Metadata) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ for key, val := range m.GetFilterMetadata() {
+ _ = val
+
+ // no validation rules for FilterMetadata[key]
+
+ if v, ok := interface{}(val).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return MetadataValidationError{
+ field: fmt.Sprintf("FilterMetadata[%v]", key),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for key, val := range m.GetTypedFilterMetadata() {
+ _ = val
+
+ // no validation rules for TypedFilterMetadata[key]
+
+ if v, ok := interface{}(val).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return MetadataValidationError{
+ field: fmt.Sprintf("TypedFilterMetadata[%v]", key),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// MetadataValidationError is the validation error returned by
+// Metadata.Validate if the designated constraints aren't met.
+type MetadataValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e MetadataValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e MetadataValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e MetadataValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e MetadataValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e MetadataValidationError) ErrorName() string { return "MetadataValidationError" }
+
+// Error satisfies the builtin error interface
+func (e MetadataValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMetadata.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = MetadataValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = MetadataValidationError{}
+
+// Validate checks the field values on RuntimeUInt32 with the rules defined in
+// the proto definition for this message. If any rules are violated, an error
+// is returned.
+func (m *RuntimeUInt32) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ // no validation rules for DefaultValue
+
+ if utf8.RuneCountInString(m.GetRuntimeKey()) < 1 {
+ return RuntimeUInt32ValidationError{
+ field: "RuntimeKey",
+ reason: "value length must be at least 1 runes",
+ }
+ }
+
+ return nil
+}
+
+// RuntimeUInt32ValidationError is the validation error returned by
+// RuntimeUInt32.Validate if the designated constraints aren't met.
+type RuntimeUInt32ValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RuntimeUInt32ValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RuntimeUInt32ValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RuntimeUInt32ValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RuntimeUInt32ValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RuntimeUInt32ValidationError) ErrorName() string { return "RuntimeUInt32ValidationError" }
+
+// Error satisfies the builtin error interface
+func (e RuntimeUInt32ValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRuntimeUInt32.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RuntimeUInt32ValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RuntimeUInt32ValidationError{}
+
+// Validate checks the field values on RuntimePercent with the rules defined in
+// the proto definition for this message. If any rules are violated, an error
+// is returned.
+func (m *RuntimePercent) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if v, ok := interface{}(m.GetDefaultValue()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RuntimePercentValidationError{
+ field: "DefaultValue",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if utf8.RuneCountInString(m.GetRuntimeKey()) < 1 {
+ return RuntimePercentValidationError{
+ field: "RuntimeKey",
+ reason: "value length must be at least 1 runes",
+ }
+ }
+
+ return nil
+}
+
+// RuntimePercentValidationError is the validation error returned by
+// RuntimePercent.Validate if the designated constraints aren't met.
+type RuntimePercentValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RuntimePercentValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RuntimePercentValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RuntimePercentValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RuntimePercentValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RuntimePercentValidationError) ErrorName() string { return "RuntimePercentValidationError" }
+
+// Error satisfies the builtin error interface
+func (e RuntimePercentValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRuntimePercent.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RuntimePercentValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RuntimePercentValidationError{}
+
+// Validate checks the field values on RuntimeDouble with the rules defined in
+// the proto definition for this message. If any rules are violated, an error
+// is returned.
+func (m *RuntimeDouble) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ // no validation rules for DefaultValue
+
+ if utf8.RuneCountInString(m.GetRuntimeKey()) < 1 {
+ return RuntimeDoubleValidationError{
+ field: "RuntimeKey",
+ reason: "value length must be at least 1 runes",
+ }
+ }
+
+ return nil
+}
+
+// RuntimeDoubleValidationError is the validation error returned by
+// RuntimeDouble.Validate if the designated constraints aren't met.
+type RuntimeDoubleValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RuntimeDoubleValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RuntimeDoubleValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RuntimeDoubleValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RuntimeDoubleValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RuntimeDoubleValidationError) ErrorName() string { return "RuntimeDoubleValidationError" }
+
+// Error satisfies the builtin error interface
+func (e RuntimeDoubleValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRuntimeDouble.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RuntimeDoubleValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RuntimeDoubleValidationError{}
+
+// Validate checks the field values on RuntimeFeatureFlag with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *RuntimeFeatureFlag) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if m.GetDefaultValue() == nil {
+ return RuntimeFeatureFlagValidationError{
+ field: "DefaultValue",
+ reason: "value is required",
+ }
+ }
+
+ if v, ok := interface{}(m.GetDefaultValue()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RuntimeFeatureFlagValidationError{
+ field: "DefaultValue",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if utf8.RuneCountInString(m.GetRuntimeKey()) < 1 {
+ return RuntimeFeatureFlagValidationError{
+ field: "RuntimeKey",
+ reason: "value length must be at least 1 runes",
+ }
+ }
+
+ return nil
+}
+
+// RuntimeFeatureFlagValidationError is the validation error returned by
+// RuntimeFeatureFlag.Validate if the designated constraints aren't met.
+type RuntimeFeatureFlagValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RuntimeFeatureFlagValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RuntimeFeatureFlagValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RuntimeFeatureFlagValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RuntimeFeatureFlagValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RuntimeFeatureFlagValidationError) ErrorName() string {
+ return "RuntimeFeatureFlagValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e RuntimeFeatureFlagValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRuntimeFeatureFlag.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RuntimeFeatureFlagValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RuntimeFeatureFlagValidationError{}
+
+// Validate checks the field values on HeaderValue with the rules defined in
+// the proto definition for this message. If any rules are violated, an error
+// is returned.
+func (m *HeaderValue) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if utf8.RuneCountInString(m.GetKey()) < 1 {
+ return HeaderValueValidationError{
+ field: "Key",
+ reason: "value length must be at least 1 runes",
+ }
+ }
+
+ if len(m.GetKey()) > 16384 {
+ return HeaderValueValidationError{
+ field: "Key",
+ reason: "value length must be at most 16384 bytes",
+ }
+ }
+
+ if !_HeaderValue_Key_Pattern.MatchString(m.GetKey()) {
+ return HeaderValueValidationError{
+ field: "Key",
+ reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"",
+ }
+ }
+
+ if len(m.GetValue()) > 16384 {
+ return HeaderValueValidationError{
+ field: "Value",
+ reason: "value length must be at most 16384 bytes",
+ }
+ }
+
+ if !_HeaderValue_Value_Pattern.MatchString(m.GetValue()) {
+ return HeaderValueValidationError{
+ field: "Value",
+ reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"",
+ }
+ }
+
+ return nil
+}
+
+// HeaderValueValidationError is the validation error returned by
+// HeaderValue.Validate if the designated constraints aren't met.
+type HeaderValueValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HeaderValueValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HeaderValueValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HeaderValueValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HeaderValueValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HeaderValueValidationError) ErrorName() string { return "HeaderValueValidationError" }
+
+// Error satisfies the builtin error interface
+func (e HeaderValueValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHeaderValue.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HeaderValueValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HeaderValueValidationError{}
+
+var _HeaderValue_Key_Pattern = regexp.MustCompile("^[^\x00\n\r]*$")
+
+var _HeaderValue_Value_Pattern = regexp.MustCompile("^[^\x00\n\r]*$")
+
+// Validate checks the field values on HeaderValueOption with the rules defined
+// in the proto definition for this message. If any rules are violated, an
+// error is returned.
+func (m *HeaderValueOption) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if m.GetHeader() == nil {
+ return HeaderValueOptionValidationError{
+ field: "Header",
+ reason: "value is required",
+ }
+ }
+
+ if v, ok := interface{}(m.GetHeader()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HeaderValueOptionValidationError{
+ field: "Header",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if v, ok := interface{}(m.GetAppend()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HeaderValueOptionValidationError{
+ field: "Append",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ return nil
+}
+
+// HeaderValueOptionValidationError is the validation error returned by
+// HeaderValueOption.Validate if the designated constraints aren't met.
+type HeaderValueOptionValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HeaderValueOptionValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HeaderValueOptionValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HeaderValueOptionValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HeaderValueOptionValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HeaderValueOptionValidationError) ErrorName() string {
+ return "HeaderValueOptionValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e HeaderValueOptionValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHeaderValueOption.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HeaderValueOptionValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HeaderValueOptionValidationError{}
+
+// Validate checks the field values on HeaderMap with the rules defined in the
+// proto definition for this message. If any rules are violated, an error is returned.
+func (m *HeaderMap) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ for idx, item := range m.GetHeaders() {
+ _, _ = idx, item
+
+ if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HeaderMapValidationError{
+ field: fmt.Sprintf("Headers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// HeaderMapValidationError is the validation error returned by
+// HeaderMap.Validate if the designated constraints aren't met.
+type HeaderMapValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HeaderMapValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HeaderMapValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HeaderMapValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HeaderMapValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HeaderMapValidationError) ErrorName() string { return "HeaderMapValidationError" }
+
+// Error satisfies the builtin error interface
+func (e HeaderMapValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHeaderMap.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HeaderMapValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HeaderMapValidationError{}
+
+// Validate checks the field values on WatchedDirectory with the rules defined
+// in the proto definition for this message. If any rules are violated, an
+// error is returned.
+func (m *WatchedDirectory) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if utf8.RuneCountInString(m.GetPath()) < 1 {
+ return WatchedDirectoryValidationError{
+ field: "Path",
+ reason: "value length must be at least 1 runes",
+ }
+ }
+
+ return nil
+}
+
+// WatchedDirectoryValidationError is the validation error returned by
+// WatchedDirectory.Validate if the designated constraints aren't met.
+type WatchedDirectoryValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e WatchedDirectoryValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e WatchedDirectoryValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e WatchedDirectoryValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e WatchedDirectoryValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e WatchedDirectoryValidationError) ErrorName() string { return "WatchedDirectoryValidationError" }
+
+// Error satisfies the builtin error interface
+func (e WatchedDirectoryValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sWatchedDirectory.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = WatchedDirectoryValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = WatchedDirectoryValidationError{}
+
+// Validate checks the field values on DataSource with the rules defined in the
+// proto definition for this message. If any rules are violated, an error is returned.
+func (m *DataSource) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ switch m.Specifier.(type) {
+
+ case *DataSource_Filename:
+
+ if utf8.RuneCountInString(m.GetFilename()) < 1 {
+ return DataSourceValidationError{
+ field: "Filename",
+ reason: "value length must be at least 1 runes",
+ }
+ }
+
+ case *DataSource_InlineBytes:
+ // no validation rules for InlineBytes
+
+ case *DataSource_InlineString:
+ // no validation rules for InlineString
+
+ default:
+ return DataSourceValidationError{
+ field: "Specifier",
+ reason: "value is required",
+ }
+
+ }
+
+ return nil
+}
+
+// DataSourceValidationError is the validation error returned by
+// DataSource.Validate if the designated constraints aren't met.
+type DataSourceValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e DataSourceValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e DataSourceValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e DataSourceValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e DataSourceValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e DataSourceValidationError) ErrorName() string { return "DataSourceValidationError" }
+
+// Error satisfies the builtin error interface
+func (e DataSourceValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sDataSource.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = DataSourceValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = DataSourceValidationError{}
+
+// Validate checks the field values on RetryPolicy with the rules defined in
+// the proto definition for this message. If any rules are violated, an error
+// is returned.
+func (m *RetryPolicy) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if v, ok := interface{}(m.GetRetryBackOff()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RetryPolicyValidationError{
+ field: "RetryBackOff",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if v, ok := interface{}(m.GetNumRetries()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RetryPolicyValidationError{
+ field: "NumRetries",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ return nil
+}
+
+// RetryPolicyValidationError is the validation error returned by
+// RetryPolicy.Validate if the designated constraints aren't met.
+type RetryPolicyValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RetryPolicyValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RetryPolicyValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RetryPolicyValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RetryPolicyValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RetryPolicyValidationError) ErrorName() string { return "RetryPolicyValidationError" }
+
+// Error satisfies the builtin error interface
+func (e RetryPolicyValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRetryPolicy.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RetryPolicyValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RetryPolicyValidationError{}
+
+// Validate checks the field values on RemoteDataSource with the rules defined
+// in the proto definition for this message. If any rules are violated, an
+// error is returned.
+func (m *RemoteDataSource) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if m.GetHttpUri() == nil {
+ return RemoteDataSourceValidationError{
+ field: "HttpUri",
+ reason: "value is required",
+ }
+ }
+
+ if v, ok := interface{}(m.GetHttpUri()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RemoteDataSourceValidationError{
+ field: "HttpUri",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if utf8.RuneCountInString(m.GetSha256()) < 1 {
+ return RemoteDataSourceValidationError{
+ field: "Sha256",
+ reason: "value length must be at least 1 runes",
+ }
+ }
+
+ if v, ok := interface{}(m.GetRetryPolicy()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RemoteDataSourceValidationError{
+ field: "RetryPolicy",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ return nil
+}
+
+// RemoteDataSourceValidationError is the validation error returned by
+// RemoteDataSource.Validate if the designated constraints aren't met.
+type RemoteDataSourceValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RemoteDataSourceValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RemoteDataSourceValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RemoteDataSourceValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RemoteDataSourceValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RemoteDataSourceValidationError) ErrorName() string { return "RemoteDataSourceValidationError" }
+
+// Error satisfies the builtin error interface
+func (e RemoteDataSourceValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRemoteDataSource.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RemoteDataSourceValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RemoteDataSourceValidationError{}
+
+// Validate checks the field values on AsyncDataSource with the rules defined
+// in the proto definition for this message. If any rules are violated, an
+// error is returned.
+func (m *AsyncDataSource) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ switch m.Specifier.(type) {
+
+ case *AsyncDataSource_Local:
+
+ if v, ok := interface{}(m.GetLocal()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AsyncDataSourceValidationError{
+ field: "Local",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *AsyncDataSource_Remote:
+
+ if v, ok := interface{}(m.GetRemote()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AsyncDataSourceValidationError{
+ field: "Remote",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ return AsyncDataSourceValidationError{
+ field: "Specifier",
+ reason: "value is required",
+ }
+
+ }
+
+ return nil
+}
+
+// AsyncDataSourceValidationError is the validation error returned by
+// AsyncDataSource.Validate if the designated constraints aren't met.
+type AsyncDataSourceValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e AsyncDataSourceValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e AsyncDataSourceValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e AsyncDataSourceValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e AsyncDataSourceValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e AsyncDataSourceValidationError) ErrorName() string { return "AsyncDataSourceValidationError" }
+
+// Error satisfies the builtin error interface
+func (e AsyncDataSourceValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sAsyncDataSource.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = AsyncDataSourceValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = AsyncDataSourceValidationError{}
+
+// Validate checks the field values on TransportSocket with the rules defined
+// in the proto definition for this message. If any rules are violated, an
+// error is returned.
+func (m *TransportSocket) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if utf8.RuneCountInString(m.GetName()) < 1 {
+ return TransportSocketValidationError{
+ field: "Name",
+ reason: "value length must be at least 1 runes",
+ }
+ }
+
+ switch m.ConfigType.(type) {
+
+ case *TransportSocket_TypedConfig:
+
+ if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return TransportSocketValidationError{
+ field: "TypedConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *TransportSocket_HiddenEnvoyDeprecatedConfig:
+
+ if v, ok := interface{}(m.GetHiddenEnvoyDeprecatedConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return TransportSocketValidationError{
+ field: "HiddenEnvoyDeprecatedConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// TransportSocketValidationError is the validation error returned by
+// TransportSocket.Validate if the designated constraints aren't met.
+type TransportSocketValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e TransportSocketValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e TransportSocketValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e TransportSocketValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e TransportSocketValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e TransportSocketValidationError) ErrorName() string { return "TransportSocketValidationError" }
+
+// Error satisfies the builtin error interface
+func (e TransportSocketValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sTransportSocket.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = TransportSocketValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = TransportSocketValidationError{}
+
+// Validate checks the field values on RuntimeFractionalPercent with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *RuntimeFractionalPercent) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if m.GetDefaultValue() == nil {
+ return RuntimeFractionalPercentValidationError{
+ field: "DefaultValue",
+ reason: "value is required",
+ }
+ }
+
+ if v, ok := interface{}(m.GetDefaultValue()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RuntimeFractionalPercentValidationError{
+ field: "DefaultValue",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for RuntimeKey
+
+ return nil
+}
+
+// RuntimeFractionalPercentValidationError is the validation error returned by
+// RuntimeFractionalPercent.Validate if the designated constraints aren't met.
+type RuntimeFractionalPercentValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RuntimeFractionalPercentValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RuntimeFractionalPercentValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RuntimeFractionalPercentValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RuntimeFractionalPercentValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RuntimeFractionalPercentValidationError) ErrorName() string {
+ return "RuntimeFractionalPercentValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e RuntimeFractionalPercentValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRuntimeFractionalPercent.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RuntimeFractionalPercentValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RuntimeFractionalPercentValidationError{}
+
+// Validate checks the field values on ControlPlane with the rules defined in
+// the proto definition for this message. If any rules are violated, an error
+// is returned.
+func (m *ControlPlane) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ // no validation rules for Identifier
+
+ return nil
+}
+
+// ControlPlaneValidationError is the validation error returned by
+// ControlPlane.Validate if the designated constraints aren't met.
+type ControlPlaneValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ControlPlaneValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ControlPlaneValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ControlPlaneValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ControlPlaneValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ControlPlaneValidationError) ErrorName() string { return "ControlPlaneValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ControlPlaneValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sControlPlane.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ControlPlaneValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ControlPlaneValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.go
new file mode 100644
index 000000000..4663e7fcc
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.go
@@ -0,0 +1,920 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/config/core/v3/config_source.proto
+
+package envoy_config_core_v3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ v3 "github.com/cncf/xds/go/xds/core/v3"
+ _ "github.com/envoyproxy/go-control-plane/envoy/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ duration "github.com/golang/protobuf/ptypes/duration"
+ wrappers "github.com/golang/protobuf/ptypes/wrappers"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// xDS API and non-xDS services version. This is used to describe both resource and transport
+// protocol versions (in distinct configuration fields).
+type ApiVersion int32
+
+const (
+ // When not specified, we assume v2, to ease migration to Envoy's stable API
+ // versioning. If a client does not support v2 (e.g. due to deprecation), this
+ // is an invalid value.
+ //
+ // Deprecated: Do not use.
+ ApiVersion_AUTO ApiVersion = 0
+ // Use xDS v2 API.
+ //
+ // Deprecated: Do not use.
+ ApiVersion_V2 ApiVersion = 1
+ // Use xDS v3 API.
+ ApiVersion_V3 ApiVersion = 2
+)
+
+// Enum value maps for ApiVersion.
+var (
+ ApiVersion_name = map[int32]string{
+ 0: "AUTO",
+ 1: "V2",
+ 2: "V3",
+ }
+ ApiVersion_value = map[string]int32{
+ "AUTO": 0,
+ "V2": 1,
+ "V3": 2,
+ }
+)
+
+func (x ApiVersion) Enum() *ApiVersion {
+ p := new(ApiVersion)
+ *p = x
+ return p
+}
+
+func (x ApiVersion) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ApiVersion) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_core_v3_config_source_proto_enumTypes[0].Descriptor()
+}
+
+func (ApiVersion) Type() protoreflect.EnumType {
+ return &file_envoy_config_core_v3_config_source_proto_enumTypes[0]
+}
+
+func (x ApiVersion) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ApiVersion.Descriptor instead.
+func (ApiVersion) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_config_source_proto_rawDescGZIP(), []int{0}
+}
+
+// APIs may be fetched via either REST or gRPC.
+type ApiConfigSource_ApiType int32
+
+const (
+ // Ideally this would be 'reserved 0' but one can't reserve the default
+ // value. Instead we throw an exception if this is ever used.
+ //
+ // Deprecated: Do not use.
+ ApiConfigSource_hidden_envoy_deprecated_UNSUPPORTED_REST_LEGACY ApiConfigSource_ApiType = 0
+ // REST-JSON v2 API. The `canonical JSON encoding
+ // `_ for
+ // the v2 protos is used.
+ ApiConfigSource_REST ApiConfigSource_ApiType = 1
+ // SotW gRPC service.
+ ApiConfigSource_GRPC ApiConfigSource_ApiType = 2
+ // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response}
+ // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state
+ // with every update, the xDS server only sends what has changed since the last update.
+ ApiConfigSource_DELTA_GRPC ApiConfigSource_ApiType = 3
+ // SotW xDS gRPC with ADS. All resources which resolve to this configuration source will be
+ // multiplexed on a single connection to an ADS endpoint.
+ // [#not-implemented-hide:]
+ ApiConfigSource_AGGREGATED_GRPC ApiConfigSource_ApiType = 5
+ // Delta xDS gRPC with ADS. All resources which resolve to this configuration source will be
+ // multiplexed on a single connection to an ADS endpoint.
+ // [#not-implemented-hide:]
+ ApiConfigSource_AGGREGATED_DELTA_GRPC ApiConfigSource_ApiType = 6
+)
+
+// Enum value maps for ApiConfigSource_ApiType.
+var (
+ ApiConfigSource_ApiType_name = map[int32]string{
+ 0: "hidden_envoy_deprecated_UNSUPPORTED_REST_LEGACY",
+ 1: "REST",
+ 2: "GRPC",
+ 3: "DELTA_GRPC",
+ 5: "AGGREGATED_GRPC",
+ 6: "AGGREGATED_DELTA_GRPC",
+ }
+ ApiConfigSource_ApiType_value = map[string]int32{
+ "hidden_envoy_deprecated_UNSUPPORTED_REST_LEGACY": 0,
+ "REST": 1,
+ "GRPC": 2,
+ "DELTA_GRPC": 3,
+ "AGGREGATED_GRPC": 5,
+ "AGGREGATED_DELTA_GRPC": 6,
+ }
+)
+
+func (x ApiConfigSource_ApiType) Enum() *ApiConfigSource_ApiType {
+ p := new(ApiConfigSource_ApiType)
+ *p = x
+ return p
+}
+
+func (x ApiConfigSource_ApiType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ApiConfigSource_ApiType) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_core_v3_config_source_proto_enumTypes[1].Descriptor()
+}
+
+func (ApiConfigSource_ApiType) Type() protoreflect.EnumType {
+ return &file_envoy_config_core_v3_config_source_proto_enumTypes[1]
+}
+
+func (x ApiConfigSource_ApiType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ApiConfigSource_ApiType.Descriptor instead.
+func (ApiConfigSource_ApiType) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_config_source_proto_rawDescGZIP(), []int{0, 0}
+}
+
+// API configuration source. This identifies the API type and cluster that Envoy
+// will use to fetch an xDS API.
+// [#next-free-field: 9]
+type ApiConfigSource struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // API type (gRPC, REST, delta gRPC)
+ ApiType ApiConfigSource_ApiType `protobuf:"varint,1,opt,name=api_type,json=apiType,proto3,enum=envoy.config.core.v3.ApiConfigSource_ApiType" json:"api_type,omitempty"`
+ // API version for xDS transport protocol. This describes the xDS gRPC/REST
+ // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire.
+ TransportApiVersion ApiVersion `protobuf:"varint,8,opt,name=transport_api_version,json=transportApiVersion,proto3,enum=envoy.config.core.v3.ApiVersion" json:"transport_api_version,omitempty"`
+ // Cluster names should be used only with REST. If > 1
+ // cluster is defined, clusters will be cycled through if any kind of failure
+ // occurs.
+ //
+ // .. note::
+ //
+ // The cluster with name ``cluster_name`` must be statically defined and its
+ // type must not be ``EDS``.
+ ClusterNames []string `protobuf:"bytes,2,rep,name=cluster_names,json=clusterNames,proto3" json:"cluster_names,omitempty"`
+ // Multiple gRPC services be provided for GRPC. If > 1 cluster is defined,
+ // services will be cycled through if any kind of failure occurs.
+ GrpcServices []*GrpcService `protobuf:"bytes,4,rep,name=grpc_services,json=grpcServices,proto3" json:"grpc_services,omitempty"`
+ // For REST APIs, the delay between successive polls.
+ RefreshDelay *duration.Duration `protobuf:"bytes,3,opt,name=refresh_delay,json=refreshDelay,proto3" json:"refresh_delay,omitempty"`
+ // For REST APIs, the request timeout. If not set, a default value of 1s will be used.
+ RequestTimeout *duration.Duration `protobuf:"bytes,5,opt,name=request_timeout,json=requestTimeout,proto3" json:"request_timeout,omitempty"`
+ // For GRPC APIs, the rate limit settings. If present, discovery requests made by Envoy will be
+ // rate limited.
+ RateLimitSettings *RateLimitSettings `protobuf:"bytes,6,opt,name=rate_limit_settings,json=rateLimitSettings,proto3" json:"rate_limit_settings,omitempty"`
+ // Skip the node identifier in subsequent discovery requests for streaming gRPC config types.
+ SetNodeOnFirstMessageOnly bool `protobuf:"varint,7,opt,name=set_node_on_first_message_only,json=setNodeOnFirstMessageOnly,proto3" json:"set_node_on_first_message_only,omitempty"`
+}
+
+func (x *ApiConfigSource) Reset() {
+ *x = ApiConfigSource{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ApiConfigSource) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ApiConfigSource) ProtoMessage() {}
+
+func (x *ApiConfigSource) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ApiConfigSource.ProtoReflect.Descriptor instead.
+func (*ApiConfigSource) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_config_source_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ApiConfigSource) GetApiType() ApiConfigSource_ApiType {
+ if x != nil {
+ return x.ApiType
+ }
+ return ApiConfigSource_hidden_envoy_deprecated_UNSUPPORTED_REST_LEGACY
+}
+
+func (x *ApiConfigSource) GetTransportApiVersion() ApiVersion {
+ if x != nil {
+ return x.TransportApiVersion
+ }
+ return ApiVersion_AUTO
+}
+
+func (x *ApiConfigSource) GetClusterNames() []string {
+ if x != nil {
+ return x.ClusterNames
+ }
+ return nil
+}
+
+func (x *ApiConfigSource) GetGrpcServices() []*GrpcService {
+ if x != nil {
+ return x.GrpcServices
+ }
+ return nil
+}
+
+func (x *ApiConfigSource) GetRefreshDelay() *duration.Duration {
+ if x != nil {
+ return x.RefreshDelay
+ }
+ return nil
+}
+
+func (x *ApiConfigSource) GetRequestTimeout() *duration.Duration {
+ if x != nil {
+ return x.RequestTimeout
+ }
+ return nil
+}
+
+func (x *ApiConfigSource) GetRateLimitSettings() *RateLimitSettings {
+ if x != nil {
+ return x.RateLimitSettings
+ }
+ return nil
+}
+
+func (x *ApiConfigSource) GetSetNodeOnFirstMessageOnly() bool {
+ if x != nil {
+ return x.SetNodeOnFirstMessageOnly
+ }
+ return false
+}
+
+// Aggregated Discovery Service (ADS) options. This is currently empty, but when
+// set in :ref:`ConfigSource ` can be used to
+// specify that ADS is to be used.
+type AggregatedConfigSource struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *AggregatedConfigSource) Reset() {
+ *x = AggregatedConfigSource{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AggregatedConfigSource) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AggregatedConfigSource) ProtoMessage() {}
+
+func (x *AggregatedConfigSource) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AggregatedConfigSource.ProtoReflect.Descriptor instead.
+func (*AggregatedConfigSource) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_config_source_proto_rawDescGZIP(), []int{1}
+}
+
+// [#not-implemented-hide:]
+// Self-referencing config source options. This is currently empty, but when
+// set in :ref:`ConfigSource ` can be used to
+// specify that other data can be obtained from the same server.
+type SelfConfigSource struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // API version for xDS transport protocol. This describes the xDS gRPC/REST
+ // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire.
+ TransportApiVersion ApiVersion `protobuf:"varint,1,opt,name=transport_api_version,json=transportApiVersion,proto3,enum=envoy.config.core.v3.ApiVersion" json:"transport_api_version,omitempty"`
+}
+
+func (x *SelfConfigSource) Reset() {
+ *x = SelfConfigSource{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SelfConfigSource) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SelfConfigSource) ProtoMessage() {}
+
+func (x *SelfConfigSource) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SelfConfigSource.ProtoReflect.Descriptor instead.
+func (*SelfConfigSource) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_config_source_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *SelfConfigSource) GetTransportApiVersion() ApiVersion {
+ if x != nil {
+ return x.TransportApiVersion
+ }
+ return ApiVersion_AUTO
+}
+
+// Rate Limit settings to be applied for discovery requests made by Envoy.
+type RateLimitSettings struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Maximum number of tokens to be used for rate limiting discovery request calls. If not set, a
+ // default value of 100 will be used.
+ MaxTokens *wrappers.UInt32Value `protobuf:"bytes,1,opt,name=max_tokens,json=maxTokens,proto3" json:"max_tokens,omitempty"`
+ // Rate at which tokens will be filled per second. If not set, a default fill rate of 10 tokens
+ // per second will be used.
+ FillRate *wrappers.DoubleValue `protobuf:"bytes,2,opt,name=fill_rate,json=fillRate,proto3" json:"fill_rate,omitempty"`
+}
+
+func (x *RateLimitSettings) Reset() {
+ *x = RateLimitSettings{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RateLimitSettings) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RateLimitSettings) ProtoMessage() {}
+
+func (x *RateLimitSettings) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RateLimitSettings.ProtoReflect.Descriptor instead.
+func (*RateLimitSettings) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_config_source_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *RateLimitSettings) GetMaxTokens() *wrappers.UInt32Value {
+ if x != nil {
+ return x.MaxTokens
+ }
+ return nil
+}
+
+func (x *RateLimitSettings) GetFillRate() *wrappers.DoubleValue {
+ if x != nil {
+ return x.FillRate
+ }
+ return nil
+}
+
+// Configuration for :ref:`listeners `, :ref:`clusters
+// `, :ref:`routes
+// `, :ref:`endpoints
+// ` etc. may either be sourced from the
+// filesystem or from an xDS API source. Filesystem configs are watched with
+// inotify for updates.
+// [#next-free-field: 8]
+type ConfigSource struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Authorities that this config source may be used for. An authority specified in a xdstp:// URL
+ // is resolved to a *ConfigSource* prior to configuration fetch. This field provides the
+ // association between authority name and configuration source.
+ // [#not-implemented-hide:]
+ Authorities []*v3.Authority `protobuf:"bytes,7,rep,name=authorities,proto3" json:"authorities,omitempty"`
+ // Types that are assignable to ConfigSourceSpecifier:
+ // *ConfigSource_Path
+ // *ConfigSource_ApiConfigSource
+ // *ConfigSource_Ads
+ // *ConfigSource_Self
+ ConfigSourceSpecifier isConfigSource_ConfigSourceSpecifier `protobuf_oneof:"config_source_specifier"`
+ // When this timeout is specified, Envoy will wait no longer than the specified time for first
+ // config response on this xDS subscription during the :ref:`initialization process
+ // `. After reaching the timeout, Envoy will move to the next
+ // initialization phase, even if the first config is not delivered yet. The timer is activated
+ // when the xDS API subscription starts, and is disarmed on first config update or on error. 0
+ // means no timeout - Envoy will wait indefinitely for the first xDS config (unless another
+ // timeout applies). The default is 15s.
+ InitialFetchTimeout *duration.Duration `protobuf:"bytes,4,opt,name=initial_fetch_timeout,json=initialFetchTimeout,proto3" json:"initial_fetch_timeout,omitempty"`
+ // API version for xDS resources. This implies the type URLs that the client
+ // will request for resources and the resource type that the client will in
+ // turn expect to be delivered.
+ ResourceApiVersion ApiVersion `protobuf:"varint,6,opt,name=resource_api_version,json=resourceApiVersion,proto3,enum=envoy.config.core.v3.ApiVersion" json:"resource_api_version,omitempty"`
+}
+
+func (x *ConfigSource) Reset() {
+ *x = ConfigSource{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ConfigSource) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ConfigSource) ProtoMessage() {}
+
+func (x *ConfigSource) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ConfigSource.ProtoReflect.Descriptor instead.
+func (*ConfigSource) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_config_source_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *ConfigSource) GetAuthorities() []*v3.Authority {
+ if x != nil {
+ return x.Authorities
+ }
+ return nil
+}
+
+func (m *ConfigSource) GetConfigSourceSpecifier() isConfigSource_ConfigSourceSpecifier {
+ if m != nil {
+ return m.ConfigSourceSpecifier
+ }
+ return nil
+}
+
+func (x *ConfigSource) GetPath() string {
+ if x, ok := x.GetConfigSourceSpecifier().(*ConfigSource_Path); ok {
+ return x.Path
+ }
+ return ""
+}
+
+func (x *ConfigSource) GetApiConfigSource() *ApiConfigSource {
+ if x, ok := x.GetConfigSourceSpecifier().(*ConfigSource_ApiConfigSource); ok {
+ return x.ApiConfigSource
+ }
+ return nil
+}
+
+func (x *ConfigSource) GetAds() *AggregatedConfigSource {
+ if x, ok := x.GetConfigSourceSpecifier().(*ConfigSource_Ads); ok {
+ return x.Ads
+ }
+ return nil
+}
+
+func (x *ConfigSource) GetSelf() *SelfConfigSource {
+ if x, ok := x.GetConfigSourceSpecifier().(*ConfigSource_Self); ok {
+ return x.Self
+ }
+ return nil
+}
+
+func (x *ConfigSource) GetInitialFetchTimeout() *duration.Duration {
+ if x != nil {
+ return x.InitialFetchTimeout
+ }
+ return nil
+}
+
+func (x *ConfigSource) GetResourceApiVersion() ApiVersion {
+ if x != nil {
+ return x.ResourceApiVersion
+ }
+ return ApiVersion_AUTO
+}
+
+type isConfigSource_ConfigSourceSpecifier interface {
+ isConfigSource_ConfigSourceSpecifier()
+}
+
+type ConfigSource_Path struct {
+ // Path on the filesystem to source and watch for configuration updates.
+ // When sourcing configuration for :ref:`secret `,
+ // the certificate and key files are also watched for updates.
+ //
+ // .. note::
+ //
+ // The path to the source must exist at config load time.
+ //
+ // .. note::
+ //
+ // Envoy will only watch the file path for *moves.* This is because in general only moves
+ // are atomic. The same method of swapping files as is demonstrated in the
+ // :ref:`runtime documentation ` can be used here also.
+ Path string `protobuf:"bytes,1,opt,name=path,proto3,oneof"`
+}
+
+type ConfigSource_ApiConfigSource struct {
+ // API configuration source.
+ ApiConfigSource *ApiConfigSource `protobuf:"bytes,2,opt,name=api_config_source,json=apiConfigSource,proto3,oneof"`
+}
+
+type ConfigSource_Ads struct {
+ // When set, ADS will be used to fetch resources. The ADS API configuration
+ // source in the bootstrap configuration is used.
+ Ads *AggregatedConfigSource `protobuf:"bytes,3,opt,name=ads,proto3,oneof"`
+}
+
+type ConfigSource_Self struct {
+ // [#not-implemented-hide:]
+ // When set, the client will access the resources from the same server it got the
+ // ConfigSource from, although not necessarily from the same stream. This is similar to the
+ // :ref:`ads` field, except that the client may use a
+ // different stream to the same server. As a result, this field can be used for things
+ // like LRS that cannot be sent on an ADS stream. It can also be used to link from (e.g.)
+ // LDS to RDS on the same server without requiring the management server to know its name
+ // or required credentials.
+ // [#next-major-version: In xDS v3, consider replacing the ads field with this one, since
+ // this field can implicitly mean to use the same stream in the case where the ConfigSource
+ // is provided via ADS and the specified data can also be obtained via ADS.]
+ Self *SelfConfigSource `protobuf:"bytes,5,opt,name=self,proto3,oneof"`
+}
+
+func (*ConfigSource_Path) isConfigSource_ConfigSourceSpecifier() {}
+
+func (*ConfigSource_ApiConfigSource) isConfigSource_ConfigSourceSpecifier() {}
+
+func (*ConfigSource_Ads) isConfigSource_ConfigSourceSpecifier() {}
+
+func (*ConfigSource_Self) isConfigSource_ConfigSourceSpecifier() {}
+
+var File_envoy_config_core_v3_config_source_proto protoreflect.FileDescriptor
+
+var file_envoy_config_core_v3_config_source_proto_rawDesc = []byte{
+ 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33,
+ 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70,
+ 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x78, 0x64, 0x73, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70,
+ 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61,
+ 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76,
+ 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xae, 0x06, 0x0a, 0x0f, 0x41, 0x70, 0x69, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x52, 0x0a, 0x08, 0x61, 0x70,
+ 0x69, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65,
+ 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x2e, 0x41, 0x70, 0x69, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05,
+ 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x61, 0x70, 0x69, 0x54, 0x79, 0x70, 0x65, 0x12, 0x5e,
+ 0x0a, 0x15, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x61, 0x70, 0x69, 0x5f,
+ 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72,
+ 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42,
+ 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x13, 0x74, 0x72, 0x61, 0x6e, 0x73,
+ 0x70, 0x6f, 0x72, 0x74, 0x41, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x23,
+ 0x0a, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18,
+ 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61,
+ 0x6d, 0x65, 0x73, 0x12, 0x46, 0x0a, 0x0d, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76,
+ 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x0c, 0x67,
+ 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x3e, 0x0a, 0x0d, 0x72,
+ 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x72,
+ 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x4c, 0x0a, 0x0f, 0x72,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42,
+ 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x57, 0x0a, 0x13, 0x72, 0x61, 0x74,
+ 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
+ 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61,
+ 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52,
+ 0x11, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e,
+ 0x67, 0x73, 0x12, 0x41, 0x0a, 0x1e, 0x73, 0x65, 0x74, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6f,
+ 0x6e, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f,
+ 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x73, 0x65, 0x74, 0x4e,
+ 0x6f, 0x64, 0x65, 0x4f, 0x6e, 0x46, 0x69, 0x72, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
+ 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0xa5, 0x01, 0x0a, 0x07, 0x41, 0x70, 0x69, 0x54, 0x79, 0x70,
+ 0x65, 0x12, 0x46, 0x0a, 0x2f, 0x68, 0x69, 0x64, 0x64, 0x65, 0x6e, 0x5f, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x55, 0x4e, 0x53,
+ 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x5f, 0x52, 0x45, 0x53, 0x54, 0x5f, 0x4c, 0x45,
+ 0x47, 0x41, 0x43, 0x59, 0x10, 0x00, 0x1a, 0x11, 0x08, 0x01, 0xa8, 0xf7, 0xb4, 0x8b, 0x02, 0x01,
+ 0x8a, 0xf4, 0x9b, 0xb3, 0x05, 0x03, 0x33, 0x2e, 0x30, 0x12, 0x08, 0x0a, 0x04, 0x52, 0x45, 0x53,
+ 0x54, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x47, 0x52, 0x50, 0x43, 0x10, 0x02, 0x12, 0x0e, 0x0a,
+ 0x0a, 0x44, 0x45, 0x4c, 0x54, 0x41, 0x5f, 0x47, 0x52, 0x50, 0x43, 0x10, 0x03, 0x12, 0x13, 0x0a,
+ 0x0f, 0x41, 0x47, 0x47, 0x52, 0x45, 0x47, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x47, 0x52, 0x50, 0x43,
+ 0x10, 0x05, 0x12, 0x19, 0x0a, 0x15, 0x41, 0x47, 0x47, 0x52, 0x45, 0x47, 0x41, 0x54, 0x45, 0x44,
+ 0x5f, 0x44, 0x45, 0x4c, 0x54, 0x41, 0x5f, 0x47, 0x52, 0x50, 0x43, 0x10, 0x06, 0x3a, 0x28, 0x9a,
+ 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e,
+ 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0x49, 0x0a, 0x16, 0x41, 0x67, 0x67, 0x72, 0x65,
+ 0x67, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x67, 0x67, 0x72,
+ 0x65, 0x67, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x22, 0x9d, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x6c, 0x66, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x5e, 0x0a, 0x15, 0x74, 0x72, 0x61, 0x6e, 0x73,
+ 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70,
+ 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02,
+ 0x10, 0x01, 0x52, 0x13, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x41, 0x70, 0x69,
+ 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72,
+ 0x65, 0x2e, 0x53, 0x65, 0x6c, 0x66, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x22, 0xc7, 0x01, 0x0a, 0x11, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74,
+ 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3b, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f,
+ 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55,
+ 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x54,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x6c, 0x5f, 0x72, 0x61,
+ 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x6f, 0x75, 0x62, 0x6c,
+ 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x12, 0x09, 0x21, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x6c, 0x52, 0x61, 0x74, 0x65,
+ 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61,
+ 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c,
+ 0x69, 0x6d, 0x69, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x22, 0xa7, 0x04, 0x0a,
+ 0x0c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x38, 0x0a,
+ 0x0b, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33,
+ 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x52, 0x0b, 0x61, 0x75, 0x74, 0x68,
+ 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x53, 0x0a,
+ 0x11, 0x61, 0x70, 0x69, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e,
+ 0x41, 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48,
+ 0x00, 0x52, 0x0f, 0x61, 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x12, 0x40, 0x0a, 0x03, 0x61, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63,
+ 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65,
+ 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, 0x52,
+ 0x03, 0x61, 0x64, 0x73, 0x12, 0x3c, 0x0a, 0x04, 0x73, 0x65, 0x6c, 0x66, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x6c, 0x66, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, 0x04, 0x73, 0x65,
+ 0x6c, 0x66, 0x12, 0x4d, 0x0a, 0x15, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x66, 0x65,
+ 0x74, 0x63, 0x68, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x69, 0x6e,
+ 0x69, 0x74, 0x69, 0x61, 0x6c, 0x46, 0x65, 0x74, 0x63, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75,
+ 0x74, 0x12, 0x5c, 0x0a, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x70,
+ 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32,
+ 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63,
+ 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x12, 0x72, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3a,
+ 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70,
+ 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x1e, 0x0a, 0x17, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65,
+ 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x2a, 0x40, 0x0a, 0x0a, 0x41, 0x70, 0x69, 0x56, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x04, 0x41, 0x55, 0x54, 0x4f, 0x10, 0x00, 0x1a, 0x0b,
+ 0x08, 0x01, 0x8a, 0xf4, 0x9b, 0xb3, 0x05, 0x03, 0x33, 0x2e, 0x30, 0x12, 0x13, 0x0a, 0x02, 0x56,
+ 0x32, 0x10, 0x01, 0x1a, 0x0b, 0x08, 0x01, 0x8a, 0xf4, 0x9b, 0xb3, 0x05, 0x03, 0x33, 0x2e, 0x30,
+ 0x12, 0x06, 0x0a, 0x02, 0x56, 0x33, 0x10, 0x02, 0x42, 0x41, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x11,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x50, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_core_v3_config_source_proto_rawDescOnce sync.Once
+ file_envoy_config_core_v3_config_source_proto_rawDescData = file_envoy_config_core_v3_config_source_proto_rawDesc
+)
+
+func file_envoy_config_core_v3_config_source_proto_rawDescGZIP() []byte {
+ file_envoy_config_core_v3_config_source_proto_rawDescOnce.Do(func() {
+ file_envoy_config_core_v3_config_source_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_config_source_proto_rawDescData)
+ })
+ return file_envoy_config_core_v3_config_source_proto_rawDescData
+}
+
+var file_envoy_config_core_v3_config_source_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_envoy_config_core_v3_config_source_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
+var file_envoy_config_core_v3_config_source_proto_goTypes = []interface{}{
+ (ApiVersion)(0), // 0: envoy.config.core.v3.ApiVersion
+ (ApiConfigSource_ApiType)(0), // 1: envoy.config.core.v3.ApiConfigSource.ApiType
+ (*ApiConfigSource)(nil), // 2: envoy.config.core.v3.ApiConfigSource
+ (*AggregatedConfigSource)(nil), // 3: envoy.config.core.v3.AggregatedConfigSource
+ (*SelfConfigSource)(nil), // 4: envoy.config.core.v3.SelfConfigSource
+ (*RateLimitSettings)(nil), // 5: envoy.config.core.v3.RateLimitSettings
+ (*ConfigSource)(nil), // 6: envoy.config.core.v3.ConfigSource
+ (*GrpcService)(nil), // 7: envoy.config.core.v3.GrpcService
+ (*duration.Duration)(nil), // 8: google.protobuf.Duration
+ (*wrappers.UInt32Value)(nil), // 9: google.protobuf.UInt32Value
+ (*wrappers.DoubleValue)(nil), // 10: google.protobuf.DoubleValue
+ (*v3.Authority)(nil), // 11: xds.core.v3.Authority
+}
+var file_envoy_config_core_v3_config_source_proto_depIdxs = []int32{
+ 1, // 0: envoy.config.core.v3.ApiConfigSource.api_type:type_name -> envoy.config.core.v3.ApiConfigSource.ApiType
+ 0, // 1: envoy.config.core.v3.ApiConfigSource.transport_api_version:type_name -> envoy.config.core.v3.ApiVersion
+ 7, // 2: envoy.config.core.v3.ApiConfigSource.grpc_services:type_name -> envoy.config.core.v3.GrpcService
+ 8, // 3: envoy.config.core.v3.ApiConfigSource.refresh_delay:type_name -> google.protobuf.Duration
+ 8, // 4: envoy.config.core.v3.ApiConfigSource.request_timeout:type_name -> google.protobuf.Duration
+ 5, // 5: envoy.config.core.v3.ApiConfigSource.rate_limit_settings:type_name -> envoy.config.core.v3.RateLimitSettings
+ 0, // 6: envoy.config.core.v3.SelfConfigSource.transport_api_version:type_name -> envoy.config.core.v3.ApiVersion
+ 9, // 7: envoy.config.core.v3.RateLimitSettings.max_tokens:type_name -> google.protobuf.UInt32Value
+ 10, // 8: envoy.config.core.v3.RateLimitSettings.fill_rate:type_name -> google.protobuf.DoubleValue
+ 11, // 9: envoy.config.core.v3.ConfigSource.authorities:type_name -> xds.core.v3.Authority
+ 2, // 10: envoy.config.core.v3.ConfigSource.api_config_source:type_name -> envoy.config.core.v3.ApiConfigSource
+ 3, // 11: envoy.config.core.v3.ConfigSource.ads:type_name -> envoy.config.core.v3.AggregatedConfigSource
+ 4, // 12: envoy.config.core.v3.ConfigSource.self:type_name -> envoy.config.core.v3.SelfConfigSource
+ 8, // 13: envoy.config.core.v3.ConfigSource.initial_fetch_timeout:type_name -> google.protobuf.Duration
+ 0, // 14: envoy.config.core.v3.ConfigSource.resource_api_version:type_name -> envoy.config.core.v3.ApiVersion
+ 15, // [15:15] is the sub-list for method output_type
+ 15, // [15:15] is the sub-list for method input_type
+ 15, // [15:15] is the sub-list for extension type_name
+ 15, // [15:15] is the sub-list for extension extendee
+ 0, // [0:15] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_core_v3_config_source_proto_init() }
+func file_envoy_config_core_v3_config_source_proto_init() {
+ if File_envoy_config_core_v3_config_source_proto != nil {
+ return
+ }
+ file_envoy_config_core_v3_grpc_service_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_core_v3_config_source_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ApiConfigSource); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_config_source_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AggregatedConfigSource); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_config_source_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SelfConfigSource); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_config_source_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RateLimitSettings); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_config_source_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ConfigSource); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_envoy_config_core_v3_config_source_proto_msgTypes[4].OneofWrappers = []interface{}{
+ (*ConfigSource_Path)(nil),
+ (*ConfigSource_ApiConfigSource)(nil),
+ (*ConfigSource_Ads)(nil),
+ (*ConfigSource_Self)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_core_v3_config_source_proto_rawDesc,
+ NumEnums: 2,
+ NumMessages: 5,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_core_v3_config_source_proto_goTypes,
+ DependencyIndexes: file_envoy_config_core_v3_config_source_proto_depIdxs,
+ EnumInfos: file_envoy_config_core_v3_config_source_proto_enumTypes,
+ MessageInfos: file_envoy_config_core_v3_config_source_proto_msgTypes,
+ }.Build()
+ File_envoy_config_core_v3_config_source_proto = out.File
+ file_envoy_config_core_v3_config_source_proto_rawDesc = nil
+ file_envoy_config_core_v3_config_source_proto_goTypes = nil
+ file_envoy_config_core_v3_config_source_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.validate.go
new file mode 100644
index 000000000..11a22692a
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.validate.go
@@ -0,0 +1,544 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/core/v3/config_source.proto
+
+package envoy_config_core_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// Validate checks the field values on ApiConfigSource with the rules defined
+// in the proto definition for this message. If any rules are violated, an
+// error is returned.
+func (m *ApiConfigSource) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if _, ok := ApiConfigSource_ApiType_name[int32(m.GetApiType())]; !ok {
+ return ApiConfigSourceValidationError{
+ field: "ApiType",
+ reason: "value must be one of the defined enum values",
+ }
+ }
+
+ if _, ok := ApiVersion_name[int32(m.GetTransportApiVersion())]; !ok {
+ return ApiConfigSourceValidationError{
+ field: "TransportApiVersion",
+ reason: "value must be one of the defined enum values",
+ }
+ }
+
+ for idx, item := range m.GetGrpcServices() {
+ _, _ = idx, item
+
+ if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ApiConfigSourceValidationError{
+ field: fmt.Sprintf("GrpcServices[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if v, ok := interface{}(m.GetRefreshDelay()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ApiConfigSourceValidationError{
+ field: "RefreshDelay",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if d := m.GetRequestTimeout(); d != nil {
+ dur, err := ptypes.Duration(d)
+ if err != nil {
+ return ApiConfigSourceValidationError{
+ field: "RequestTimeout",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ }
+
+ gt := time.Duration(0*time.Second + 0*time.Nanosecond)
+
+ if dur <= gt {
+ return ApiConfigSourceValidationError{
+ field: "RequestTimeout",
+ reason: "value must be greater than 0s",
+ }
+ }
+
+ }
+
+ if v, ok := interface{}(m.GetRateLimitSettings()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ApiConfigSourceValidationError{
+ field: "RateLimitSettings",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for SetNodeOnFirstMessageOnly
+
+ return nil
+}
+
+// ApiConfigSourceValidationError is the validation error returned by
+// ApiConfigSource.Validate if the designated constraints aren't met.
+type ApiConfigSourceValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ApiConfigSourceValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ApiConfigSourceValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ApiConfigSourceValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ApiConfigSourceValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ApiConfigSourceValidationError) ErrorName() string { return "ApiConfigSourceValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ApiConfigSourceValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sApiConfigSource.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ApiConfigSourceValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ApiConfigSourceValidationError{}
+
+// Validate checks the field values on AggregatedConfigSource with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *AggregatedConfigSource) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ return nil
+}
+
+// AggregatedConfigSourceValidationError is the validation error returned by
+// AggregatedConfigSource.Validate if the designated constraints aren't met.
+type AggregatedConfigSourceValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e AggregatedConfigSourceValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e AggregatedConfigSourceValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e AggregatedConfigSourceValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e AggregatedConfigSourceValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e AggregatedConfigSourceValidationError) ErrorName() string {
+ return "AggregatedConfigSourceValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e AggregatedConfigSourceValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sAggregatedConfigSource.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = AggregatedConfigSourceValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = AggregatedConfigSourceValidationError{}
+
+// Validate checks the field values on SelfConfigSource with the rules defined
+// in the proto definition for this message. If any rules are violated, an
+// error is returned.
+func (m *SelfConfigSource) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if _, ok := ApiVersion_name[int32(m.GetTransportApiVersion())]; !ok {
+ return SelfConfigSourceValidationError{
+ field: "TransportApiVersion",
+ reason: "value must be one of the defined enum values",
+ }
+ }
+
+ return nil
+}
+
+// SelfConfigSourceValidationError is the validation error returned by
+// SelfConfigSource.Validate if the designated constraints aren't met.
+type SelfConfigSourceValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e SelfConfigSourceValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e SelfConfigSourceValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e SelfConfigSourceValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e SelfConfigSourceValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e SelfConfigSourceValidationError) ErrorName() string { return "SelfConfigSourceValidationError" }
+
+// Error satisfies the builtin error interface
+func (e SelfConfigSourceValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sSelfConfigSource.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = SelfConfigSourceValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = SelfConfigSourceValidationError{}
+
+// Validate checks the field values on RateLimitSettings with the rules defined
+// in the proto definition for this message. If any rules are violated, an
+// error is returned.
+func (m *RateLimitSettings) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if v, ok := interface{}(m.GetMaxTokens()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RateLimitSettingsValidationError{
+ field: "MaxTokens",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if wrapper := m.GetFillRate(); wrapper != nil {
+
+ if wrapper.GetValue() <= 0 {
+ return RateLimitSettingsValidationError{
+ field: "FillRate",
+ reason: "value must be greater than 0",
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// RateLimitSettingsValidationError is the validation error returned by
+// RateLimitSettings.Validate if the designated constraints aren't met.
+type RateLimitSettingsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RateLimitSettingsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RateLimitSettingsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RateLimitSettingsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RateLimitSettingsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RateLimitSettingsValidationError) ErrorName() string {
+ return "RateLimitSettingsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e RateLimitSettingsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRateLimitSettings.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RateLimitSettingsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RateLimitSettingsValidationError{}
+
+// Validate checks the field values on ConfigSource with the rules defined in
+// the proto definition for this message. If any rules are violated, an error
+// is returned.
+func (m *ConfigSource) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ for idx, item := range m.GetAuthorities() {
+ _, _ = idx, item
+
+ if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ConfigSourceValidationError{
+ field: fmt.Sprintf("Authorities[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if v, ok := interface{}(m.GetInitialFetchTimeout()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ConfigSourceValidationError{
+ field: "InitialFetchTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if _, ok := ApiVersion_name[int32(m.GetResourceApiVersion())]; !ok {
+ return ConfigSourceValidationError{
+ field: "ResourceApiVersion",
+ reason: "value must be one of the defined enum values",
+ }
+ }
+
+ switch m.ConfigSourceSpecifier.(type) {
+
+ case *ConfigSource_Path:
+ // no validation rules for Path
+
+ case *ConfigSource_ApiConfigSource:
+
+ if v, ok := interface{}(m.GetApiConfigSource()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ConfigSourceValidationError{
+ field: "ApiConfigSource",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *ConfigSource_Ads:
+
+ if v, ok := interface{}(m.GetAds()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ConfigSourceValidationError{
+ field: "Ads",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *ConfigSource_Self:
+
+ if v, ok := interface{}(m.GetSelf()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ConfigSourceValidationError{
+ field: "Self",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ return ConfigSourceValidationError{
+ field: "ConfigSourceSpecifier",
+ reason: "value is required",
+ }
+
+ }
+
+ return nil
+}
+
+// ConfigSourceValidationError is the validation error returned by
+// ConfigSource.Validate if the designated constraints aren't met.
+type ConfigSourceValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ConfigSourceValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ConfigSourceValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ConfigSourceValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ConfigSourceValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ConfigSourceValidationError) ErrorName() string { return "ConfigSourceValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ConfigSourceValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sConfigSource.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ConfigSourceValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ConfigSourceValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.go
new file mode 100644
index 000000000..7f1d68502
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.go
@@ -0,0 +1,200 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/config/core/v3/event_service_config.proto
+
+package envoy_config_core_v3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// [#not-implemented-hide:]
+// Configuration of the event reporting service endpoint.
+type EventServiceConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to ConfigSourceSpecifier:
+ // *EventServiceConfig_GrpcService
+ ConfigSourceSpecifier isEventServiceConfig_ConfigSourceSpecifier `protobuf_oneof:"config_source_specifier"`
+}
+
+func (x *EventServiceConfig) Reset() {
+ *x = EventServiceConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_event_service_config_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EventServiceConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EventServiceConfig) ProtoMessage() {}
+
+func (x *EventServiceConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_event_service_config_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EventServiceConfig.ProtoReflect.Descriptor instead.
+func (*EventServiceConfig) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_event_service_config_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *EventServiceConfig) GetConfigSourceSpecifier() isEventServiceConfig_ConfigSourceSpecifier {
+ if m != nil {
+ return m.ConfigSourceSpecifier
+ }
+ return nil
+}
+
+func (x *EventServiceConfig) GetGrpcService() *GrpcService {
+ if x, ok := x.GetConfigSourceSpecifier().(*EventServiceConfig_GrpcService); ok {
+ return x.GrpcService
+ }
+ return nil
+}
+
+type isEventServiceConfig_ConfigSourceSpecifier interface {
+ isEventServiceConfig_ConfigSourceSpecifier()
+}
+
+type EventServiceConfig_GrpcService struct {
+ // Specifies the gRPC service that hosts the event reporting service.
+ GrpcService *GrpcService `protobuf:"bytes,1,opt,name=grpc_service,json=grpcService,proto3,oneof"`
+}
+
+func (*EventServiceConfig_GrpcService) isEventServiceConfig_ConfigSourceSpecifier() {}
+
+var File_envoy_config_core_v3_event_service_config_proto protoreflect.FileDescriptor
+
+var file_envoy_config_core_v3_event_service_config_proto_rawDesc = []byte{
+ 0x0a, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72,
+ 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa9, 0x01, 0x0a, 0x12,
+ 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x12, 0x46, 0x0a, 0x0c, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e,
+ 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x67,
+ 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e,
+ 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x1e, 0x0a, 0x17, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69,
+ 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x47, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x17, 0x45,
+ 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_core_v3_event_service_config_proto_rawDescOnce sync.Once
+ file_envoy_config_core_v3_event_service_config_proto_rawDescData = file_envoy_config_core_v3_event_service_config_proto_rawDesc
+)
+
+func file_envoy_config_core_v3_event_service_config_proto_rawDescGZIP() []byte {
+ file_envoy_config_core_v3_event_service_config_proto_rawDescOnce.Do(func() {
+ file_envoy_config_core_v3_event_service_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_event_service_config_proto_rawDescData)
+ })
+ return file_envoy_config_core_v3_event_service_config_proto_rawDescData
+}
+
+var file_envoy_config_core_v3_event_service_config_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_envoy_config_core_v3_event_service_config_proto_goTypes = []interface{}{
+ (*EventServiceConfig)(nil), // 0: envoy.config.core.v3.EventServiceConfig
+ (*GrpcService)(nil), // 1: envoy.config.core.v3.GrpcService
+}
+var file_envoy_config_core_v3_event_service_config_proto_depIdxs = []int32{
+ 1, // 0: envoy.config.core.v3.EventServiceConfig.grpc_service:type_name -> envoy.config.core.v3.GrpcService
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_core_v3_event_service_config_proto_init() }
+func file_envoy_config_core_v3_event_service_config_proto_init() {
+ if File_envoy_config_core_v3_event_service_config_proto != nil {
+ return
+ }
+ file_envoy_config_core_v3_grpc_service_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_core_v3_event_service_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EventServiceConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_envoy_config_core_v3_event_service_config_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*EventServiceConfig_GrpcService)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_core_v3_event_service_config_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_core_v3_event_service_config_proto_goTypes,
+ DependencyIndexes: file_envoy_config_core_v3_event_service_config_proto_depIdxs,
+ MessageInfos: file_envoy_config_core_v3_event_service_config_proto_msgTypes,
+ }.Build()
+ File_envoy_config_core_v3_event_service_config_proto = out.File
+ file_envoy_config_core_v3_event_service_config_proto_rawDesc = nil
+ file_envoy_config_core_v3_event_service_config_proto_goTypes = nil
+ file_envoy_config_core_v3_event_service_config_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.validate.go
new file mode 100644
index 000000000..c797b32fd
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.validate.go
@@ -0,0 +1,123 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/core/v3/event_service_config.proto
+
+package envoy_config_core_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// Validate checks the field values on EventServiceConfig with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *EventServiceConfig) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ switch m.ConfigSourceSpecifier.(type) {
+
+ case *EventServiceConfig_GrpcService:
+
+ if v, ok := interface{}(m.GetGrpcService()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return EventServiceConfigValidationError{
+ field: "GrpcService",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ return EventServiceConfigValidationError{
+ field: "ConfigSourceSpecifier",
+ reason: "value is required",
+ }
+
+ }
+
+ return nil
+}
+
+// EventServiceConfigValidationError is the validation error returned by
+// EventServiceConfig.Validate if the designated constraints aren't met.
+type EventServiceConfigValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e EventServiceConfigValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e EventServiceConfigValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e EventServiceConfigValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e EventServiceConfigValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e EventServiceConfigValidationError) ErrorName() string {
+ return "EventServiceConfigValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e EventServiceConfigValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sEventServiceConfig.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = EventServiceConfigValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = EventServiceConfigValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension.pb.go
new file mode 100644
index 000000000..80b851ecd
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension.pb.go
@@ -0,0 +1,312 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/config/core/v3/extension.proto
+
+package envoy_config_core_v3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ any "github.com/golang/protobuf/ptypes/any"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// Message type for extension configuration.
+// [#next-major-version: revisit all existing typed_config that doesn't use this wrapper.].
+type TypedExtensionConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name of an extension. This is not used to select the extension, instead
+ // it serves the role of an opaque identifier.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The typed config for the extension. The type URL will be used to identify
+ // the extension. In the case that the type URL is *udpa.type.v1.TypedStruct*,
+ // the inner type URL of *TypedStruct* will be utilized. See the
+ // :ref:`extension configuration overview
+ // ` for further details.
+ TypedConfig *any.Any `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"`
+}
+
+func (x *TypedExtensionConfig) Reset() {
+ *x = TypedExtensionConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_extension_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TypedExtensionConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TypedExtensionConfig) ProtoMessage() {}
+
+func (x *TypedExtensionConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_extension_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TypedExtensionConfig.ProtoReflect.Descriptor instead.
+func (*TypedExtensionConfig) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_extension_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *TypedExtensionConfig) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *TypedExtensionConfig) GetTypedConfig() *any.Any {
+ if x != nil {
+ return x.TypedConfig
+ }
+ return nil
+}
+
+// Configuration source specifier for a late-bound extension configuration. The
+// parent resource is warmed until all the initial extension configurations are
+// received, unless the flag to apply the default configuration is set.
+// Subsequent extension updates are atomic on a per-worker basis. Once an
+// extension configuration is applied to a request or a connection, it remains
+// constant for the duration of processing. If the initial delivery of the
+// extension configuration fails, due to a timeout for example, the optional
+// default configuration is applied. Without a default configuration, the
+// extension is disabled, until an extension configuration is received. The
+// behavior of a disabled extension depends on the context. For example, a
+// filter chain with a disabled extension filter rejects all incoming streams.
+type ExtensionConfigSource struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ConfigSource *ConfigSource `protobuf:"bytes,1,opt,name=config_source,json=configSource,proto3" json:"config_source,omitempty"`
+ // Optional default configuration to use as the initial configuration if
+ // there is a failure to receive the initial extension configuration or if
+ // `apply_default_config_without_warming` flag is set.
+ DefaultConfig *any.Any `protobuf:"bytes,2,opt,name=default_config,json=defaultConfig,proto3" json:"default_config,omitempty"`
+ // Use the default config as the initial configuration without warming and
+ // waiting for the first discovery response. Requires the default configuration
+ // to be supplied.
+ ApplyDefaultConfigWithoutWarming bool `protobuf:"varint,3,opt,name=apply_default_config_without_warming,json=applyDefaultConfigWithoutWarming,proto3" json:"apply_default_config_without_warming,omitempty"`
+ // A set of permitted extension type URLs. Extension configuration updates are rejected
+ // if they do not match any type URL in the set.
+ TypeUrls []string `protobuf:"bytes,4,rep,name=type_urls,json=typeUrls,proto3" json:"type_urls,omitempty"`
+}
+
+func (x *ExtensionConfigSource) Reset() {
+ *x = ExtensionConfigSource{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_extension_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ExtensionConfigSource) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExtensionConfigSource) ProtoMessage() {}
+
+func (x *ExtensionConfigSource) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_extension_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExtensionConfigSource.ProtoReflect.Descriptor instead.
+func (*ExtensionConfigSource) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_extension_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ExtensionConfigSource) GetConfigSource() *ConfigSource {
+ if x != nil {
+ return x.ConfigSource
+ }
+ return nil
+}
+
+func (x *ExtensionConfigSource) GetDefaultConfig() *any.Any {
+ if x != nil {
+ return x.DefaultConfig
+ }
+ return nil
+}
+
+func (x *ExtensionConfigSource) GetApplyDefaultConfigWithoutWarming() bool {
+ if x != nil {
+ return x.ApplyDefaultConfigWithoutWarming
+ }
+ return false
+}
+
+func (x *ExtensionConfigSource) GetTypeUrls() []string {
+ if x != nil {
+ return x.TypeUrls
+ }
+ return nil
+}
+
+var File_envoy_config_core_v3_extension_proto protoreflect.FileDescriptor
+
+var file_envoy_config_core_v3_extension_proto_rawDesc = []byte{
+ 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x28, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f,
+ 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64,
+ 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x76, 0x0a, 0x14, 0x54, 0x79, 0x70,
+ 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x41,
+ 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xa2,
+ 0x01, 0x02, 0x08, 0x01, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x22, 0x9e, 0x02, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x51, 0x0a, 0x0d, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xa2, 0x01, 0x02, 0x08, 0x01,
+ 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x3b,
+ 0x0a, 0x0e, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0d, 0x64, 0x65,
+ 0x66, 0x61, 0x75, 0x6c, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4e, 0x0a, 0x24, 0x61,
+ 0x70, 0x70, 0x6c, 0x79, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x6f, 0x75, 0x74, 0x5f, 0x77, 0x61, 0x72, 0x6d,
+ 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x20, 0x61, 0x70, 0x70, 0x6c, 0x79,
+ 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x57, 0x69, 0x74,
+ 0x68, 0x6f, 0x75, 0x74, 0x57, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x25, 0x0a, 0x09, 0x74,
+ 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x42, 0x08,
+ 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72,
+ 0x6c, 0x73, 0x42, 0x3e, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72,
+ 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73,
+ 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02,
+ 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_core_v3_extension_proto_rawDescOnce sync.Once
+ file_envoy_config_core_v3_extension_proto_rawDescData = file_envoy_config_core_v3_extension_proto_rawDesc
+)
+
+func file_envoy_config_core_v3_extension_proto_rawDescGZIP() []byte {
+ file_envoy_config_core_v3_extension_proto_rawDescOnce.Do(func() {
+ file_envoy_config_core_v3_extension_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_extension_proto_rawDescData)
+ })
+ return file_envoy_config_core_v3_extension_proto_rawDescData
+}
+
+var file_envoy_config_core_v3_extension_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_envoy_config_core_v3_extension_proto_goTypes = []interface{}{
+ (*TypedExtensionConfig)(nil), // 0: envoy.config.core.v3.TypedExtensionConfig
+ (*ExtensionConfigSource)(nil), // 1: envoy.config.core.v3.ExtensionConfigSource
+ (*any.Any)(nil), // 2: google.protobuf.Any
+ (*ConfigSource)(nil), // 3: envoy.config.core.v3.ConfigSource
+}
+var file_envoy_config_core_v3_extension_proto_depIdxs = []int32{
+ 2, // 0: envoy.config.core.v3.TypedExtensionConfig.typed_config:type_name -> google.protobuf.Any
+ 3, // 1: envoy.config.core.v3.ExtensionConfigSource.config_source:type_name -> envoy.config.core.v3.ConfigSource
+ 2, // 2: envoy.config.core.v3.ExtensionConfigSource.default_config:type_name -> google.protobuf.Any
+ 3, // [3:3] is the sub-list for method output_type
+ 3, // [3:3] is the sub-list for method input_type
+ 3, // [3:3] is the sub-list for extension type_name
+ 3, // [3:3] is the sub-list for extension extendee
+ 0, // [0:3] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_core_v3_extension_proto_init() }
+func file_envoy_config_core_v3_extension_proto_init() {
+ if File_envoy_config_core_v3_extension_proto != nil {
+ return
+ }
+ file_envoy_config_core_v3_config_source_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_core_v3_extension_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TypedExtensionConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_extension_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ExtensionConfigSource); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_core_v3_extension_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_core_v3_extension_proto_goTypes,
+ DependencyIndexes: file_envoy_config_core_v3_extension_proto_depIdxs,
+ MessageInfos: file_envoy_config_core_v3_extension_proto_msgTypes,
+ }.Build()
+ File_envoy_config_core_v3_extension_proto = out.File
+ file_envoy_config_core_v3_extension_proto_rawDesc = nil
+ file_envoy_config_core_v3_extension_proto_goTypes = nil
+ file_envoy_config_core_v3_extension_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension.pb.validate.go
new file mode 100644
index 000000000..2e399dbe5
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension.pb.validate.go
@@ -0,0 +1,216 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/core/v3/extension.proto
+
+package envoy_config_core_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// Validate checks the field values on TypedExtensionConfig with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *TypedExtensionConfig) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if utf8.RuneCountInString(m.GetName()) < 1 {
+ return TypedExtensionConfigValidationError{
+ field: "Name",
+ reason: "value length must be at least 1 runes",
+ }
+ }
+
+ if m.GetTypedConfig() == nil {
+ return TypedExtensionConfigValidationError{
+ field: "TypedConfig",
+ reason: "value is required",
+ }
+ }
+
+ if a := m.GetTypedConfig(); a != nil {
+
+ }
+
+ return nil
+}
+
+// TypedExtensionConfigValidationError is the validation error returned by
+// TypedExtensionConfig.Validate if the designated constraints aren't met.
+type TypedExtensionConfigValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e TypedExtensionConfigValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e TypedExtensionConfigValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e TypedExtensionConfigValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e TypedExtensionConfigValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e TypedExtensionConfigValidationError) ErrorName() string {
+ return "TypedExtensionConfigValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e TypedExtensionConfigValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sTypedExtensionConfig.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = TypedExtensionConfigValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = TypedExtensionConfigValidationError{}
+
+// Validate checks the field values on ExtensionConfigSource with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *ExtensionConfigSource) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if m.GetConfigSource() == nil {
+ return ExtensionConfigSourceValidationError{
+ field: "ConfigSource",
+ reason: "value is required",
+ }
+ }
+
+ if a := m.GetConfigSource(); a != nil {
+
+ }
+
+ if v, ok := interface{}(m.GetDefaultConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ExtensionConfigSourceValidationError{
+ field: "DefaultConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for ApplyDefaultConfigWithoutWarming
+
+ if len(m.GetTypeUrls()) < 1 {
+ return ExtensionConfigSourceValidationError{
+ field: "TypeUrls",
+ reason: "value must contain at least 1 item(s)",
+ }
+ }
+
+ return nil
+}
+
+// ExtensionConfigSourceValidationError is the validation error returned by
+// ExtensionConfigSource.Validate if the designated constraints aren't met.
+type ExtensionConfigSourceValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ExtensionConfigSourceValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ExtensionConfigSourceValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ExtensionConfigSourceValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ExtensionConfigSourceValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ExtensionConfigSourceValidationError) ErrorName() string {
+ return "ExtensionConfigSourceValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ExtensionConfigSourceValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sExtensionConfigSource.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ExtensionConfigSourceValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ExtensionConfigSourceValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list.pb.go
new file mode 100644
index 000000000..6eaf4dafd
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list.pb.go
@@ -0,0 +1,247 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/config/core/v3/grpc_method_list.proto
+
+package envoy_config_core_v3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// A list of gRPC methods which can be used as an allowlist, for example.
+type GrpcMethodList struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Services []*GrpcMethodList_Service `protobuf:"bytes,1,rep,name=services,proto3" json:"services,omitempty"`
+}
+
+func (x *GrpcMethodList) Reset() {
+ *x = GrpcMethodList{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_grpc_method_list_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GrpcMethodList) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcMethodList) ProtoMessage() {}
+
+func (x *GrpcMethodList) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_grpc_method_list_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcMethodList.ProtoReflect.Descriptor instead.
+func (*GrpcMethodList) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_grpc_method_list_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *GrpcMethodList) GetServices() []*GrpcMethodList_Service {
+ if x != nil {
+ return x.Services
+ }
+ return nil
+}
+
+type GrpcMethodList_Service struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name of the gRPC service.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The names of the gRPC methods in this service.
+ MethodNames []string `protobuf:"bytes,2,rep,name=method_names,json=methodNames,proto3" json:"method_names,omitempty"`
+}
+
+func (x *GrpcMethodList_Service) Reset() {
+ *x = GrpcMethodList_Service{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_grpc_method_list_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GrpcMethodList_Service) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcMethodList_Service) ProtoMessage() {}
+
+func (x *GrpcMethodList_Service) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_grpc_method_list_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcMethodList_Service.ProtoReflect.Descriptor instead.
+func (*GrpcMethodList_Service) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_grpc_method_list_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *GrpcMethodList_Service) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *GrpcMethodList_Service) GetMethodNames() []string {
+ if x != nil {
+ return x.MethodNames
+ }
+ return nil
+}
+
+var File_envoy_config_core_v3_grpc_method_list_proto protoreflect.FileDescriptor
+
+var file_envoy_config_core_v3_grpc_method_list_proto_rawDesc = []byte{
+ 0x0a, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6d, 0x65, 0x74, 0x68,
+ 0x6f, 0x64, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65,
+ 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f,
+ 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8a,
+ 0x02, 0x0a, 0x0e, 0x47, 0x72, 0x70, 0x63, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4c, 0x69, 0x73,
+ 0x74, 0x12, 0x48, 0x0a, 0x08, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4d,
+ 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x1a, 0x84, 0x01, 0x0a, 0x07,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x0c, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x6e,
+ 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92,
+ 0x01, 0x02, 0x08, 0x01, 0x52, 0x0b, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65,
+ 0x73, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63,
+ 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70,
+ 0x63, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x43, 0x0a, 0x22, 0x69,
+ 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76,
+ 0x33, 0x42, 0x13, 0x47, 0x72, 0x70, 0x63, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4c, 0x69, 0x73,
+ 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_core_v3_grpc_method_list_proto_rawDescOnce sync.Once
+ file_envoy_config_core_v3_grpc_method_list_proto_rawDescData = file_envoy_config_core_v3_grpc_method_list_proto_rawDesc
+)
+
+func file_envoy_config_core_v3_grpc_method_list_proto_rawDescGZIP() []byte {
+ file_envoy_config_core_v3_grpc_method_list_proto_rawDescOnce.Do(func() {
+ file_envoy_config_core_v3_grpc_method_list_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_grpc_method_list_proto_rawDescData)
+ })
+ return file_envoy_config_core_v3_grpc_method_list_proto_rawDescData
+}
+
+var file_envoy_config_core_v3_grpc_method_list_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_envoy_config_core_v3_grpc_method_list_proto_goTypes = []interface{}{
+ (*GrpcMethodList)(nil), // 0: envoy.config.core.v3.GrpcMethodList
+ (*GrpcMethodList_Service)(nil), // 1: envoy.config.core.v3.GrpcMethodList.Service
+}
+var file_envoy_config_core_v3_grpc_method_list_proto_depIdxs = []int32{
+ 1, // 0: envoy.config.core.v3.GrpcMethodList.services:type_name -> envoy.config.core.v3.GrpcMethodList.Service
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_core_v3_grpc_method_list_proto_init() }
+func file_envoy_config_core_v3_grpc_method_list_proto_init() {
+ if File_envoy_config_core_v3_grpc_method_list_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_core_v3_grpc_method_list_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GrpcMethodList); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_grpc_method_list_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GrpcMethodList_Service); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_core_v3_grpc_method_list_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_core_v3_grpc_method_list_proto_goTypes,
+ DependencyIndexes: file_envoy_config_core_v3_grpc_method_list_proto_depIdxs,
+ MessageInfos: file_envoy_config_core_v3_grpc_method_list_proto_msgTypes,
+ }.Build()
+ File_envoy_config_core_v3_grpc_method_list_proto = out.File
+ file_envoy_config_core_v3_grpc_method_list_proto_rawDesc = nil
+ file_envoy_config_core_v3_grpc_method_list_proto_goTypes = nil
+ file_envoy_config_core_v3_grpc_method_list_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list.pb.validate.go
new file mode 100644
index 000000000..9057d259a
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list.pb.validate.go
@@ -0,0 +1,195 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/core/v3/grpc_method_list.proto
+
+package envoy_config_core_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// Validate checks the field values on GrpcMethodList with the rules defined in
+// the proto definition for this message. If any rules are violated, an error
+// is returned.
+func (m *GrpcMethodList) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ for idx, item := range m.GetServices() {
+ _, _ = idx, item
+
+ if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcMethodListValidationError{
+ field: fmt.Sprintf("Services[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// GrpcMethodListValidationError is the validation error returned by
+// GrpcMethodList.Validate if the designated constraints aren't met.
+type GrpcMethodListValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e GrpcMethodListValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e GrpcMethodListValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e GrpcMethodListValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e GrpcMethodListValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e GrpcMethodListValidationError) ErrorName() string { return "GrpcMethodListValidationError" }
+
+// Error satisfies the builtin error interface
+func (e GrpcMethodListValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sGrpcMethodList.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = GrpcMethodListValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = GrpcMethodListValidationError{}
+
+// Validate checks the field values on GrpcMethodList_Service with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *GrpcMethodList_Service) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if utf8.RuneCountInString(m.GetName()) < 1 {
+ return GrpcMethodList_ServiceValidationError{
+ field: "Name",
+ reason: "value length must be at least 1 runes",
+ }
+ }
+
+ if len(m.GetMethodNames()) < 1 {
+ return GrpcMethodList_ServiceValidationError{
+ field: "MethodNames",
+ reason: "value must contain at least 1 item(s)",
+ }
+ }
+
+ return nil
+}
+
+// GrpcMethodList_ServiceValidationError is the validation error returned by
+// GrpcMethodList_Service.Validate if the designated constraints aren't met.
+type GrpcMethodList_ServiceValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e GrpcMethodList_ServiceValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e GrpcMethodList_ServiceValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e GrpcMethodList_ServiceValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e GrpcMethodList_ServiceValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e GrpcMethodList_ServiceValidationError) ErrorName() string {
+ return "GrpcMethodList_ServiceValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e GrpcMethodList_ServiceValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sGrpcMethodList_Service.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = GrpcMethodList_ServiceValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = GrpcMethodList_ServiceValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.go
new file mode 100644
index 000000000..db7cf3e27
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.go
@@ -0,0 +1,1768 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/config/core/v3/grpc_service.proto
+
+package envoy_config_core_v3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/go-control-plane/envoy/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ any "github.com/golang/protobuf/ptypes/any"
+ duration "github.com/golang/protobuf/ptypes/duration"
+ _struct "github.com/golang/protobuf/ptypes/struct"
+ wrappers "github.com/golang/protobuf/ptypes/wrappers"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// gRPC service configuration. This is used by :ref:`ApiConfigSource
+// ` and filter configurations.
+// [#next-free-field: 6]
+type GrpcService struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to TargetSpecifier:
+ // *GrpcService_EnvoyGrpc_
+ // *GrpcService_GoogleGrpc_
+ TargetSpecifier isGrpcService_TargetSpecifier `protobuf_oneof:"target_specifier"`
+ // The timeout for the gRPC request. This is the timeout for a specific
+ // request.
+ Timeout *duration.Duration `protobuf:"bytes,3,opt,name=timeout,proto3" json:"timeout,omitempty"`
+ // Additional metadata to include in streams initiated to the GrpcService. This can be used for
+ // scenarios in which additional ad hoc authorization headers (e.g. ``x-foo-bar: baz-key``) are to
+ // be injected. For more information, including details on header value syntax, see the
+ // documentation on :ref:`custom request headers
+ // `.
+ InitialMetadata []*HeaderValue `protobuf:"bytes,5,rep,name=initial_metadata,json=initialMetadata,proto3" json:"initial_metadata,omitempty"`
+}
+
+func (x *GrpcService) Reset() {
+ *x = GrpcService{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GrpcService) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcService) ProtoMessage() {}
+
+func (x *GrpcService) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcService.ProtoReflect.Descriptor instead.
+func (*GrpcService) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *GrpcService) GetTargetSpecifier() isGrpcService_TargetSpecifier {
+ if m != nil {
+ return m.TargetSpecifier
+ }
+ return nil
+}
+
+func (x *GrpcService) GetEnvoyGrpc() *GrpcService_EnvoyGrpc {
+ if x, ok := x.GetTargetSpecifier().(*GrpcService_EnvoyGrpc_); ok {
+ return x.EnvoyGrpc
+ }
+ return nil
+}
+
+func (x *GrpcService) GetGoogleGrpc() *GrpcService_GoogleGrpc {
+ if x, ok := x.GetTargetSpecifier().(*GrpcService_GoogleGrpc_); ok {
+ return x.GoogleGrpc
+ }
+ return nil
+}
+
+func (x *GrpcService) GetTimeout() *duration.Duration {
+ if x != nil {
+ return x.Timeout
+ }
+ return nil
+}
+
+func (x *GrpcService) GetInitialMetadata() []*HeaderValue {
+ if x != nil {
+ return x.InitialMetadata
+ }
+ return nil
+}
+
+type isGrpcService_TargetSpecifier interface {
+ isGrpcService_TargetSpecifier()
+}
+
+type GrpcService_EnvoyGrpc_ struct {
+ // Envoy's in-built gRPC client.
+ // See the :ref:`gRPC services overview `
+ // documentation for discussion on gRPC client selection.
+ EnvoyGrpc *GrpcService_EnvoyGrpc `protobuf:"bytes,1,opt,name=envoy_grpc,json=envoyGrpc,proto3,oneof"`
+}
+
+type GrpcService_GoogleGrpc_ struct {
+ // `Google C++ gRPC client `_
+ // See the :ref:`gRPC services overview `
+ // documentation for discussion on gRPC client selection.
+ GoogleGrpc *GrpcService_GoogleGrpc `protobuf:"bytes,2,opt,name=google_grpc,json=googleGrpc,proto3,oneof"`
+}
+
+func (*GrpcService_EnvoyGrpc_) isGrpcService_TargetSpecifier() {}
+
+func (*GrpcService_GoogleGrpc_) isGrpcService_TargetSpecifier() {}
+
+type GrpcService_EnvoyGrpc struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name of the upstream gRPC cluster. SSL credentials will be supplied
+ // in the :ref:`Cluster ` :ref:`transport_socket
+ // `.
+ ClusterName string `protobuf:"bytes,1,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
+ // The `:authority` header in the grpc request. If this field is not set, the authority header value will be `cluster_name`.
+ // Note that this authority does not override the SNI. The SNI is provided by the transport socket of the cluster.
+ Authority string `protobuf:"bytes,2,opt,name=authority,proto3" json:"authority,omitempty"`
+}
+
+func (x *GrpcService_EnvoyGrpc) Reset() {
+ *x = GrpcService_EnvoyGrpc{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GrpcService_EnvoyGrpc) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcService_EnvoyGrpc) ProtoMessage() {}
+
+func (x *GrpcService_EnvoyGrpc) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcService_EnvoyGrpc.ProtoReflect.Descriptor instead.
+func (*GrpcService_EnvoyGrpc) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *GrpcService_EnvoyGrpc) GetClusterName() string {
+ if x != nil {
+ return x.ClusterName
+ }
+ return ""
+}
+
+func (x *GrpcService_EnvoyGrpc) GetAuthority() string {
+ if x != nil {
+ return x.Authority
+ }
+ return ""
+}
+
+// [#next-free-field: 9]
+type GrpcService_GoogleGrpc struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The target URI when using the `Google C++ gRPC client
+ // `_. SSL credentials will be supplied in
+ // :ref:`channel_credentials `.
+ TargetUri string `protobuf:"bytes,1,opt,name=target_uri,json=targetUri,proto3" json:"target_uri,omitempty"`
+ ChannelCredentials *GrpcService_GoogleGrpc_ChannelCredentials `protobuf:"bytes,2,opt,name=channel_credentials,json=channelCredentials,proto3" json:"channel_credentials,omitempty"`
+ // A set of call credentials that can be composed with `channel credentials
+ // `_.
+ CallCredentials []*GrpcService_GoogleGrpc_CallCredentials `protobuf:"bytes,3,rep,name=call_credentials,json=callCredentials,proto3" json:"call_credentials,omitempty"`
+ // The human readable prefix to use when emitting statistics for the gRPC
+ // service.
+ //
+ // .. csv-table::
+ // :header: Name, Type, Description
+ // :widths: 1, 1, 2
+ //
+ // streams_total, Counter, Total number of streams opened
+ // streams_closed_, Counter, Total streams closed with
+ StatPrefix string `protobuf:"bytes,4,opt,name=stat_prefix,json=statPrefix,proto3" json:"stat_prefix,omitempty"`
+ // The name of the Google gRPC credentials factory to use. This must have been registered with
+ // Envoy. If this is empty, a default credentials factory will be used that sets up channel
+ // credentials based on other configuration parameters.
+ CredentialsFactoryName string `protobuf:"bytes,5,opt,name=credentials_factory_name,json=credentialsFactoryName,proto3" json:"credentials_factory_name,omitempty"`
+ // Additional configuration for site-specific customizations of the Google
+ // gRPC library.
+ Config *_struct.Struct `protobuf:"bytes,6,opt,name=config,proto3" json:"config,omitempty"`
+ // How many bytes each stream can buffer internally.
+ // If not set an implementation defined default is applied (1MiB).
+ PerStreamBufferLimitBytes *wrappers.UInt32Value `protobuf:"bytes,7,opt,name=per_stream_buffer_limit_bytes,json=perStreamBufferLimitBytes,proto3" json:"per_stream_buffer_limit_bytes,omitempty"`
+ // Custom channels args.
+ ChannelArgs *GrpcService_GoogleGrpc_ChannelArgs `protobuf:"bytes,8,opt,name=channel_args,json=channelArgs,proto3" json:"channel_args,omitempty"`
+}
+
+func (x *GrpcService_GoogleGrpc) Reset() {
+ *x = GrpcService_GoogleGrpc{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GrpcService_GoogleGrpc) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcService_GoogleGrpc) ProtoMessage() {}
+
+func (x *GrpcService_GoogleGrpc) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcService_GoogleGrpc.ProtoReflect.Descriptor instead.
+func (*GrpcService_GoogleGrpc) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1}
+}
+
+func (x *GrpcService_GoogleGrpc) GetTargetUri() string {
+ if x != nil {
+ return x.TargetUri
+ }
+ return ""
+}
+
+func (x *GrpcService_GoogleGrpc) GetChannelCredentials() *GrpcService_GoogleGrpc_ChannelCredentials {
+ if x != nil {
+ return x.ChannelCredentials
+ }
+ return nil
+}
+
+func (x *GrpcService_GoogleGrpc) GetCallCredentials() []*GrpcService_GoogleGrpc_CallCredentials {
+ if x != nil {
+ return x.CallCredentials
+ }
+ return nil
+}
+
+func (x *GrpcService_GoogleGrpc) GetStatPrefix() string {
+ if x != nil {
+ return x.StatPrefix
+ }
+ return ""
+}
+
+func (x *GrpcService_GoogleGrpc) GetCredentialsFactoryName() string {
+ if x != nil {
+ return x.CredentialsFactoryName
+ }
+ return ""
+}
+
+func (x *GrpcService_GoogleGrpc) GetConfig() *_struct.Struct {
+ if x != nil {
+ return x.Config
+ }
+ return nil
+}
+
+func (x *GrpcService_GoogleGrpc) GetPerStreamBufferLimitBytes() *wrappers.UInt32Value {
+ if x != nil {
+ return x.PerStreamBufferLimitBytes
+ }
+ return nil
+}
+
+func (x *GrpcService_GoogleGrpc) GetChannelArgs() *GrpcService_GoogleGrpc_ChannelArgs {
+ if x != nil {
+ return x.ChannelArgs
+ }
+ return nil
+}
+
+// See https://grpc.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html.
+type GrpcService_GoogleGrpc_SslCredentials struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // PEM encoded server root certificates.
+ RootCerts *DataSource `protobuf:"bytes,1,opt,name=root_certs,json=rootCerts,proto3" json:"root_certs,omitempty"`
+ // PEM encoded client private key.
+ PrivateKey *DataSource `protobuf:"bytes,2,opt,name=private_key,json=privateKey,proto3" json:"private_key,omitempty"`
+ // PEM encoded client certificate chain.
+ CertChain *DataSource `protobuf:"bytes,3,opt,name=cert_chain,json=certChain,proto3" json:"cert_chain,omitempty"`
+}
+
+func (x *GrpcService_GoogleGrpc_SslCredentials) Reset() {
+ *x = GrpcService_GoogleGrpc_SslCredentials{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GrpcService_GoogleGrpc_SslCredentials) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcService_GoogleGrpc_SslCredentials) ProtoMessage() {}
+
+func (x *GrpcService_GoogleGrpc_SslCredentials) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcService_GoogleGrpc_SslCredentials.ProtoReflect.Descriptor instead.
+func (*GrpcService_GoogleGrpc_SslCredentials) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 0}
+}
+
+func (x *GrpcService_GoogleGrpc_SslCredentials) GetRootCerts() *DataSource {
+ if x != nil {
+ return x.RootCerts
+ }
+ return nil
+}
+
+func (x *GrpcService_GoogleGrpc_SslCredentials) GetPrivateKey() *DataSource {
+ if x != nil {
+ return x.PrivateKey
+ }
+ return nil
+}
+
+func (x *GrpcService_GoogleGrpc_SslCredentials) GetCertChain() *DataSource {
+ if x != nil {
+ return x.CertChain
+ }
+ return nil
+}
+
+// Local channel credentials. Only UDS is supported for now.
+// See https://github.com/grpc/grpc/pull/15909.
+type GrpcService_GoogleGrpc_GoogleLocalCredentials struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *GrpcService_GoogleGrpc_GoogleLocalCredentials) Reset() {
+ *x = GrpcService_GoogleGrpc_GoogleLocalCredentials{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GrpcService_GoogleGrpc_GoogleLocalCredentials) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcService_GoogleGrpc_GoogleLocalCredentials) ProtoMessage() {}
+
+func (x *GrpcService_GoogleGrpc_GoogleLocalCredentials) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcService_GoogleGrpc_GoogleLocalCredentials.ProtoReflect.Descriptor instead.
+func (*GrpcService_GoogleGrpc_GoogleLocalCredentials) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 1}
+}
+
+// See https://grpc.io/docs/guides/auth.html#credential-types to understand Channel and Call
+// credential types.
+type GrpcService_GoogleGrpc_ChannelCredentials struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to CredentialSpecifier:
+ // *GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials
+ // *GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault
+ // *GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials
+ CredentialSpecifier isGrpcService_GoogleGrpc_ChannelCredentials_CredentialSpecifier `protobuf_oneof:"credential_specifier"`
+}
+
+func (x *GrpcService_GoogleGrpc_ChannelCredentials) Reset() {
+ *x = GrpcService_GoogleGrpc_ChannelCredentials{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GrpcService_GoogleGrpc_ChannelCredentials) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcService_GoogleGrpc_ChannelCredentials) ProtoMessage() {}
+
+func (x *GrpcService_GoogleGrpc_ChannelCredentials) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcService_GoogleGrpc_ChannelCredentials.ProtoReflect.Descriptor instead.
+func (*GrpcService_GoogleGrpc_ChannelCredentials) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 2}
+}
+
+func (m *GrpcService_GoogleGrpc_ChannelCredentials) GetCredentialSpecifier() isGrpcService_GoogleGrpc_ChannelCredentials_CredentialSpecifier {
+ if m != nil {
+ return m.CredentialSpecifier
+ }
+ return nil
+}
+
+func (x *GrpcService_GoogleGrpc_ChannelCredentials) GetSslCredentials() *GrpcService_GoogleGrpc_SslCredentials {
+ if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials); ok {
+ return x.SslCredentials
+ }
+ return nil
+}
+
+func (x *GrpcService_GoogleGrpc_ChannelCredentials) GetGoogleDefault() *emptypb.Empty {
+ if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault); ok {
+ return x.GoogleDefault
+ }
+ return nil
+}
+
+func (x *GrpcService_GoogleGrpc_ChannelCredentials) GetLocalCredentials() *GrpcService_GoogleGrpc_GoogleLocalCredentials {
+ if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials); ok {
+ return x.LocalCredentials
+ }
+ return nil
+}
+
+type isGrpcService_GoogleGrpc_ChannelCredentials_CredentialSpecifier interface {
+ isGrpcService_GoogleGrpc_ChannelCredentials_CredentialSpecifier()
+}
+
+type GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials struct {
+ SslCredentials *GrpcService_GoogleGrpc_SslCredentials `protobuf:"bytes,1,opt,name=ssl_credentials,json=sslCredentials,proto3,oneof"`
+}
+
+type GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault struct {
+ // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61
+ GoogleDefault *emptypb.Empty `protobuf:"bytes,2,opt,name=google_default,json=googleDefault,proto3,oneof"`
+}
+
+type GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials struct {
+ LocalCredentials *GrpcService_GoogleGrpc_GoogleLocalCredentials `protobuf:"bytes,3,opt,name=local_credentials,json=localCredentials,proto3,oneof"`
+}
+
+func (*GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials) isGrpcService_GoogleGrpc_ChannelCredentials_CredentialSpecifier() {
+}
+
+func (*GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault) isGrpcService_GoogleGrpc_ChannelCredentials_CredentialSpecifier() {
+}
+
+func (*GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials) isGrpcService_GoogleGrpc_ChannelCredentials_CredentialSpecifier() {
+}
+
+// [#next-free-field: 8]
+type GrpcService_GoogleGrpc_CallCredentials struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to CredentialSpecifier:
+ // *GrpcService_GoogleGrpc_CallCredentials_AccessToken
+ // *GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine
+ // *GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken
+ // *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess
+ // *GrpcService_GoogleGrpc_CallCredentials_GoogleIam
+ // *GrpcService_GoogleGrpc_CallCredentials_FromPlugin
+ // *GrpcService_GoogleGrpc_CallCredentials_StsService_
+ CredentialSpecifier isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier `protobuf_oneof:"credential_specifier"`
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials) Reset() {
+ *x = GrpcService_GoogleGrpc_CallCredentials{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcService_GoogleGrpc_CallCredentials) ProtoMessage() {}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcService_GoogleGrpc_CallCredentials.ProtoReflect.Descriptor instead.
+func (*GrpcService_GoogleGrpc_CallCredentials) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 3}
+}
+
+func (m *GrpcService_GoogleGrpc_CallCredentials) GetCredentialSpecifier() isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier {
+ if m != nil {
+ return m.CredentialSpecifier
+ }
+ return nil
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials) GetAccessToken() string {
+ if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_CallCredentials_AccessToken); ok {
+ return x.AccessToken
+ }
+ return ""
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials) GetGoogleComputeEngine() *emptypb.Empty {
+ if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine); ok {
+ return x.GoogleComputeEngine
+ }
+ return nil
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials) GetGoogleRefreshToken() string {
+ if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken); ok {
+ return x.GoogleRefreshToken
+ }
+ return ""
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials) GetServiceAccountJwtAccess() *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials {
+ if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess); ok {
+ return x.ServiceAccountJwtAccess
+ }
+ return nil
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials) GetGoogleIam() *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials {
+ if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_CallCredentials_GoogleIam); ok {
+ return x.GoogleIam
+ }
+ return nil
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials) GetFromPlugin() *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin {
+ if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_CallCredentials_FromPlugin); ok {
+ return x.FromPlugin
+ }
+ return nil
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials) GetStsService() *GrpcService_GoogleGrpc_CallCredentials_StsService {
+ if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_CallCredentials_StsService_); ok {
+ return x.StsService
+ }
+ return nil
+}
+
+type isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier interface {
+ isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier()
+}
+
+type GrpcService_GoogleGrpc_CallCredentials_AccessToken struct {
+ // Access token credentials.
+ // https://grpc.io/grpc/cpp/namespacegrpc.html#ad3a80da696ffdaea943f0f858d7a360d.
+ AccessToken string `protobuf:"bytes,1,opt,name=access_token,json=accessToken,proto3,oneof"`
+}
+
+type GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine struct {
+ // Google Compute Engine credentials.
+ // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61
+ GoogleComputeEngine *emptypb.Empty `protobuf:"bytes,2,opt,name=google_compute_engine,json=googleComputeEngine,proto3,oneof"`
+}
+
+type GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken struct {
+ // Google refresh token credentials.
+ // https://grpc.io/grpc/cpp/namespacegrpc.html#a96901c997b91bc6513b08491e0dca37c.
+ GoogleRefreshToken string `protobuf:"bytes,3,opt,name=google_refresh_token,json=googleRefreshToken,proto3,oneof"`
+}
+
+type GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess struct {
+ // Service Account JWT Access credentials.
+ // https://grpc.io/grpc/cpp/namespacegrpc.html#a92a9f959d6102461f66ee973d8e9d3aa.
+ ServiceAccountJwtAccess *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials `protobuf:"bytes,4,opt,name=service_account_jwt_access,json=serviceAccountJwtAccess,proto3,oneof"`
+}
+
+type GrpcService_GoogleGrpc_CallCredentials_GoogleIam struct {
+ // Google IAM credentials.
+ // https://grpc.io/grpc/cpp/namespacegrpc.html#a9fc1fc101b41e680d47028166e76f9d0.
+ GoogleIam *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials `protobuf:"bytes,5,opt,name=google_iam,json=googleIam,proto3,oneof"`
+}
+
+type GrpcService_GoogleGrpc_CallCredentials_FromPlugin struct {
+ // Custom authenticator credentials.
+ // https://grpc.io/grpc/cpp/namespacegrpc.html#a823c6a4b19ffc71fb33e90154ee2ad07.
+ // https://grpc.io/docs/guides/auth.html#extending-grpc-to-support-other-authentication-mechanisms.
+ FromPlugin *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin `protobuf:"bytes,6,opt,name=from_plugin,json=fromPlugin,proto3,oneof"`
+}
+
+type GrpcService_GoogleGrpc_CallCredentials_StsService_ struct {
+ // Custom security token service which implements OAuth 2.0 token exchange.
+ // https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16
+ // See https://github.com/grpc/grpc/pull/19587.
+ StsService *GrpcService_GoogleGrpc_CallCredentials_StsService `protobuf:"bytes,7,opt,name=sts_service,json=stsService,proto3,oneof"`
+}
+
+func (*GrpcService_GoogleGrpc_CallCredentials_AccessToken) isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() {
+}
+
+func (*GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine) isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() {
+}
+
+func (*GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken) isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() {
+}
+
+func (*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess) isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() {
+}
+
+func (*GrpcService_GoogleGrpc_CallCredentials_GoogleIam) isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() {
+}
+
+func (*GrpcService_GoogleGrpc_CallCredentials_FromPlugin) isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() {
+}
+
+func (*GrpcService_GoogleGrpc_CallCredentials_StsService_) isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() {
+}
+
+// Channel arguments.
+type GrpcService_GoogleGrpc_ChannelArgs struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // See grpc_types.h GRPC_ARG #defines for keys that work here.
+ Args map[string]*GrpcService_GoogleGrpc_ChannelArgs_Value `protobuf:"bytes,1,rep,name=args,proto3" json:"args,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *GrpcService_GoogleGrpc_ChannelArgs) Reset() {
+ *x = GrpcService_GoogleGrpc_ChannelArgs{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GrpcService_GoogleGrpc_ChannelArgs) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcService_GoogleGrpc_ChannelArgs) ProtoMessage() {}
+
+func (x *GrpcService_GoogleGrpc_ChannelArgs) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcService_GoogleGrpc_ChannelArgs.ProtoReflect.Descriptor instead.
+func (*GrpcService_GoogleGrpc_ChannelArgs) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 4}
+}
+
+func (x *GrpcService_GoogleGrpc_ChannelArgs) GetArgs() map[string]*GrpcService_GoogleGrpc_ChannelArgs_Value {
+ if x != nil {
+ return x.Args
+ }
+ return nil
+}
+
+type GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ JsonKey string `protobuf:"bytes,1,opt,name=json_key,json=jsonKey,proto3" json:"json_key,omitempty"`
+ TokenLifetimeSeconds uint64 `protobuf:"varint,2,opt,name=token_lifetime_seconds,json=tokenLifetimeSeconds,proto3" json:"token_lifetime_seconds,omitempty"`
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) Reset() {
+ *x = GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) ProtoMessage() {}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials.ProtoReflect.Descriptor instead.
+func (*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 3, 0}
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) GetJsonKey() string {
+ if x != nil {
+ return x.JsonKey
+ }
+ return ""
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) GetTokenLifetimeSeconds() uint64 {
+ if x != nil {
+ return x.TokenLifetimeSeconds
+ }
+ return 0
+}
+
+type GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ AuthorizationToken string `protobuf:"bytes,1,opt,name=authorization_token,json=authorizationToken,proto3" json:"authorization_token,omitempty"`
+ AuthoritySelector string `protobuf:"bytes,2,opt,name=authority_selector,json=authoritySelector,proto3" json:"authority_selector,omitempty"`
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) Reset() {
+ *x = GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) ProtoMessage() {}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials.ProtoReflect.Descriptor instead.
+func (*GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 3, 1}
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) GetAuthorizationToken() string {
+ if x != nil {
+ return x.AuthorizationToken
+ }
+ return ""
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) GetAuthoritySelector() string {
+ if x != nil {
+ return x.AuthoritySelector
+ }
+ return ""
+}
+
+type GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // [#extension-category: envoy.grpc_credentials]
+ //
+ // Types that are assignable to ConfigType:
+ // *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig
+ // *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_HiddenEnvoyDeprecatedConfig
+ ConfigType isGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_ConfigType `protobuf_oneof:"config_type"`
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) Reset() {
+ *x = GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) ProtoMessage() {}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin.ProtoReflect.Descriptor instead.
+func (*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 3, 2}
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) GetConfigType() isGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_ConfigType {
+ if m != nil {
+ return m.ConfigType
+ }
+ return nil
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) GetTypedConfig() *any.Any {
+ if x, ok := x.GetConfigType().(*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig); ok {
+ return x.TypedConfig
+ }
+ return nil
+}
+
+// Deprecated: Do not use.
+func (x *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) GetHiddenEnvoyDeprecatedConfig() *_struct.Struct {
+ if x, ok := x.GetConfigType().(*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_HiddenEnvoyDeprecatedConfig); ok {
+ return x.HiddenEnvoyDeprecatedConfig
+ }
+ return nil
+}
+
+type isGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_ConfigType interface {
+ isGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_ConfigType()
+}
+
+type GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig struct {
+ TypedConfig *any.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"`
+}
+
+type GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_HiddenEnvoyDeprecatedConfig struct {
+ // Deprecated: Do not use.
+ HiddenEnvoyDeprecatedConfig *_struct.Struct `protobuf:"bytes,2,opt,name=hidden_envoy_deprecated_config,json=hiddenEnvoyDeprecatedConfig,proto3,oneof"`
+}
+
+func (*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig) isGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_ConfigType() {
+}
+
+func (*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_HiddenEnvoyDeprecatedConfig) isGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_ConfigType() {
+}
+
+// Security token service configuration that allows Google gRPC to
+// fetch security token from an OAuth 2.0 authorization server.
+// See https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 and
+// https://github.com/grpc/grpc/pull/19587.
+// [#next-free-field: 10]
+type GrpcService_GoogleGrpc_CallCredentials_StsService struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // URI of the token exchange service that handles token exchange requests.
+ // [#comment:TODO(asraa): Add URI validation when implemented. Tracked by
+ // https://github.com/envoyproxy/protoc-gen-validate/issues/303]
+ TokenExchangeServiceUri string `protobuf:"bytes,1,opt,name=token_exchange_service_uri,json=tokenExchangeServiceUri,proto3" json:"token_exchange_service_uri,omitempty"`
+ // Location of the target service or resource where the client
+ // intends to use the requested security token.
+ Resource string `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource,omitempty"`
+ // Logical name of the target service where the client intends to
+ // use the requested security token.
+ Audience string `protobuf:"bytes,3,opt,name=audience,proto3" json:"audience,omitempty"`
+ // The desired scope of the requested security token in the
+ // context of the service or resource where the token will be used.
+ Scope string `protobuf:"bytes,4,opt,name=scope,proto3" json:"scope,omitempty"`
+ // Type of the requested security token.
+ RequestedTokenType string `protobuf:"bytes,5,opt,name=requested_token_type,json=requestedTokenType,proto3" json:"requested_token_type,omitempty"`
+ // The path of subject token, a security token that represents the
+ // identity of the party on behalf of whom the request is being made.
+ SubjectTokenPath string `protobuf:"bytes,6,opt,name=subject_token_path,json=subjectTokenPath,proto3" json:"subject_token_path,omitempty"`
+ // Type of the subject token.
+ SubjectTokenType string `protobuf:"bytes,7,opt,name=subject_token_type,json=subjectTokenType,proto3" json:"subject_token_type,omitempty"`
+ // The path of actor token, a security token that represents the identity
+ // of the acting party. The acting party is authorized to use the
+ // requested security token and act on behalf of the subject.
+ ActorTokenPath string `protobuf:"bytes,8,opt,name=actor_token_path,json=actorTokenPath,proto3" json:"actor_token_path,omitempty"`
+ // Type of the actor token.
+ ActorTokenType string `protobuf:"bytes,9,opt,name=actor_token_type,json=actorTokenType,proto3" json:"actor_token_type,omitempty"`
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) Reset() {
+ *x = GrpcService_GoogleGrpc_CallCredentials_StsService{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcService_GoogleGrpc_CallCredentials_StsService) ProtoMessage() {}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcService_GoogleGrpc_CallCredentials_StsService.ProtoReflect.Descriptor instead.
+func (*GrpcService_GoogleGrpc_CallCredentials_StsService) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 3, 3}
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetTokenExchangeServiceUri() string {
+ if x != nil {
+ return x.TokenExchangeServiceUri
+ }
+ return ""
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetResource() string {
+ if x != nil {
+ return x.Resource
+ }
+ return ""
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetAudience() string {
+ if x != nil {
+ return x.Audience
+ }
+ return ""
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetScope() string {
+ if x != nil {
+ return x.Scope
+ }
+ return ""
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetRequestedTokenType() string {
+ if x != nil {
+ return x.RequestedTokenType
+ }
+ return ""
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetSubjectTokenPath() string {
+ if x != nil {
+ return x.SubjectTokenPath
+ }
+ return ""
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetSubjectTokenType() string {
+ if x != nil {
+ return x.SubjectTokenType
+ }
+ return ""
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetActorTokenPath() string {
+ if x != nil {
+ return x.ActorTokenPath
+ }
+ return ""
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetActorTokenType() string {
+ if x != nil {
+ return x.ActorTokenType
+ }
+ return ""
+}
+
+type GrpcService_GoogleGrpc_ChannelArgs_Value struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Pointer values are not supported, since they don't make any sense when
+ // delivered via the API.
+ //
+ // Types that are assignable to ValueSpecifier:
+ // *GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue
+ // *GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue
+ ValueSpecifier isGrpcService_GoogleGrpc_ChannelArgs_Value_ValueSpecifier `protobuf_oneof:"value_specifier"`
+}
+
+func (x *GrpcService_GoogleGrpc_ChannelArgs_Value) Reset() {
+ *x = GrpcService_GoogleGrpc_ChannelArgs_Value{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GrpcService_GoogleGrpc_ChannelArgs_Value) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcService_GoogleGrpc_ChannelArgs_Value) ProtoMessage() {}
+
+func (x *GrpcService_GoogleGrpc_ChannelArgs_Value) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcService_GoogleGrpc_ChannelArgs_Value.ProtoReflect.Descriptor instead.
+func (*GrpcService_GoogleGrpc_ChannelArgs_Value) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 4, 0}
+}
+
+func (m *GrpcService_GoogleGrpc_ChannelArgs_Value) GetValueSpecifier() isGrpcService_GoogleGrpc_ChannelArgs_Value_ValueSpecifier {
+ if m != nil {
+ return m.ValueSpecifier
+ }
+ return nil
+}
+
+func (x *GrpcService_GoogleGrpc_ChannelArgs_Value) GetStringValue() string {
+ if x, ok := x.GetValueSpecifier().(*GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue); ok {
+ return x.StringValue
+ }
+ return ""
+}
+
+func (x *GrpcService_GoogleGrpc_ChannelArgs_Value) GetIntValue() int64 {
+ if x, ok := x.GetValueSpecifier().(*GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue); ok {
+ return x.IntValue
+ }
+ return 0
+}
+
+type isGrpcService_GoogleGrpc_ChannelArgs_Value_ValueSpecifier interface {
+ isGrpcService_GoogleGrpc_ChannelArgs_Value_ValueSpecifier()
+}
+
+type GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue struct {
+ StringValue string `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue struct {
+ IntValue int64 `protobuf:"varint,2,opt,name=int_value,json=intValue,proto3,oneof"`
+}
+
+func (*GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue) isGrpcService_GoogleGrpc_ChannelArgs_Value_ValueSpecifier() {
+}
+
+func (*GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue) isGrpcService_GoogleGrpc_ChannelArgs_Value_ValueSpecifier() {
+}
+
+var File_envoy_config_core_v3_grpc_service_proto protoreflect.FileDescriptor
+
+var file_envoy_config_core_v3_grpc_service_proto_rawDesc = []byte{
+ 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a,
+ 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f,
+ 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70,
+ 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x75, 0x64, 0x70,
+ 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x65,
+ 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75,
+ 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f,
+ 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64,
+ 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61,
+ 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xeb, 0x21, 0x0a, 0x0b, 0x47, 0x72, 0x70,
+ 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x4c, 0x0a, 0x0a, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x5f, 0x67, 0x72, 0x70, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65,
+ 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e,
+ 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x47, 0x72, 0x70, 0x63, 0x48, 0x00, 0x52, 0x09, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x47, 0x72, 0x70, 0x63, 0x12, 0x4f, 0x0a, 0x0b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x5f, 0x67, 0x72, 0x70, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e,
+ 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x48, 0x00, 0x52, 0x0a, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x12, 0x33, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f,
+ 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x4c, 0x0a, 0x10,
+ 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
+ 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65,
+ 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x69,
+ 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x98, 0x01, 0x0a, 0x09, 0x45,
+ 0x6e, 0x76, 0x6f, 0x79, 0x47, 0x72, 0x70, 0x63, 0x12, 0x2a, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73,
+ 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07,
+ 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+ 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74,
+ 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x11, 0xfa, 0x42, 0x0e, 0x72, 0x0c, 0x10, 0x00,
+ 0x28, 0x80, 0x80, 0x01, 0xc0, 0x01, 0x02, 0xc8, 0x01, 0x00, 0x52, 0x09, 0x61, 0x75, 0x74, 0x68,
+ 0x6f, 0x72, 0x69, 0x74, 0x79, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e,
+ 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x45, 0x6e, 0x76, 0x6f,
+ 0x79, 0x47, 0x72, 0x70, 0x63, 0x1a, 0xd9, 0x1d, 0x0a, 0x0a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x47, 0x72, 0x70, 0x63, 0x12, 0x26, 0x0a, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x75,
+ 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10,
+ 0x01, 0x52, 0x09, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x55, 0x72, 0x69, 0x12, 0x70, 0x0a, 0x13,
+ 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69,
+ 0x61, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33,
+ 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x43,
+ 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x12, 0x63, 0x68, 0x61, 0x6e,
+ 0x6e, 0x65, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x67,
+ 0x0a, 0x10, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61,
+ 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e,
+ 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65,
+ 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x0f, 0x63, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64,
+ 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x28, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x5f,
+ 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42,
+ 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69,
+ 0x78, 0x12, 0x38, 0x0a, 0x18, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73,
+ 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x16, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73,
+ 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74,
+ 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5e, 0x0a, 0x1d,
+ 0x70, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x62, 0x75, 0x66, 0x66, 0x65,
+ 0x72, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x07, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x52, 0x19, 0x70, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x42, 0x75, 0x66, 0x66,
+ 0x65, 0x72, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x5b, 0x0a, 0x0c,
+ 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x61, 0x72, 0x67, 0x73, 0x18, 0x08, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63,
+ 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, 0x52, 0x0b, 0x63, 0x68,
+ 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x9d, 0x02, 0x0a, 0x0e, 0x53, 0x73,
+ 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x3f, 0x0a, 0x0a,
+ 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x52, 0x09, 0x72, 0x6f, 0x6f, 0x74, 0x43, 0x65, 0x72, 0x74, 0x73, 0x12, 0x49, 0x0a,
+ 0x0b, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x42, 0x06, 0xb8, 0xb7, 0x8b, 0xa4, 0x02, 0x01, 0x52, 0x0a, 0x70, 0x72,
+ 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x3f, 0x0a, 0x0a, 0x63, 0x65, 0x72, 0x74,
+ 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65,
+ 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09,
+ 0x63, 0x65, 0x72, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x3a, 0x3e, 0x9a, 0xc5, 0x88, 0x1e, 0x39,
+ 0x0a, 0x37, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63,
+ 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e,
+ 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x73, 0x6c, 0x43, 0x72,
+ 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x1a, 0x60, 0x0a, 0x16, 0x47, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69,
+ 0x61, 0x6c, 0x73, 0x3a, 0x46, 0x9a, 0xc5, 0x88, 0x1e, 0x41, 0x0a, 0x3f, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72,
+ 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x47, 0x72, 0x70, 0x63, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c,
+ 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x1a, 0x92, 0x03, 0x0a, 0x12,
+ 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61,
+ 0x6c, 0x73, 0x12, 0x66, 0x0a, 0x0f, 0x73, 0x73, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e,
+ 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e,
+ 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x73, 0x6c, 0x43, 0x72, 0x65,
+ 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x0e, 0x73, 0x73, 0x6c, 0x43,
+ 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x3f, 0x0a, 0x0e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x0d, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x72, 0x0a, 0x11, 0x6c,
+ 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72,
+ 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x47, 0x72, 0x70, 0x63, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c,
+ 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x10, 0x6c,
+ 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x3a,
+ 0x42, 0x9a, 0xc5, 0x88, 0x1e, 0x3d, 0x0a, 0x3b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70,
+ 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63,
+ 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69,
+ 0x61, 0x6c, 0x73, 0x42, 0x1b, 0x0a, 0x14, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61,
+ 0x6c, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01,
+ 0x1a, 0xe7, 0x0f, 0x0a, 0x0f, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74,
+ 0x69, 0x61, 0x6c, 0x73, 0x12, 0x23, 0x0a, 0x0c, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x74,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x61, 0x63,
+ 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x4c, 0x0a, 0x15, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x5f, 0x65, 0x6e, 0x67, 0x69,
+ 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79,
+ 0x48, 0x00, 0x52, 0x13, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x75, 0x74,
+ 0x65, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x32, 0x0a, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x5f, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x12, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x52,
+ 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x9e, 0x01, 0x0a, 0x1a,
+ 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f,
+ 0x6a, 0x77, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x5f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43,
+ 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x53,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x4a, 0x57, 0x54,
+ 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c,
+ 0x73, 0x48, 0x00, 0x52, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f,
+ 0x75, 0x6e, 0x74, 0x4a, 0x77, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x72, 0x0a, 0x0a,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x69, 0x61, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x51, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43,
+ 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x47,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x49, 0x41, 0x4d, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69,
+ 0x61, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x09, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x49, 0x61, 0x6d,
+ 0x12, 0x7d, 0x0a, 0x0b, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18,
+ 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x5a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70,
+ 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47,
+ 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69,
+ 0x61, 0x6c, 0x73, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x43, 0x72, 0x65, 0x64,
+ 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6c, 0x75, 0x67, 0x69,
+ 0x6e, 0x48, 0x00, 0x52, 0x0a, 0x66, 0x72, 0x6f, 0x6d, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12,
+ 0x6a, 0x0a, 0x0b, 0x73, 0x74, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x07,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72,
+ 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61,
+ 0x6c, 0x73, 0x2e, 0x53, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x00, 0x52,
+ 0x0a, 0x73, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0xd9, 0x01, 0x0a, 0x22,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x4a, 0x57,
+ 0x54, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61,
+ 0x6c, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6a, 0x73, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x34, 0x0a,
+ 0x16, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x6c, 0x69, 0x66, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x5f,
+ 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x74,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x4c, 0x69, 0x66, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x63, 0x6f,
+ 0x6e, 0x64, 0x73, 0x3a, 0x62, 0x9a, 0xc5, 0x88, 0x1e, 0x5d, 0x0a, 0x5b, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72,
+ 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74,
+ 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f,
+ 0x75, 0x6e, 0x74, 0x4a, 0x57, 0x54, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x72, 0x65, 0x64,
+ 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x1a, 0xcc, 0x01, 0x0a, 0x14, 0x47, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x49, 0x41, 0x4d, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73,
+ 0x12, 0x2f, 0x0a, 0x13, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x61,
+ 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65,
+ 0x6e, 0x12, 0x2d, 0x0a, 0x12, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x73,
+ 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x61,
+ 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72,
+ 0x3a, 0x54, 0x9a, 0xc5, 0x88, 0x1e, 0x4f, 0x0a, 0x4d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61,
+ 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70,
+ 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c,
+ 0x73, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x49, 0x41, 0x4d, 0x43, 0x72, 0x65, 0x64, 0x65,
+ 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x1a, 0xc9, 0x02, 0x0a, 0x1d, 0x4d, 0x65, 0x74, 0x61, 0x64,
+ 0x61, 0x74, 0x61, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x72,
+ 0x6f, 0x6d, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c,
+ 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65,
+ 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6b, 0x0a, 0x1e, 0x68, 0x69, 0x64, 0x64, 0x65,
+ 0x6e, 0x5f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74,
+ 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8,
+ 0x04, 0x03, 0x33, 0x2e, 0x30, 0x48, 0x00, 0x52, 0x1b, 0x68, 0x69, 0x64, 0x64, 0x65, 0x6e, 0x45,
+ 0x6e, 0x76, 0x6f, 0x79, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x5d, 0x9a, 0xc5, 0x88, 0x1e, 0x58, 0x0a, 0x56, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47,
+ 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e,
+ 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x43, 0x72,
+ 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6c, 0x75,
+ 0x67, 0x69, 0x6e, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79,
+ 0x70, 0x65, 0x1a, 0xd7, 0x03, 0x0a, 0x0a, 0x53, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x12, 0x3b, 0x0a, 0x1a, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x65, 0x78, 0x63, 0x68, 0x61,
+ 0x6e, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x78, 0x63, 0x68,
+ 0x61, 0x6e, 0x67, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x72, 0x69, 0x12, 0x1a,
+ 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x75,
+ 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x75,
+ 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x30, 0x0a, 0x14,
+ 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x72, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x35,
+ 0x0a, 0x12, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f,
+ 0x70, 0x61, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72,
+ 0x02, 0x10, 0x01, 0x52, 0x10, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x6f, 0x6b, 0x65,
+ 0x6e, 0x50, 0x61, 0x74, 0x68, 0x12, 0x35, 0x0a, 0x12, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x10, 0x73, 0x75, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x10,
+ 0x61, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x70, 0x61, 0x74, 0x68,
+ 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x54, 0x6f, 0x6b,
+ 0x65, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x12, 0x28, 0x0a, 0x10, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x5f,
+ 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0e, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x79, 0x70, 0x65,
+ 0x3a, 0x4a, 0x9a, 0xc5, 0x88, 0x1e, 0x45, 0x0a, 0x43, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61,
+ 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70,
+ 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c,
+ 0x73, 0x2e, 0x53, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x3a, 0x3f, 0x9a, 0xc5,
+ 0x88, 0x1e, 0x3a, 0x0a, 0x38, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76,
+ 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61,
+ 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x42, 0x1b, 0x0a,
+ 0x14, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x70, 0x65, 0x63,
+ 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xc3, 0x02, 0x0a, 0x0b, 0x43,
+ 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, 0x12, 0x56, 0x0a, 0x04, 0x61, 0x72,
+ 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e,
+ 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72,
+ 0x67, 0x73, 0x2e, 0x41, 0x72, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x61, 0x72,
+ 0x67, 0x73, 0x1a, 0x63, 0x0a, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73,
+ 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x12, 0x1d, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42,
+ 0x16, 0x0a, 0x0f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69,
+ 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x77, 0x0a, 0x09, 0x41, 0x72, 0x67, 0x73, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x54, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70,
+ 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47,
+ 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, 0x2e,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01,
+ 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61,
+ 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70,
+ 0x63, 0x3a, 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x17, 0x0a, 0x10, 0x74, 0x61, 0x72, 0x67, 0x65,
+ 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01,
+ 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x42, 0x40, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x10, 0x47, 0x72,
+ 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
+ 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_core_v3_grpc_service_proto_rawDescOnce sync.Once
+ file_envoy_config_core_v3_grpc_service_proto_rawDescData = file_envoy_config_core_v3_grpc_service_proto_rawDesc
+)
+
+func file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP() []byte {
+ file_envoy_config_core_v3_grpc_service_proto_rawDescOnce.Do(func() {
+ file_envoy_config_core_v3_grpc_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_grpc_service_proto_rawDescData)
+ })
+ return file_envoy_config_core_v3_grpc_service_proto_rawDescData
+}
+
+var file_envoy_config_core_v3_grpc_service_proto_msgTypes = make([]protoimpl.MessageInfo, 14)
+var file_envoy_config_core_v3_grpc_service_proto_goTypes = []interface{}{
+ (*GrpcService)(nil), // 0: envoy.config.core.v3.GrpcService
+ (*GrpcService_EnvoyGrpc)(nil), // 1: envoy.config.core.v3.GrpcService.EnvoyGrpc
+ (*GrpcService_GoogleGrpc)(nil), // 2: envoy.config.core.v3.GrpcService.GoogleGrpc
+ (*GrpcService_GoogleGrpc_SslCredentials)(nil), // 3: envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials
+ (*GrpcService_GoogleGrpc_GoogleLocalCredentials)(nil), // 4: envoy.config.core.v3.GrpcService.GoogleGrpc.GoogleLocalCredentials
+ (*GrpcService_GoogleGrpc_ChannelCredentials)(nil), // 5: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials
+ (*GrpcService_GoogleGrpc_CallCredentials)(nil), // 6: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials
+ (*GrpcService_GoogleGrpc_ChannelArgs)(nil), // 7: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs
+ (*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials)(nil), // 8: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.ServiceAccountJWTAccessCredentials
+ (*GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials)(nil), // 9: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.GoogleIAMCredentials
+ (*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin)(nil), // 10: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.MetadataCredentialsFromPlugin
+ (*GrpcService_GoogleGrpc_CallCredentials_StsService)(nil), // 11: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.StsService
+ (*GrpcService_GoogleGrpc_ChannelArgs_Value)(nil), // 12: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.Value
+ nil, // 13: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.ArgsEntry
+ (*duration.Duration)(nil), // 14: google.protobuf.Duration
+ (*HeaderValue)(nil), // 15: envoy.config.core.v3.HeaderValue
+ (*_struct.Struct)(nil), // 16: google.protobuf.Struct
+ (*wrappers.UInt32Value)(nil), // 17: google.protobuf.UInt32Value
+ (*DataSource)(nil), // 18: envoy.config.core.v3.DataSource
+ (*emptypb.Empty)(nil), // 19: google.protobuf.Empty
+ (*any.Any)(nil), // 20: google.protobuf.Any
+}
+var file_envoy_config_core_v3_grpc_service_proto_depIdxs = []int32{
+ 1, // 0: envoy.config.core.v3.GrpcService.envoy_grpc:type_name -> envoy.config.core.v3.GrpcService.EnvoyGrpc
+ 2, // 1: envoy.config.core.v3.GrpcService.google_grpc:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc
+ 14, // 2: envoy.config.core.v3.GrpcService.timeout:type_name -> google.protobuf.Duration
+ 15, // 3: envoy.config.core.v3.GrpcService.initial_metadata:type_name -> envoy.config.core.v3.HeaderValue
+ 5, // 4: envoy.config.core.v3.GrpcService.GoogleGrpc.channel_credentials:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials
+ 6, // 5: envoy.config.core.v3.GrpcService.GoogleGrpc.call_credentials:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials
+ 16, // 6: envoy.config.core.v3.GrpcService.GoogleGrpc.config:type_name -> google.protobuf.Struct
+ 17, // 7: envoy.config.core.v3.GrpcService.GoogleGrpc.per_stream_buffer_limit_bytes:type_name -> google.protobuf.UInt32Value
+ 7, // 8: envoy.config.core.v3.GrpcService.GoogleGrpc.channel_args:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs
+ 18, // 9: envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials.root_certs:type_name -> envoy.config.core.v3.DataSource
+ 18, // 10: envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials.private_key:type_name -> envoy.config.core.v3.DataSource
+ 18, // 11: envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials.cert_chain:type_name -> envoy.config.core.v3.DataSource
+ 3, // 12: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials.ssl_credentials:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials
+ 19, // 13: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials.google_default:type_name -> google.protobuf.Empty
+ 4, // 14: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials.local_credentials:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.GoogleLocalCredentials
+ 19, // 15: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.google_compute_engine:type_name -> google.protobuf.Empty
+ 8, // 16: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.service_account_jwt_access:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.ServiceAccountJWTAccessCredentials
+ 9, // 17: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.google_iam:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.GoogleIAMCredentials
+ 10, // 18: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.from_plugin:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.MetadataCredentialsFromPlugin
+ 11, // 19: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.sts_service:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.StsService
+ 13, // 20: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.args:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.ArgsEntry
+ 20, // 21: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.MetadataCredentialsFromPlugin.typed_config:type_name -> google.protobuf.Any
+ 16, // 22: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.MetadataCredentialsFromPlugin.hidden_envoy_deprecated_config:type_name -> google.protobuf.Struct
+ 12, // 23: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.ArgsEntry.value:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.Value
+ 24, // [24:24] is the sub-list for method output_type
+ 24, // [24:24] is the sub-list for method input_type
+ 24, // [24:24] is the sub-list for extension type_name
+ 24, // [24:24] is the sub-list for extension extendee
+ 0, // [0:24] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_core_v3_grpc_service_proto_init() }
+func file_envoy_config_core_v3_grpc_service_proto_init() {
+ if File_envoy_config_core_v3_grpc_service_proto != nil {
+ return
+ }
+ file_envoy_config_core_v3_base_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GrpcService); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GrpcService_EnvoyGrpc); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GrpcService_GoogleGrpc); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GrpcService_GoogleGrpc_SslCredentials); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GrpcService_GoogleGrpc_GoogleLocalCredentials); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GrpcService_GoogleGrpc_ChannelCredentials); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GrpcService_GoogleGrpc_CallCredentials); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GrpcService_GoogleGrpc_ChannelArgs); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GrpcService_GoogleGrpc_CallCredentials_StsService); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GrpcService_GoogleGrpc_ChannelArgs_Value); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*GrpcService_EnvoyGrpc_)(nil),
+ (*GrpcService_GoogleGrpc_)(nil),
+ }
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[5].OneofWrappers = []interface{}{
+ (*GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials)(nil),
+ (*GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault)(nil),
+ (*GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials)(nil),
+ }
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[6].OneofWrappers = []interface{}{
+ (*GrpcService_GoogleGrpc_CallCredentials_AccessToken)(nil),
+ (*GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine)(nil),
+ (*GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken)(nil),
+ (*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess)(nil),
+ (*GrpcService_GoogleGrpc_CallCredentials_GoogleIam)(nil),
+ (*GrpcService_GoogleGrpc_CallCredentials_FromPlugin)(nil),
+ (*GrpcService_GoogleGrpc_CallCredentials_StsService_)(nil),
+ }
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[10].OneofWrappers = []interface{}{
+ (*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig)(nil),
+ (*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_HiddenEnvoyDeprecatedConfig)(nil),
+ }
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[12].OneofWrappers = []interface{}{
+ (*GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue)(nil),
+ (*GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_core_v3_grpc_service_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 14,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_core_v3_grpc_service_proto_goTypes,
+ DependencyIndexes: file_envoy_config_core_v3_grpc_service_proto_depIdxs,
+ MessageInfos: file_envoy_config_core_v3_grpc_service_proto_msgTypes,
+ }.Build()
+ File_envoy_config_core_v3_grpc_service_proto = out.File
+ file_envoy_config_core_v3_grpc_service_proto_rawDesc = nil
+ file_envoy_config_core_v3_grpc_service_proto_goTypes = nil
+ file_envoy_config_core_v3_grpc_service_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.validate.go
new file mode 100644
index 000000000..fea8740e7
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.validate.go
@@ -0,0 +1,1365 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/core/v3/grpc_service.proto
+
+package envoy_config_core_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// Validate checks the field values on GrpcService with the rules defined in
+// the proto definition for this message. If any rules are violated, an error
+// is returned.
+func (m *GrpcService) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if v, ok := interface{}(m.GetTimeout()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcServiceValidationError{
+ field: "Timeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ for idx, item := range m.GetInitialMetadata() {
+ _, _ = idx, item
+
+ if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcServiceValidationError{
+ field: fmt.Sprintf("InitialMetadata[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ switch m.TargetSpecifier.(type) {
+
+ case *GrpcService_EnvoyGrpc_:
+
+ if v, ok := interface{}(m.GetEnvoyGrpc()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcServiceValidationError{
+ field: "EnvoyGrpc",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *GrpcService_GoogleGrpc_:
+
+ if v, ok := interface{}(m.GetGoogleGrpc()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcServiceValidationError{
+ field: "GoogleGrpc",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ return GrpcServiceValidationError{
+ field: "TargetSpecifier",
+ reason: "value is required",
+ }
+
+ }
+
+ return nil
+}
+
+// GrpcServiceValidationError is the validation error returned by
+// GrpcService.Validate if the designated constraints aren't met.
+type GrpcServiceValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e GrpcServiceValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e GrpcServiceValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e GrpcServiceValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e GrpcServiceValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e GrpcServiceValidationError) ErrorName() string { return "GrpcServiceValidationError" }
+
+// Error satisfies the builtin error interface
+func (e GrpcServiceValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sGrpcService.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = GrpcServiceValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = GrpcServiceValidationError{}
+
+// Validate checks the field values on GrpcService_EnvoyGrpc with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *GrpcService_EnvoyGrpc) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if utf8.RuneCountInString(m.GetClusterName()) < 1 {
+ return GrpcService_EnvoyGrpcValidationError{
+ field: "ClusterName",
+ reason: "value length must be at least 1 runes",
+ }
+ }
+
+ if utf8.RuneCountInString(m.GetAuthority()) < 0 {
+ return GrpcService_EnvoyGrpcValidationError{
+ field: "Authority",
+ reason: "value length must be at least 0 runes",
+ }
+ }
+
+ if len(m.GetAuthority()) > 16384 {
+ return GrpcService_EnvoyGrpcValidationError{
+ field: "Authority",
+ reason: "value length must be at most 16384 bytes",
+ }
+ }
+
+ if !_GrpcService_EnvoyGrpc_Authority_Pattern.MatchString(m.GetAuthority()) {
+ return GrpcService_EnvoyGrpcValidationError{
+ field: "Authority",
+ reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"",
+ }
+ }
+
+ return nil
+}
+
+// GrpcService_EnvoyGrpcValidationError is the validation error returned by
+// GrpcService_EnvoyGrpc.Validate if the designated constraints aren't met.
+type GrpcService_EnvoyGrpcValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e GrpcService_EnvoyGrpcValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e GrpcService_EnvoyGrpcValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e GrpcService_EnvoyGrpcValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e GrpcService_EnvoyGrpcValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e GrpcService_EnvoyGrpcValidationError) ErrorName() string {
+ return "GrpcService_EnvoyGrpcValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e GrpcService_EnvoyGrpcValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sGrpcService_EnvoyGrpc.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = GrpcService_EnvoyGrpcValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = GrpcService_EnvoyGrpcValidationError{}
+
+var _GrpcService_EnvoyGrpc_Authority_Pattern = regexp.MustCompile("^[^\x00\n\r]*$")
+
+// Validate checks the field values on GrpcService_GoogleGrpc with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *GrpcService_GoogleGrpc) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if utf8.RuneCountInString(m.GetTargetUri()) < 1 {
+ return GrpcService_GoogleGrpcValidationError{
+ field: "TargetUri",
+ reason: "value length must be at least 1 runes",
+ }
+ }
+
+ if v, ok := interface{}(m.GetChannelCredentials()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcService_GoogleGrpcValidationError{
+ field: "ChannelCredentials",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ for idx, item := range m.GetCallCredentials() {
+ _, _ = idx, item
+
+ if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcService_GoogleGrpcValidationError{
+ field: fmt.Sprintf("CallCredentials[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if utf8.RuneCountInString(m.GetStatPrefix()) < 1 {
+ return GrpcService_GoogleGrpcValidationError{
+ field: "StatPrefix",
+ reason: "value length must be at least 1 runes",
+ }
+ }
+
+ // no validation rules for CredentialsFactoryName
+
+ if v, ok := interface{}(m.GetConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcService_GoogleGrpcValidationError{
+ field: "Config",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if v, ok := interface{}(m.GetPerStreamBufferLimitBytes()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcService_GoogleGrpcValidationError{
+ field: "PerStreamBufferLimitBytes",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if v, ok := interface{}(m.GetChannelArgs()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcService_GoogleGrpcValidationError{
+ field: "ChannelArgs",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ return nil
+}
+
+// GrpcService_GoogleGrpcValidationError is the validation error returned by
+// GrpcService_GoogleGrpc.Validate if the designated constraints aren't met.
+type GrpcService_GoogleGrpcValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e GrpcService_GoogleGrpcValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e GrpcService_GoogleGrpcValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e GrpcService_GoogleGrpcValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e GrpcService_GoogleGrpcValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e GrpcService_GoogleGrpcValidationError) ErrorName() string {
+ return "GrpcService_GoogleGrpcValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e GrpcService_GoogleGrpcValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sGrpcService_GoogleGrpc.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = GrpcService_GoogleGrpcValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = GrpcService_GoogleGrpcValidationError{}
+
+// Validate checks the field values on GrpcService_GoogleGrpc_SslCredentials
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, an error is returned.
+func (m *GrpcService_GoogleGrpc_SslCredentials) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if v, ok := interface{}(m.GetRootCerts()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcService_GoogleGrpc_SslCredentialsValidationError{
+ field: "RootCerts",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if v, ok := interface{}(m.GetPrivateKey()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcService_GoogleGrpc_SslCredentialsValidationError{
+ field: "PrivateKey",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if v, ok := interface{}(m.GetCertChain()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcService_GoogleGrpc_SslCredentialsValidationError{
+ field: "CertChain",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ return nil
+}
+
+// GrpcService_GoogleGrpc_SslCredentialsValidationError is the validation error
+// returned by GrpcService_GoogleGrpc_SslCredentials.Validate if the
+// designated constraints aren't met.
+type GrpcService_GoogleGrpc_SslCredentialsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e GrpcService_GoogleGrpc_SslCredentialsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e GrpcService_GoogleGrpc_SslCredentialsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e GrpcService_GoogleGrpc_SslCredentialsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e GrpcService_GoogleGrpc_SslCredentialsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e GrpcService_GoogleGrpc_SslCredentialsValidationError) ErrorName() string {
+ return "GrpcService_GoogleGrpc_SslCredentialsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e GrpcService_GoogleGrpc_SslCredentialsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sGrpcService_GoogleGrpc_SslCredentials.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = GrpcService_GoogleGrpc_SslCredentialsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = GrpcService_GoogleGrpc_SslCredentialsValidationError{}
+
+// Validate checks the field values on
+// GrpcService_GoogleGrpc_GoogleLocalCredentials with the rules defined in the
+// proto definition for this message. If any rules are violated, an error is returned.
+func (m *GrpcService_GoogleGrpc_GoogleLocalCredentials) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ return nil
+}
+
+// GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError is the
+// validation error returned by
+// GrpcService_GoogleGrpc_GoogleLocalCredentials.Validate if the designated
+// constraints aren't met.
+type GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError) Reason() string {
+ return e.reason
+}
+
+// Cause function returns cause value.
+func (e GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError) ErrorName() string {
+ return "GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sGrpcService_GoogleGrpc_GoogleLocalCredentials.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError{}
+
+// Validate checks the field values on
+// GrpcService_GoogleGrpc_ChannelCredentials with the rules defined in the
+// proto definition for this message. If any rules are violated, an error is returned.
+func (m *GrpcService_GoogleGrpc_ChannelCredentials) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ switch m.CredentialSpecifier.(type) {
+
+ case *GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials:
+
+ if v, ok := interface{}(m.GetSslCredentials()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcService_GoogleGrpc_ChannelCredentialsValidationError{
+ field: "SslCredentials",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault:
+
+ if v, ok := interface{}(m.GetGoogleDefault()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcService_GoogleGrpc_ChannelCredentialsValidationError{
+ field: "GoogleDefault",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials:
+
+ if v, ok := interface{}(m.GetLocalCredentials()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcService_GoogleGrpc_ChannelCredentialsValidationError{
+ field: "LocalCredentials",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ return GrpcService_GoogleGrpc_ChannelCredentialsValidationError{
+ field: "CredentialSpecifier",
+ reason: "value is required",
+ }
+
+ }
+
+ return nil
+}
+
+// GrpcService_GoogleGrpc_ChannelCredentialsValidationError is the validation
+// error returned by GrpcService_GoogleGrpc_ChannelCredentials.Validate if the
+// designated constraints aren't met.
+type GrpcService_GoogleGrpc_ChannelCredentialsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e GrpcService_GoogleGrpc_ChannelCredentialsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e GrpcService_GoogleGrpc_ChannelCredentialsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e GrpcService_GoogleGrpc_ChannelCredentialsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e GrpcService_GoogleGrpc_ChannelCredentialsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e GrpcService_GoogleGrpc_ChannelCredentialsValidationError) ErrorName() string {
+ return "GrpcService_GoogleGrpc_ChannelCredentialsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e GrpcService_GoogleGrpc_ChannelCredentialsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sGrpcService_GoogleGrpc_ChannelCredentials.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = GrpcService_GoogleGrpc_ChannelCredentialsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = GrpcService_GoogleGrpc_ChannelCredentialsValidationError{}
+
+// Validate checks the field values on GrpcService_GoogleGrpc_CallCredentials
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, an error is returned.
+func (m *GrpcService_GoogleGrpc_CallCredentials) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ switch m.CredentialSpecifier.(type) {
+
+ case *GrpcService_GoogleGrpc_CallCredentials_AccessToken:
+ // no validation rules for AccessToken
+
+ case *GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine:
+
+ if v, ok := interface{}(m.GetGoogleComputeEngine()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcService_GoogleGrpc_CallCredentialsValidationError{
+ field: "GoogleComputeEngine",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken:
+ // no validation rules for GoogleRefreshToken
+
+ case *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess:
+
+ if v, ok := interface{}(m.GetServiceAccountJwtAccess()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcService_GoogleGrpc_CallCredentialsValidationError{
+ field: "ServiceAccountJwtAccess",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *GrpcService_GoogleGrpc_CallCredentials_GoogleIam:
+
+ if v, ok := interface{}(m.GetGoogleIam()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcService_GoogleGrpc_CallCredentialsValidationError{
+ field: "GoogleIam",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *GrpcService_GoogleGrpc_CallCredentials_FromPlugin:
+
+ if v, ok := interface{}(m.GetFromPlugin()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcService_GoogleGrpc_CallCredentialsValidationError{
+ field: "FromPlugin",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *GrpcService_GoogleGrpc_CallCredentials_StsService_:
+
+ if v, ok := interface{}(m.GetStsService()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcService_GoogleGrpc_CallCredentialsValidationError{
+ field: "StsService",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ return GrpcService_GoogleGrpc_CallCredentialsValidationError{
+ field: "CredentialSpecifier",
+ reason: "value is required",
+ }
+
+ }
+
+ return nil
+}
+
+// GrpcService_GoogleGrpc_CallCredentialsValidationError is the validation
+// error returned by GrpcService_GoogleGrpc_CallCredentials.Validate if the
+// designated constraints aren't met.
+type GrpcService_GoogleGrpc_CallCredentialsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e GrpcService_GoogleGrpc_CallCredentialsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e GrpcService_GoogleGrpc_CallCredentialsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e GrpcService_GoogleGrpc_CallCredentialsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e GrpcService_GoogleGrpc_CallCredentialsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e GrpcService_GoogleGrpc_CallCredentialsValidationError) ErrorName() string {
+ return "GrpcService_GoogleGrpc_CallCredentialsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e GrpcService_GoogleGrpc_CallCredentialsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sGrpcService_GoogleGrpc_CallCredentials.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = GrpcService_GoogleGrpc_CallCredentialsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = GrpcService_GoogleGrpc_CallCredentialsValidationError{}
+
+// Validate checks the field values on GrpcService_GoogleGrpc_ChannelArgs with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, an error is returned.
+func (m *GrpcService_GoogleGrpc_ChannelArgs) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ for key, val := range m.GetArgs() {
+ _ = val
+
+ // no validation rules for Args[key]
+
+ if v, ok := interface{}(val).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcService_GoogleGrpc_ChannelArgsValidationError{
+ field: fmt.Sprintf("Args[%v]", key),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// GrpcService_GoogleGrpc_ChannelArgsValidationError is the validation error
+// returned by GrpcService_GoogleGrpc_ChannelArgs.Validate if the designated
+// constraints aren't met.
+type GrpcService_GoogleGrpc_ChannelArgsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e GrpcService_GoogleGrpc_ChannelArgsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e GrpcService_GoogleGrpc_ChannelArgsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e GrpcService_GoogleGrpc_ChannelArgsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e GrpcService_GoogleGrpc_ChannelArgsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e GrpcService_GoogleGrpc_ChannelArgsValidationError) ErrorName() string {
+ return "GrpcService_GoogleGrpc_ChannelArgsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e GrpcService_GoogleGrpc_ChannelArgsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sGrpcService_GoogleGrpc_ChannelArgs.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = GrpcService_GoogleGrpc_ChannelArgsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = GrpcService_GoogleGrpc_ChannelArgsValidationError{}
+
+// Validate checks the field values on
+// GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, an error is returned.
+func (m *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ // no validation rules for JsonKey
+
+ // no validation rules for TokenLifetimeSeconds
+
+ return nil
+}
+
+// GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError
+// is the validation error returned by
+// GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials.Validate
+// if the designated constraints aren't met.
+type GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError) Field() string {
+ return e.field
+}
+
+// Reason function returns reason value.
+func (e GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError) Reason() string {
+ return e.reason
+}
+
+// Cause function returns cause value.
+func (e GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError) Cause() error {
+ return e.cause
+}
+
+// Key function returns key value.
+func (e GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError) Key() bool {
+ return e.key
+}
+
+// ErrorName returns error name.
+func (e GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError) ErrorName() string {
+ return "GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sGrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError{}
+
+// Validate checks the field values on
+// GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ // no validation rules for AuthorizationToken
+
+ // no validation rules for AuthoritySelector
+
+ return nil
+}
+
+// GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError
+// is the validation error returned by
+// GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials.Validate if the
+// designated constraints aren't met.
+type GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError) Field() string {
+ return e.field
+}
+
+// Reason function returns reason value.
+func (e GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError) Reason() string {
+ return e.reason
+}
+
+// Cause function returns cause value.
+func (e GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError) Cause() error {
+ return e.cause
+}
+
+// Key function returns key value.
+func (e GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError) Key() bool {
+ return e.key
+}
+
+// ErrorName returns error name.
+func (e GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError) ErrorName() string {
+ return "GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sGrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError{}
+
+// Validate checks the field values on
+// GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, an error is returned.
+func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ // no validation rules for Name
+
+ switch m.ConfigType.(type) {
+
+ case *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig:
+
+ if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError{
+ field: "TypedConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_HiddenEnvoyDeprecatedConfig:
+
+ if v, ok := interface{}(m.GetHiddenEnvoyDeprecatedConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError{
+ field: "HiddenEnvoyDeprecatedConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError
+// is the validation error returned by
+// GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin.Validate
+// if the designated constraints aren't met.
+type GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError) Field() string {
+ return e.field
+}
+
+// Reason function returns reason value.
+func (e GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError) Reason() string {
+ return e.reason
+}
+
+// Cause function returns cause value.
+func (e GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError) Cause() error {
+ return e.cause
+}
+
+// Key function returns key value.
+func (e GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError) Key() bool {
+ return e.key
+}
+
+// ErrorName returns error name.
+func (e GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError) ErrorName() string {
+ return "GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError{}
+
+// Validate checks the field values on
+// GrpcService_GoogleGrpc_CallCredentials_StsService with the rules defined in
+// the proto definition for this message. If any rules are violated, an error
+// is returned.
+func (m *GrpcService_GoogleGrpc_CallCredentials_StsService) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ // no validation rules for TokenExchangeServiceUri
+
+ // no validation rules for Resource
+
+ // no validation rules for Audience
+
+ // no validation rules for Scope
+
+ // no validation rules for RequestedTokenType
+
+ if utf8.RuneCountInString(m.GetSubjectTokenPath()) < 1 {
+ return GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError{
+ field: "SubjectTokenPath",
+ reason: "value length must be at least 1 runes",
+ }
+ }
+
+ if utf8.RuneCountInString(m.GetSubjectTokenType()) < 1 {
+ return GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError{
+ field: "SubjectTokenType",
+ reason: "value length must be at least 1 runes",
+ }
+ }
+
+ // no validation rules for ActorTokenPath
+
+ // no validation rules for ActorTokenType
+
+ return nil
+}
+
+// GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError is the
+// validation error returned by
+// GrpcService_GoogleGrpc_CallCredentials_StsService.Validate if the
+// designated constraints aren't met.
+type GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError) Field() string {
+ return e.field
+}
+
+// Reason function returns reason value.
+func (e GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError) Reason() string {
+ return e.reason
+}
+
+// Cause function returns cause value.
+func (e GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError) Cause() error {
+ return e.cause
+}
+
+// Key function returns key value.
+func (e GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError) ErrorName() string {
+ return "GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sGrpcService_GoogleGrpc_CallCredentials_StsService.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError{}
+
+// Validate checks the field values on GrpcService_GoogleGrpc_ChannelArgs_Value
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, an error is returned.
+func (m *GrpcService_GoogleGrpc_ChannelArgs_Value) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ switch m.ValueSpecifier.(type) {
+
+ case *GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue:
+ // no validation rules for StringValue
+
+ case *GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue:
+ // no validation rules for IntValue
+
+ default:
+ return GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError{
+ field: "ValueSpecifier",
+ reason: "value is required",
+ }
+
+ }
+
+ return nil
+}
+
+// GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError is the validation
+// error returned by GrpcService_GoogleGrpc_ChannelArgs_Value.Validate if the
+// designated constraints aren't met.
+type GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError) ErrorName() string {
+ return "GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sGrpcService_GoogleGrpc_ChannelArgs_Value.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.go
new file mode 100644
index 000000000..182d1552a
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.go
@@ -0,0 +1,1531 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/config/core/v3/health_check.proto
+
+package envoy_config_core_v3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/go-control-plane/envoy/annotations"
+ v31 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3"
+ v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ any "github.com/golang/protobuf/ptypes/any"
+ duration "github.com/golang/protobuf/ptypes/duration"
+ _struct "github.com/golang/protobuf/ptypes/struct"
+ wrappers "github.com/golang/protobuf/ptypes/wrappers"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// Endpoint health status.
+type HealthStatus int32
+
+const (
+ // The health status is not known. This is interpreted by Envoy as *HEALTHY*.
+ HealthStatus_UNKNOWN HealthStatus = 0
+ // Healthy.
+ HealthStatus_HEALTHY HealthStatus = 1
+ // Unhealthy.
+ HealthStatus_UNHEALTHY HealthStatus = 2
+ // Connection draining in progress. E.g.,
+ // ``_
+ // or
+ // ``_.
+ // This is interpreted by Envoy as *UNHEALTHY*.
+ HealthStatus_DRAINING HealthStatus = 3
+ // Health check timed out. This is part of HDS and is interpreted by Envoy as
+ // *UNHEALTHY*.
+ HealthStatus_TIMEOUT HealthStatus = 4
+ // Degraded.
+ HealthStatus_DEGRADED HealthStatus = 5
+)
+
+// Enum value maps for HealthStatus.
+var (
+ HealthStatus_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "HEALTHY",
+ 2: "UNHEALTHY",
+ 3: "DRAINING",
+ 4: "TIMEOUT",
+ 5: "DEGRADED",
+ }
+ HealthStatus_value = map[string]int32{
+ "UNKNOWN": 0,
+ "HEALTHY": 1,
+ "UNHEALTHY": 2,
+ "DRAINING": 3,
+ "TIMEOUT": 4,
+ "DEGRADED": 5,
+ }
+)
+
+func (x HealthStatus) Enum() *HealthStatus {
+ p := new(HealthStatus)
+ *p = x
+ return p
+}
+
+func (x HealthStatus) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (HealthStatus) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_core_v3_health_check_proto_enumTypes[0].Descriptor()
+}
+
+func (HealthStatus) Type() protoreflect.EnumType {
+ return &file_envoy_config_core_v3_health_check_proto_enumTypes[0]
+}
+
+func (x HealthStatus) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use HealthStatus.Descriptor instead.
+func (HealthStatus) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{0}
+}
+
+// [#next-free-field: 25]
+type HealthCheck struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The time to wait for a health check response. If the timeout is reached the
+ // health check attempt will be considered a failure.
+ Timeout *duration.Duration `protobuf:"bytes,1,opt,name=timeout,proto3" json:"timeout,omitempty"`
+ // The interval between health checks.
+ Interval *duration.Duration `protobuf:"bytes,2,opt,name=interval,proto3" json:"interval,omitempty"`
+ // An optional jitter amount in milliseconds. If specified, Envoy will start health
+ // checking after for a random time in ms between 0 and initial_jitter. This only
+ // applies to the first health check.
+ InitialJitter *duration.Duration `protobuf:"bytes,20,opt,name=initial_jitter,json=initialJitter,proto3" json:"initial_jitter,omitempty"`
+ // An optional jitter amount in milliseconds. If specified, during every
+ // interval Envoy will add interval_jitter to the wait time.
+ IntervalJitter *duration.Duration `protobuf:"bytes,3,opt,name=interval_jitter,json=intervalJitter,proto3" json:"interval_jitter,omitempty"`
+ // An optional jitter amount as a percentage of interval_ms. If specified,
+ // during every interval Envoy will add interval_ms *
+ // interval_jitter_percent / 100 to the wait time.
+ //
+ // If interval_jitter_ms and interval_jitter_percent are both set, both of
+ // them will be used to increase the wait time.
+ IntervalJitterPercent uint32 `protobuf:"varint,18,opt,name=interval_jitter_percent,json=intervalJitterPercent,proto3" json:"interval_jitter_percent,omitempty"`
+ // The number of unhealthy health checks required before a host is marked
+ // unhealthy. Note that for *http* health checking if a host responds with 503
+ // this threshold is ignored and the host is considered unhealthy immediately.
+ UnhealthyThreshold *wrappers.UInt32Value `protobuf:"bytes,4,opt,name=unhealthy_threshold,json=unhealthyThreshold,proto3" json:"unhealthy_threshold,omitempty"`
+ // The number of healthy health checks required before a host is marked
+ // healthy. Note that during startup, only a single successful health check is
+ // required to mark a host healthy.
+ HealthyThreshold *wrappers.UInt32Value `protobuf:"bytes,5,opt,name=healthy_threshold,json=healthyThreshold,proto3" json:"healthy_threshold,omitempty"`
+ // [#not-implemented-hide:] Non-serving port for health checking.
+ AltPort *wrappers.UInt32Value `protobuf:"bytes,6,opt,name=alt_port,json=altPort,proto3" json:"alt_port,omitempty"`
+ // Reuse health check connection between health checks. Default is true.
+ ReuseConnection *wrappers.BoolValue `protobuf:"bytes,7,opt,name=reuse_connection,json=reuseConnection,proto3" json:"reuse_connection,omitempty"`
+ // Types that are assignable to HealthChecker:
+ // *HealthCheck_HttpHealthCheck_
+ // *HealthCheck_TcpHealthCheck_
+ // *HealthCheck_GrpcHealthCheck_
+ // *HealthCheck_CustomHealthCheck_
+ HealthChecker isHealthCheck_HealthChecker `protobuf_oneof:"health_checker"`
+ // The "no traffic interval" is a special health check interval that is used when a cluster has
+ // never had traffic routed to it. This lower interval allows cluster information to be kept up to
+ // date, without sending a potentially large amount of active health checking traffic for no
+ // reason. Once a cluster has been used for traffic routing, Envoy will shift back to using the
+ // standard health check interval that is defined. Note that this interval takes precedence over
+ // any other.
+ //
+ // The default value for "no traffic interval" is 60 seconds.
+ NoTrafficInterval *duration.Duration `protobuf:"bytes,12,opt,name=no_traffic_interval,json=noTrafficInterval,proto3" json:"no_traffic_interval,omitempty"`
+ // The "no traffic healthy interval" is a special health check interval that
+ // is used for hosts that are currently passing active health checking
+ // (including new hosts) when the cluster has received no traffic.
+ //
+ // This is useful for when we want to send frequent health checks with
+ // `no_traffic_interval` but then revert to lower frequency `no_traffic_healthy_interval` once
+ // a host in the cluster is marked as healthy.
+ //
+ // Once a cluster has been used for traffic routing, Envoy will shift back to using the
+ // standard health check interval that is defined.
+ //
+ // If no_traffic_healthy_interval is not set, it will default to the
+ // no traffic interval and send that interval regardless of health state.
+ NoTrafficHealthyInterval *duration.Duration `protobuf:"bytes,24,opt,name=no_traffic_healthy_interval,json=noTrafficHealthyInterval,proto3" json:"no_traffic_healthy_interval,omitempty"`
+ // The "unhealthy interval" is a health check interval that is used for hosts that are marked as
+ // unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the
+ // standard health check interval that is defined.
+ //
+ // The default value for "unhealthy interval" is the same as "interval".
+ UnhealthyInterval *duration.Duration `protobuf:"bytes,14,opt,name=unhealthy_interval,json=unhealthyInterval,proto3" json:"unhealthy_interval,omitempty"`
+ // The "unhealthy edge interval" is a special health check interval that is used for the first
+ // health check right after a host is marked as unhealthy. For subsequent health checks
+ // Envoy will shift back to using either "unhealthy interval" if present or the standard health
+ // check interval that is defined.
+ //
+ // The default value for "unhealthy edge interval" is the same as "unhealthy interval".
+ UnhealthyEdgeInterval *duration.Duration `protobuf:"bytes,15,opt,name=unhealthy_edge_interval,json=unhealthyEdgeInterval,proto3" json:"unhealthy_edge_interval,omitempty"`
+ // The "healthy edge interval" is a special health check interval that is used for the first
+ // health check right after a host is marked as healthy. For subsequent health checks
+ // Envoy will shift back to using the standard health check interval that is defined.
+ //
+ // The default value for "healthy edge interval" is the same as the default interval.
+ HealthyEdgeInterval *duration.Duration `protobuf:"bytes,16,opt,name=healthy_edge_interval,json=healthyEdgeInterval,proto3" json:"healthy_edge_interval,omitempty"`
+ // Specifies the path to the :ref:`health check event log `.
+ // If empty, no event log will be written.
+ EventLogPath string `protobuf:"bytes,17,opt,name=event_log_path,json=eventLogPath,proto3" json:"event_log_path,omitempty"`
+ // [#not-implemented-hide:]
+ // The gRPC service for the health check event service.
+ // If empty, health check events won't be sent to a remote endpoint.
+ EventService *EventServiceConfig `protobuf:"bytes,22,opt,name=event_service,json=eventService,proto3" json:"event_service,omitempty"`
+ // If set to true, health check failure events will always be logged. If set to false, only the
+ // initial health check failure event will be logged.
+ // The default value is false.
+ AlwaysLogHealthCheckFailures bool `protobuf:"varint,19,opt,name=always_log_health_check_failures,json=alwaysLogHealthCheckFailures,proto3" json:"always_log_health_check_failures,omitempty"`
+ // This allows overriding the cluster TLS settings, just for health check connections.
+ TlsOptions *HealthCheck_TlsOptions `protobuf:"bytes,21,opt,name=tls_options,json=tlsOptions,proto3" json:"tls_options,omitempty"`
+ // Optional key/value pairs that will be used to match a transport socket from those specified in the cluster's
+ // :ref:`tranport socket matches `.
+ // For example, the following match criteria
+ //
+ // .. code-block:: yaml
+ //
+ // transport_socket_match_criteria:
+ // useMTLS: true
+ //
+ // Will match the following :ref:`cluster socket match `
+ //
+ // .. code-block:: yaml
+ //
+ // transport_socket_matches:
+ // - name: "useMTLS"
+ // match:
+ // useMTLS: true
+ // transport_socket:
+ // name: envoy.transport_sockets.tls
+ // config: { ... } # tls socket configuration
+ //
+ // If this field is set, then for health checks it will supersede an entry of *envoy.transport_socket* in the
+ // :ref:`LbEndpoint.Metadata `.
+ // This allows using different transport socket capabilities for health checking versus proxying to the
+ // endpoint.
+ //
+ // If the key/values pairs specified do not match any
+ // :ref:`transport socket matches `,
+ // the cluster's :ref:`transport socket `
+ // will be used for health check socket configuration.
+ TransportSocketMatchCriteria *_struct.Struct `protobuf:"bytes,23,opt,name=transport_socket_match_criteria,json=transportSocketMatchCriteria,proto3" json:"transport_socket_match_criteria,omitempty"`
+}
+
+func (x *HealthCheck) Reset() {
+ *x = HealthCheck{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HealthCheck) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthCheck) ProtoMessage() {}
+
+func (x *HealthCheck) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthCheck.ProtoReflect.Descriptor instead.
+func (*HealthCheck) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *HealthCheck) GetTimeout() *duration.Duration {
+ if x != nil {
+ return x.Timeout
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetInterval() *duration.Duration {
+ if x != nil {
+ return x.Interval
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetInitialJitter() *duration.Duration {
+ if x != nil {
+ return x.InitialJitter
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetIntervalJitter() *duration.Duration {
+ if x != nil {
+ return x.IntervalJitter
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetIntervalJitterPercent() uint32 {
+ if x != nil {
+ return x.IntervalJitterPercent
+ }
+ return 0
+}
+
+func (x *HealthCheck) GetUnhealthyThreshold() *wrappers.UInt32Value {
+ if x != nil {
+ return x.UnhealthyThreshold
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetHealthyThreshold() *wrappers.UInt32Value {
+ if x != nil {
+ return x.HealthyThreshold
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetAltPort() *wrappers.UInt32Value {
+ if x != nil {
+ return x.AltPort
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetReuseConnection() *wrappers.BoolValue {
+ if x != nil {
+ return x.ReuseConnection
+ }
+ return nil
+}
+
+func (m *HealthCheck) GetHealthChecker() isHealthCheck_HealthChecker {
+ if m != nil {
+ return m.HealthChecker
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetHttpHealthCheck() *HealthCheck_HttpHealthCheck {
+ if x, ok := x.GetHealthChecker().(*HealthCheck_HttpHealthCheck_); ok {
+ return x.HttpHealthCheck
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetTcpHealthCheck() *HealthCheck_TcpHealthCheck {
+ if x, ok := x.GetHealthChecker().(*HealthCheck_TcpHealthCheck_); ok {
+ return x.TcpHealthCheck
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetGrpcHealthCheck() *HealthCheck_GrpcHealthCheck {
+ if x, ok := x.GetHealthChecker().(*HealthCheck_GrpcHealthCheck_); ok {
+ return x.GrpcHealthCheck
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetCustomHealthCheck() *HealthCheck_CustomHealthCheck {
+ if x, ok := x.GetHealthChecker().(*HealthCheck_CustomHealthCheck_); ok {
+ return x.CustomHealthCheck
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetNoTrafficInterval() *duration.Duration {
+ if x != nil {
+ return x.NoTrafficInterval
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetNoTrafficHealthyInterval() *duration.Duration {
+ if x != nil {
+ return x.NoTrafficHealthyInterval
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetUnhealthyInterval() *duration.Duration {
+ if x != nil {
+ return x.UnhealthyInterval
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetUnhealthyEdgeInterval() *duration.Duration {
+ if x != nil {
+ return x.UnhealthyEdgeInterval
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetHealthyEdgeInterval() *duration.Duration {
+ if x != nil {
+ return x.HealthyEdgeInterval
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetEventLogPath() string {
+ if x != nil {
+ return x.EventLogPath
+ }
+ return ""
+}
+
+func (x *HealthCheck) GetEventService() *EventServiceConfig {
+ if x != nil {
+ return x.EventService
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetAlwaysLogHealthCheckFailures() bool {
+ if x != nil {
+ return x.AlwaysLogHealthCheckFailures
+ }
+ return false
+}
+
+func (x *HealthCheck) GetTlsOptions() *HealthCheck_TlsOptions {
+ if x != nil {
+ return x.TlsOptions
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetTransportSocketMatchCriteria() *_struct.Struct {
+ if x != nil {
+ return x.TransportSocketMatchCriteria
+ }
+ return nil
+}
+
+type isHealthCheck_HealthChecker interface {
+ isHealthCheck_HealthChecker()
+}
+
+type HealthCheck_HttpHealthCheck_ struct {
+ // HTTP health check.
+ HttpHealthCheck *HealthCheck_HttpHealthCheck `protobuf:"bytes,8,opt,name=http_health_check,json=httpHealthCheck,proto3,oneof"`
+}
+
+type HealthCheck_TcpHealthCheck_ struct {
+ // TCP health check.
+ TcpHealthCheck *HealthCheck_TcpHealthCheck `protobuf:"bytes,9,opt,name=tcp_health_check,json=tcpHealthCheck,proto3,oneof"`
+}
+
+type HealthCheck_GrpcHealthCheck_ struct {
+ // gRPC health check.
+ GrpcHealthCheck *HealthCheck_GrpcHealthCheck `protobuf:"bytes,11,opt,name=grpc_health_check,json=grpcHealthCheck,proto3,oneof"`
+}
+
+type HealthCheck_CustomHealthCheck_ struct {
+ // Custom health check.
+ CustomHealthCheck *HealthCheck_CustomHealthCheck `protobuf:"bytes,13,opt,name=custom_health_check,json=customHealthCheck,proto3,oneof"`
+}
+
+func (*HealthCheck_HttpHealthCheck_) isHealthCheck_HealthChecker() {}
+
+func (*HealthCheck_TcpHealthCheck_) isHealthCheck_HealthChecker() {}
+
+func (*HealthCheck_GrpcHealthCheck_) isHealthCheck_HealthChecker() {}
+
+func (*HealthCheck_CustomHealthCheck_) isHealthCheck_HealthChecker() {}
+
+// Describes the encoding of the payload bytes in the payload.
+type HealthCheck_Payload struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Payload:
+ // *HealthCheck_Payload_Text
+ // *HealthCheck_Payload_Binary
+ Payload isHealthCheck_Payload_Payload `protobuf_oneof:"payload"`
+}
+
+func (x *HealthCheck_Payload) Reset() {
+ *x = HealthCheck_Payload{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HealthCheck_Payload) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthCheck_Payload) ProtoMessage() {}
+
+func (x *HealthCheck_Payload) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthCheck_Payload.ProtoReflect.Descriptor instead.
+func (*HealthCheck_Payload) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (m *HealthCheck_Payload) GetPayload() isHealthCheck_Payload_Payload {
+ if m != nil {
+ return m.Payload
+ }
+ return nil
+}
+
+func (x *HealthCheck_Payload) GetText() string {
+ if x, ok := x.GetPayload().(*HealthCheck_Payload_Text); ok {
+ return x.Text
+ }
+ return ""
+}
+
+func (x *HealthCheck_Payload) GetBinary() []byte {
+ if x, ok := x.GetPayload().(*HealthCheck_Payload_Binary); ok {
+ return x.Binary
+ }
+ return nil
+}
+
+type isHealthCheck_Payload_Payload interface {
+ isHealthCheck_Payload_Payload()
+}
+
+type HealthCheck_Payload_Text struct {
+ // Hex encoded payload. E.g., "000000FF".
+ Text string `protobuf:"bytes,1,opt,name=text,proto3,oneof"`
+}
+
+type HealthCheck_Payload_Binary struct {
+ // [#not-implemented-hide:] Binary payload.
+ Binary []byte `protobuf:"bytes,2,opt,name=binary,proto3,oneof"`
+}
+
+func (*HealthCheck_Payload_Text) isHealthCheck_Payload_Payload() {}
+
+func (*HealthCheck_Payload_Binary) isHealthCheck_Payload_Payload() {}
+
+// [#next-free-field: 12]
+type HealthCheck_HttpHealthCheck struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The value of the host header in the HTTP health check request. If
+ // left empty (default value), the name of the cluster this health check is associated
+ // with will be used. The host header can be customized for a specific endpoint by setting the
+ // :ref:`hostname ` field.
+ Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"`
+ // Specifies the HTTP path that will be requested during health checking. For example
+ // */healthcheck*.
+ Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"`
+ // [#not-implemented-hide:] HTTP specific payload.
+ Send *HealthCheck_Payload `protobuf:"bytes,3,opt,name=send,proto3" json:"send,omitempty"`
+ // [#not-implemented-hide:] HTTP specific response.
+ Receive *HealthCheck_Payload `protobuf:"bytes,4,opt,name=receive,proto3" json:"receive,omitempty"`
+ // Specifies a list of HTTP headers that should be added to each request that is sent to the
+ // health checked cluster. For more information, including details on header value syntax, see
+ // the documentation on :ref:`custom request headers
+ // `.
+ RequestHeadersToAdd []*HeaderValueOption `protobuf:"bytes,6,rep,name=request_headers_to_add,json=requestHeadersToAdd,proto3" json:"request_headers_to_add,omitempty"`
+ // Specifies a list of HTTP headers that should be removed from each request that is sent to the
+ // health checked cluster.
+ RequestHeadersToRemove []string `protobuf:"bytes,8,rep,name=request_headers_to_remove,json=requestHeadersToRemove,proto3" json:"request_headers_to_remove,omitempty"`
+ // Specifies a list of HTTP response statuses considered healthy. If provided, replaces default
+ // 200-only policy - 200 must be included explicitly as needed. Ranges follow half-open
+ // semantics of :ref:`Int64Range `. The start and end of each
+ // range are required. Only statuses in the range [100, 600) are allowed.
+ ExpectedStatuses []*v3.Int64Range `protobuf:"bytes,9,rep,name=expected_statuses,json=expectedStatuses,proto3" json:"expected_statuses,omitempty"`
+ // Use specified application protocol for health checks.
+ CodecClientType v3.CodecClientType `protobuf:"varint,10,opt,name=codec_client_type,json=codecClientType,proto3,enum=envoy.type.v3.CodecClientType" json:"codec_client_type,omitempty"`
+ // An optional service name parameter which is used to validate the identity of
+ // the health checked cluster using a :ref:`StringMatcher
+ // `. See the :ref:`architecture overview
+ // ` for more information.
+ ServiceNameMatcher *v31.StringMatcher `protobuf:"bytes,11,opt,name=service_name_matcher,json=serviceNameMatcher,proto3" json:"service_name_matcher,omitempty"`
+ // Deprecated: Do not use.
+ HiddenEnvoyDeprecatedServiceName string `protobuf:"bytes,5,opt,name=hidden_envoy_deprecated_service_name,json=hiddenEnvoyDeprecatedServiceName,proto3" json:"hidden_envoy_deprecated_service_name,omitempty"`
+ // Deprecated: Do not use.
+ HiddenEnvoyDeprecatedUseHttp2 bool `protobuf:"varint,7,opt,name=hidden_envoy_deprecated_use_http2,json=hiddenEnvoyDeprecatedUseHttp2,proto3" json:"hidden_envoy_deprecated_use_http2,omitempty"`
+}
+
+func (x *HealthCheck_HttpHealthCheck) Reset() {
+ *x = HealthCheck_HttpHealthCheck{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HealthCheck_HttpHealthCheck) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthCheck_HttpHealthCheck) ProtoMessage() {}
+
+func (x *HealthCheck_HttpHealthCheck) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthCheck_HttpHealthCheck.ProtoReflect.Descriptor instead.
+func (*HealthCheck_HttpHealthCheck) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{0, 1}
+}
+
+func (x *HealthCheck_HttpHealthCheck) GetHost() string {
+ if x != nil {
+ return x.Host
+ }
+ return ""
+}
+
+func (x *HealthCheck_HttpHealthCheck) GetPath() string {
+ if x != nil {
+ return x.Path
+ }
+ return ""
+}
+
+func (x *HealthCheck_HttpHealthCheck) GetSend() *HealthCheck_Payload {
+ if x != nil {
+ return x.Send
+ }
+ return nil
+}
+
+func (x *HealthCheck_HttpHealthCheck) GetReceive() *HealthCheck_Payload {
+ if x != nil {
+ return x.Receive
+ }
+ return nil
+}
+
+func (x *HealthCheck_HttpHealthCheck) GetRequestHeadersToAdd() []*HeaderValueOption {
+ if x != nil {
+ return x.RequestHeadersToAdd
+ }
+ return nil
+}
+
+func (x *HealthCheck_HttpHealthCheck) GetRequestHeadersToRemove() []string {
+ if x != nil {
+ return x.RequestHeadersToRemove
+ }
+ return nil
+}
+
+func (x *HealthCheck_HttpHealthCheck) GetExpectedStatuses() []*v3.Int64Range {
+ if x != nil {
+ return x.ExpectedStatuses
+ }
+ return nil
+}
+
+func (x *HealthCheck_HttpHealthCheck) GetCodecClientType() v3.CodecClientType {
+ if x != nil {
+ return x.CodecClientType
+ }
+ return v3.CodecClientType_HTTP1
+}
+
+func (x *HealthCheck_HttpHealthCheck) GetServiceNameMatcher() *v31.StringMatcher {
+ if x != nil {
+ return x.ServiceNameMatcher
+ }
+ return nil
+}
+
+// Deprecated: Do not use.
+func (x *HealthCheck_HttpHealthCheck) GetHiddenEnvoyDeprecatedServiceName() string {
+ if x != nil {
+ return x.HiddenEnvoyDeprecatedServiceName
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (x *HealthCheck_HttpHealthCheck) GetHiddenEnvoyDeprecatedUseHttp2() bool {
+ if x != nil {
+ return x.HiddenEnvoyDeprecatedUseHttp2
+ }
+ return false
+}
+
+type HealthCheck_TcpHealthCheck struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Empty payloads imply a connect-only health check.
+ Send *HealthCheck_Payload `protobuf:"bytes,1,opt,name=send,proto3" json:"send,omitempty"`
+ // When checking the response, “fuzzy” matching is performed such that each
+ // binary block must be found, and in the order specified, but not
+ // necessarily contiguous.
+ Receive []*HealthCheck_Payload `protobuf:"bytes,2,rep,name=receive,proto3" json:"receive,omitempty"`
+}
+
+func (x *HealthCheck_TcpHealthCheck) Reset() {
+ *x = HealthCheck_TcpHealthCheck{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HealthCheck_TcpHealthCheck) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthCheck_TcpHealthCheck) ProtoMessage() {}
+
+func (x *HealthCheck_TcpHealthCheck) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthCheck_TcpHealthCheck.ProtoReflect.Descriptor instead.
+func (*HealthCheck_TcpHealthCheck) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{0, 2}
+}
+
+func (x *HealthCheck_TcpHealthCheck) GetSend() *HealthCheck_Payload {
+ if x != nil {
+ return x.Send
+ }
+ return nil
+}
+
+func (x *HealthCheck_TcpHealthCheck) GetReceive() []*HealthCheck_Payload {
+ if x != nil {
+ return x.Receive
+ }
+ return nil
+}
+
+type HealthCheck_RedisHealthCheck struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // If set, optionally perform ``EXISTS `` instead of ``PING``. A return value
+ // from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other
+ // than 0 is considered a failure. This allows the user to mark a Redis instance for maintenance
+ // by setting the specified key to any value and waiting for traffic to drain.
+ Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+}
+
+func (x *HealthCheck_RedisHealthCheck) Reset() {
+ *x = HealthCheck_RedisHealthCheck{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HealthCheck_RedisHealthCheck) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthCheck_RedisHealthCheck) ProtoMessage() {}
+
+func (x *HealthCheck_RedisHealthCheck) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthCheck_RedisHealthCheck.ProtoReflect.Descriptor instead.
+func (*HealthCheck_RedisHealthCheck) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{0, 3}
+}
+
+func (x *HealthCheck_RedisHealthCheck) GetKey() string {
+ if x != nil {
+ return x.Key
+ }
+ return ""
+}
+
+// `grpc.health.v1.Health
+// `_-based
+// healthcheck. See `gRPC doc `_
+// for details.
+type HealthCheck_GrpcHealthCheck struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // An optional service name parameter which will be sent to gRPC service in
+ // `grpc.health.v1.HealthCheckRequest
+ // `_.
+ // message. See `gRPC health-checking overview
+ // `_ for more information.
+ ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"`
+ // The value of the :authority header in the gRPC health check request. If
+ // left empty (default value), the name of the cluster this health check is associated
+ // with will be used. The authority header can be customized for a specific endpoint by setting
+ // the :ref:`hostname ` field.
+ Authority string `protobuf:"bytes,2,opt,name=authority,proto3" json:"authority,omitempty"`
+}
+
+func (x *HealthCheck_GrpcHealthCheck) Reset() {
+ *x = HealthCheck_GrpcHealthCheck{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HealthCheck_GrpcHealthCheck) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthCheck_GrpcHealthCheck) ProtoMessage() {}
+
+func (x *HealthCheck_GrpcHealthCheck) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthCheck_GrpcHealthCheck.ProtoReflect.Descriptor instead.
+func (*HealthCheck_GrpcHealthCheck) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{0, 4}
+}
+
+func (x *HealthCheck_GrpcHealthCheck) GetServiceName() string {
+ if x != nil {
+ return x.ServiceName
+ }
+ return ""
+}
+
+func (x *HealthCheck_GrpcHealthCheck) GetAuthority() string {
+ if x != nil {
+ return x.Authority
+ }
+ return ""
+}
+
+// Custom health check.
+type HealthCheck_CustomHealthCheck struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The registered name of the custom health checker.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // A custom health checker specific configuration which depends on the custom health checker
+ // being instantiated. See :api:`envoy/config/health_checker` for reference.
+ // [#extension-category: envoy.health_checkers]
+ //
+ // Types that are assignable to ConfigType:
+ // *HealthCheck_CustomHealthCheck_TypedConfig
+ // *HealthCheck_CustomHealthCheck_HiddenEnvoyDeprecatedConfig
+ ConfigType isHealthCheck_CustomHealthCheck_ConfigType `protobuf_oneof:"config_type"`
+}
+
+func (x *HealthCheck_CustomHealthCheck) Reset() {
+ *x = HealthCheck_CustomHealthCheck{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HealthCheck_CustomHealthCheck) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthCheck_CustomHealthCheck) ProtoMessage() {}
+
+func (x *HealthCheck_CustomHealthCheck) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthCheck_CustomHealthCheck.ProtoReflect.Descriptor instead.
+func (*HealthCheck_CustomHealthCheck) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{0, 5}
+}
+
+func (x *HealthCheck_CustomHealthCheck) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (m *HealthCheck_CustomHealthCheck) GetConfigType() isHealthCheck_CustomHealthCheck_ConfigType {
+ if m != nil {
+ return m.ConfigType
+ }
+ return nil
+}
+
+func (x *HealthCheck_CustomHealthCheck) GetTypedConfig() *any.Any {
+ if x, ok := x.GetConfigType().(*HealthCheck_CustomHealthCheck_TypedConfig); ok {
+ return x.TypedConfig
+ }
+ return nil
+}
+
+// Deprecated: Do not use.
+func (x *HealthCheck_CustomHealthCheck) GetHiddenEnvoyDeprecatedConfig() *_struct.Struct {
+ if x, ok := x.GetConfigType().(*HealthCheck_CustomHealthCheck_HiddenEnvoyDeprecatedConfig); ok {
+ return x.HiddenEnvoyDeprecatedConfig
+ }
+ return nil
+}
+
+type isHealthCheck_CustomHealthCheck_ConfigType interface {
+ isHealthCheck_CustomHealthCheck_ConfigType()
+}
+
+type HealthCheck_CustomHealthCheck_TypedConfig struct {
+ TypedConfig *any.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"`
+}
+
+type HealthCheck_CustomHealthCheck_HiddenEnvoyDeprecatedConfig struct {
+ // Deprecated: Do not use.
+ HiddenEnvoyDeprecatedConfig *_struct.Struct `protobuf:"bytes,2,opt,name=hidden_envoy_deprecated_config,json=hiddenEnvoyDeprecatedConfig,proto3,oneof"`
+}
+
+func (*HealthCheck_CustomHealthCheck_TypedConfig) isHealthCheck_CustomHealthCheck_ConfigType() {}
+
+func (*HealthCheck_CustomHealthCheck_HiddenEnvoyDeprecatedConfig) isHealthCheck_CustomHealthCheck_ConfigType() {
+}
+
+// Health checks occur over the transport socket specified for the cluster. This implies that if a
+// cluster is using a TLS-enabled transport socket, the health check will also occur over TLS.
+//
+// This allows overriding the cluster TLS settings, just for health check connections.
+type HealthCheck_TlsOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Specifies the ALPN protocols for health check connections. This is useful if the
+ // corresponding upstream is using ALPN-based :ref:`FilterChainMatch
+ // ` along with different protocols for health checks
+ // versus data connections. If empty, no ALPN protocols will be set on health check connections.
+ AlpnProtocols []string `protobuf:"bytes,1,rep,name=alpn_protocols,json=alpnProtocols,proto3" json:"alpn_protocols,omitempty"`
+}
+
+func (x *HealthCheck_TlsOptions) Reset() {
+ *x = HealthCheck_TlsOptions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HealthCheck_TlsOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthCheck_TlsOptions) ProtoMessage() {}
+
+func (x *HealthCheck_TlsOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthCheck_TlsOptions.ProtoReflect.Descriptor instead.
+func (*HealthCheck_TlsOptions) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{0, 6}
+}
+
+func (x *HealthCheck_TlsOptions) GetAlpnProtocols() []string {
+ if x != nil {
+ return x.AlpnProtocols
+ }
+ return nil
+}
+
+var File_envoy_config_core_v3_health_check_proto protoreflect.FileDescriptor
+
+var file_envoy_config_core_v3_health_check_proto_rawDesc = []byte{
+ 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68,
+ 0x65, 0x63, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a,
+ 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f,
+ 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70,
+ 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x19, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72,
+ 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69,
+ 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x22, 0xba, 0x1d, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68,
+ 0x65, 0x63, 0x6b, 0x12, 0x3f, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42,
+ 0x0a, 0xfa, 0x42, 0x07, 0xaa, 0x01, 0x04, 0x08, 0x01, 0x2a, 0x00, 0x52, 0x07, 0x74, 0x69, 0x6d,
+ 0x65, 0x6f, 0x75, 0x74, 0x12, 0x41, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0xaa, 0x01, 0x04, 0x08, 0x01, 0x2a, 0x00, 0x52, 0x08, 0x69,
+ 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x40, 0x0a, 0x0e, 0x69, 0x6e, 0x69, 0x74, 0x69,
+ 0x61, 0x6c, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x69, 0x6e, 0x69, 0x74,
+ 0x69, 0x61, 0x6c, 0x4a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0f, 0x69, 0x6e, 0x74,
+ 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x69,
+ 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x4a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x12, 0x36, 0x0a,
+ 0x17, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72,
+ 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x15,
+ 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x4a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x50, 0x65,
+ 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x57, 0x0a, 0x13, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74,
+ 0x68, 0x79, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x12, 0x75, 0x6e, 0x68, 0x65,
+ 0x61, 0x6c, 0x74, 0x68, 0x79, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x53,
+ 0x0a, 0x11, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68,
+ 0x6f, 0x6c, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74,
+ 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10,
+ 0x01, 0x52, 0x10, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68,
+ 0x6f, 0x6c, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x61, 0x6c, 0x74, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18,
+ 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x52, 0x07, 0x61, 0x6c, 0x74, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x45, 0x0a, 0x10,
+ 0x72, 0x65, 0x75, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c,
+ 0x75, 0x65, 0x52, 0x0f, 0x72, 0x65, 0x75, 0x73, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x5f, 0x0a, 0x11, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x68, 0x65, 0x61, 0x6c,
+ 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63,
+ 0x6b, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63,
+ 0x6b, 0x48, 0x00, 0x52, 0x0f, 0x68, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43,
+ 0x68, 0x65, 0x63, 0x6b, 0x12, 0x5c, 0x0a, 0x10, 0x74, 0x63, 0x70, 0x5f, 0x68, 0x65, 0x61, 0x6c,
+ 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63,
+ 0x6b, 0x2e, 0x54, 0x63, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b,
+ 0x48, 0x00, 0x52, 0x0e, 0x74, 0x63, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
+ 0x63, 0x6b, 0x12, 0x5f, 0x0a, 0x11, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74,
+ 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72,
+ 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b,
+ 0x2e, 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b,
+ 0x48, 0x00, 0x52, 0x0f, 0x67, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68,
+ 0x65, 0x63, 0x6b, 0x12, 0x65, 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x68, 0x65,
+ 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68,
+ 0x65, 0x63, 0x6b, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x00, 0x52, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48,
+ 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x53, 0x0a, 0x13, 0x6e, 0x6f,
+ 0x5f, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61,
+ 0x6c, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x11, 0x6e, 0x6f,
+ 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12,
+ 0x62, 0x0a, 0x1b, 0x6e, 0x6f, 0x5f, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x68, 0x65,
+ 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x18,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42,
+ 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x18, 0x6e, 0x6f, 0x54, 0x72, 0x61,
+ 0x66, 0x66, 0x69, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x49, 0x6e, 0x74, 0x65, 0x72,
+ 0x76, 0x61, 0x6c, 0x12, 0x52, 0x0a, 0x12, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79,
+ 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa,
+ 0x01, 0x02, 0x2a, 0x00, 0x52, 0x11, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x49,
+ 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x5b, 0x0a, 0x17, 0x75, 0x6e, 0x68, 0x65, 0x61,
+ 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76,
+ 0x61, 0x6c, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x15, 0x75,
+ 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x45, 0x64, 0x67, 0x65, 0x49, 0x6e, 0x74, 0x65,
+ 0x72, 0x76, 0x61, 0x6c, 0x12, 0x57, 0x0a, 0x15, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f,
+ 0x65, 0x64, 0x67, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x10, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08,
+ 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x13, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68,
+ 0x79, 0x45, 0x64, 0x67, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x24, 0x0a,
+ 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18,
+ 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x50,
+ 0x61, 0x74, 0x68, 0x12, 0x4d, 0x0a, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76,
+ 0x33, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x12, 0x46, 0x0a, 0x20, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x5f, 0x6c, 0x6f, 0x67,
+ 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x66, 0x61,
+ 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1c, 0x61, 0x6c,
+ 0x77, 0x61, 0x79, 0x73, 0x4c, 0x6f, 0x67, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
+ 0x63, 0x6b, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x12, 0x4d, 0x0a, 0x0b, 0x74, 0x6c,
+ 0x73, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63,
+ 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
+ 0x63, 0x6b, 0x2e, 0x54, 0x6c, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0a, 0x74,
+ 0x6c, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5e, 0x0a, 0x1f, 0x74, 0x72, 0x61,
+ 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x5f, 0x63, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x18, 0x17, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x1c, 0x74, 0x72, 0x61,
+ 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4d, 0x61, 0x74, 0x63,
+ 0x68, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x1a, 0x80, 0x01, 0x0a, 0x07, 0x50, 0x61,
+ 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x1d, 0x0a, 0x04, 0x74, 0x65, 0x78, 0x74, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x04,
+ 0x74, 0x65, 0x78, 0x74, 0x12, 0x18, 0x0a, 0x06, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x06, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x3a, 0x2c,
+ 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69,
+ 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43,
+ 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x0e, 0x0a, 0x07,
+ 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xf5, 0x06, 0x0a,
+ 0x0f, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b,
+ 0x12, 0x1f, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b,
+ 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x02, 0xc8, 0x01, 0x00, 0x52, 0x04, 0x68, 0x6f, 0x73,
+ 0x74, 0x12, 0x21, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc0, 0x01, 0x02, 0xc8, 0x01, 0x00, 0x52, 0x04,
+ 0x70, 0x61, 0x74, 0x68, 0x12, 0x3d, 0x0a, 0x04, 0x73, 0x65, 0x6e, 0x64, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x04, 0x73,
+ 0x65, 0x6e, 0x64, 0x12, 0x43, 0x0a, 0x07, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c,
+ 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52,
+ 0x07, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x12, 0x67, 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61,
+ 0x64, 0x64, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e,
+ 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x13, 0x72, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64,
+ 0x64, 0x12, 0x4b, 0x0a, 0x19, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61,
+ 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x08,
+ 0x20, 0x03, 0x28, 0x09, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x92, 0x01, 0x0a, 0x22, 0x08, 0x72, 0x06,
+ 0xc0, 0x01, 0x01, 0xc8, 0x01, 0x00, 0x52, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48,
+ 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x46,
+ 0x0a, 0x11, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52,
+ 0x61, 0x6e, 0x67, 0x65, 0x52, 0x10, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x53, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x54, 0x0a, 0x11, 0x63, 0x6f, 0x64, 0x65, 0x63, 0x5f,
+ 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28,
+ 0x0e, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76,
+ 0x33, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70,
+ 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0f, 0x63, 0x6f, 0x64,
+ 0x65, 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x56, 0x0a, 0x14,
+ 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e,
+ 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x12, 0x5b, 0x0a, 0x24, 0x68, 0x69, 0x64, 0x64, 0x65, 0x6e, 0x5f, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f,
+ 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x52,
+ 0x20, 0x68, 0x69, 0x64, 0x64, 0x65, 0x6e, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x44, 0x65, 0x70, 0x72,
+ 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d,
+ 0x65, 0x12, 0x5b, 0x0a, 0x21, 0x68, 0x69, 0x64, 0x64, 0x65, 0x6e, 0x5f, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x75, 0x73, 0x65,
+ 0x5f, 0x68, 0x74, 0x74, 0x70, 0x32, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x42, 0x11, 0x18, 0x01,
+ 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, 0x01, 0x52,
+ 0x1d, 0x68, 0x69, 0x64, 0x64, 0x65, 0x6e, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x44, 0x65, 0x70, 0x72,
+ 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x55, 0x73, 0x65, 0x48, 0x74, 0x74, 0x70, 0x32, 0x3a, 0x34,
+ 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69,
+ 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43,
+ 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43,
+ 0x68, 0x65, 0x63, 0x6b, 0x1a, 0xc9, 0x01, 0x0a, 0x0e, 0x54, 0x63, 0x70, 0x48, 0x65, 0x61, 0x6c,
+ 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x3d, 0x0a, 0x04, 0x73, 0x65, 0x6e, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61,
+ 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64,
+ 0x52, 0x04, 0x73, 0x65, 0x6e, 0x64, 0x12, 0x43, 0x0a, 0x07, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76,
+ 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48,
+ 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f,
+ 0x61, 0x64, 0x52, 0x07, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x3a, 0x33, 0x9a, 0xc5, 0x88,
+ 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32,
+ 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63,
+ 0x6b, 0x2e, 0x54, 0x63, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b,
+ 0x1a, 0x5b, 0x0a, 0x10, 0x52, 0x65, 0x64, 0x69, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43,
+ 0x68, 0x65, 0x63, 0x6b, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x3a, 0x35, 0x9a, 0xc5, 0x88, 0x1e, 0x30, 0x0a, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65,
+ 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x52, 0x65, 0x64,
+ 0x69, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x1a, 0x95, 0x01,
+ 0x0a, 0x0f, 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63,
+ 0x6b, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74,
+ 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0, 0x01,
+ 0x02, 0xc8, 0x01, 0x00, 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x3a,
+ 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70,
+ 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x1a, 0x9f, 0x02, 0x0a, 0x11, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d,
+ 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1b, 0x0a, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02,
+ 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65,
+ 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x12, 0x6b, 0x0a, 0x1e, 0x68, 0x69, 0x64, 0x64, 0x65, 0x6e, 0x5f, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74,
+ 0x72, 0x75, 0x63, 0x74, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e,
+ 0x30, 0x48, 0x00, 0x52, 0x1b, 0x68, 0x69, 0x64, 0x64, 0x65, 0x6e, 0x45, 0x6e, 0x76, 0x6f, 0x79,
+ 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x3a, 0x36, 0x9a, 0xc5, 0x88, 0x1e, 0x31, 0x0a, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61,
+ 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74,
+ 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61,
+ 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x1a, 0x64, 0x0a, 0x0a, 0x54, 0x6c, 0x73, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x61,
+ 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x3a, 0x2f, 0x9a, 0xc5,
+ 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76,
+ 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
+ 0x63, 0x6b, 0x2e, 0x54, 0x6c, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x24, 0x9a,
+ 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e,
+ 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68,
+ 0x65, 0x63, 0x6b, 0x42, 0x15, 0x0a, 0x0e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68,
+ 0x65, 0x63, 0x6b, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0b,
+ 0x2a, 0x60, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, 0x0a,
+ 0x07, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x4e,
+ 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x52, 0x41,
+ 0x49, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x49, 0x4d, 0x45, 0x4f,
+ 0x55, 0x54, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x47, 0x52, 0x41, 0x44, 0x45, 0x44,
+ 0x10, 0x05, 0x42, 0x40, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72,
+ 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x10, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0xba, 0x80, 0xc8, 0xd1,
+ 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_core_v3_health_check_proto_rawDescOnce sync.Once
+ file_envoy_config_core_v3_health_check_proto_rawDescData = file_envoy_config_core_v3_health_check_proto_rawDesc
+)
+
+func file_envoy_config_core_v3_health_check_proto_rawDescGZIP() []byte {
+ file_envoy_config_core_v3_health_check_proto_rawDescOnce.Do(func() {
+ file_envoy_config_core_v3_health_check_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_health_check_proto_rawDescData)
+ })
+ return file_envoy_config_core_v3_health_check_proto_rawDescData
+}
+
+var file_envoy_config_core_v3_health_check_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_envoy_config_core_v3_health_check_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
+var file_envoy_config_core_v3_health_check_proto_goTypes = []interface{}{
+ (HealthStatus)(0), // 0: envoy.config.core.v3.HealthStatus
+ (*HealthCheck)(nil), // 1: envoy.config.core.v3.HealthCheck
+ (*HealthCheck_Payload)(nil), // 2: envoy.config.core.v3.HealthCheck.Payload
+ (*HealthCheck_HttpHealthCheck)(nil), // 3: envoy.config.core.v3.HealthCheck.HttpHealthCheck
+ (*HealthCheck_TcpHealthCheck)(nil), // 4: envoy.config.core.v3.HealthCheck.TcpHealthCheck
+ (*HealthCheck_RedisHealthCheck)(nil), // 5: envoy.config.core.v3.HealthCheck.RedisHealthCheck
+ (*HealthCheck_GrpcHealthCheck)(nil), // 6: envoy.config.core.v3.HealthCheck.GrpcHealthCheck
+ (*HealthCheck_CustomHealthCheck)(nil), // 7: envoy.config.core.v3.HealthCheck.CustomHealthCheck
+ (*HealthCheck_TlsOptions)(nil), // 8: envoy.config.core.v3.HealthCheck.TlsOptions
+ (*duration.Duration)(nil), // 9: google.protobuf.Duration
+ (*wrappers.UInt32Value)(nil), // 10: google.protobuf.UInt32Value
+ (*wrappers.BoolValue)(nil), // 11: google.protobuf.BoolValue
+ (*EventServiceConfig)(nil), // 12: envoy.config.core.v3.EventServiceConfig
+ (*_struct.Struct)(nil), // 13: google.protobuf.Struct
+ (*HeaderValueOption)(nil), // 14: envoy.config.core.v3.HeaderValueOption
+ (*v3.Int64Range)(nil), // 15: envoy.type.v3.Int64Range
+ (v3.CodecClientType)(0), // 16: envoy.type.v3.CodecClientType
+ (*v31.StringMatcher)(nil), // 17: envoy.type.matcher.v3.StringMatcher
+ (*any.Any)(nil), // 18: google.protobuf.Any
+}
+var file_envoy_config_core_v3_health_check_proto_depIdxs = []int32{
+ 9, // 0: envoy.config.core.v3.HealthCheck.timeout:type_name -> google.protobuf.Duration
+ 9, // 1: envoy.config.core.v3.HealthCheck.interval:type_name -> google.protobuf.Duration
+ 9, // 2: envoy.config.core.v3.HealthCheck.initial_jitter:type_name -> google.protobuf.Duration
+ 9, // 3: envoy.config.core.v3.HealthCheck.interval_jitter:type_name -> google.protobuf.Duration
+ 10, // 4: envoy.config.core.v3.HealthCheck.unhealthy_threshold:type_name -> google.protobuf.UInt32Value
+ 10, // 5: envoy.config.core.v3.HealthCheck.healthy_threshold:type_name -> google.protobuf.UInt32Value
+ 10, // 6: envoy.config.core.v3.HealthCheck.alt_port:type_name -> google.protobuf.UInt32Value
+ 11, // 7: envoy.config.core.v3.HealthCheck.reuse_connection:type_name -> google.protobuf.BoolValue
+ 3, // 8: envoy.config.core.v3.HealthCheck.http_health_check:type_name -> envoy.config.core.v3.HealthCheck.HttpHealthCheck
+ 4, // 9: envoy.config.core.v3.HealthCheck.tcp_health_check:type_name -> envoy.config.core.v3.HealthCheck.TcpHealthCheck
+ 6, // 10: envoy.config.core.v3.HealthCheck.grpc_health_check:type_name -> envoy.config.core.v3.HealthCheck.GrpcHealthCheck
+ 7, // 11: envoy.config.core.v3.HealthCheck.custom_health_check:type_name -> envoy.config.core.v3.HealthCheck.CustomHealthCheck
+ 9, // 12: envoy.config.core.v3.HealthCheck.no_traffic_interval:type_name -> google.protobuf.Duration
+ 9, // 13: envoy.config.core.v3.HealthCheck.no_traffic_healthy_interval:type_name -> google.protobuf.Duration
+ 9, // 14: envoy.config.core.v3.HealthCheck.unhealthy_interval:type_name -> google.protobuf.Duration
+ 9, // 15: envoy.config.core.v3.HealthCheck.unhealthy_edge_interval:type_name -> google.protobuf.Duration
+ 9, // 16: envoy.config.core.v3.HealthCheck.healthy_edge_interval:type_name -> google.protobuf.Duration
+ 12, // 17: envoy.config.core.v3.HealthCheck.event_service:type_name -> envoy.config.core.v3.EventServiceConfig
+ 8, // 18: envoy.config.core.v3.HealthCheck.tls_options:type_name -> envoy.config.core.v3.HealthCheck.TlsOptions
+ 13, // 19: envoy.config.core.v3.HealthCheck.transport_socket_match_criteria:type_name -> google.protobuf.Struct
+ 2, // 20: envoy.config.core.v3.HealthCheck.HttpHealthCheck.send:type_name -> envoy.config.core.v3.HealthCheck.Payload
+ 2, // 21: envoy.config.core.v3.HealthCheck.HttpHealthCheck.receive:type_name -> envoy.config.core.v3.HealthCheck.Payload
+ 14, // 22: envoy.config.core.v3.HealthCheck.HttpHealthCheck.request_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption
+ 15, // 23: envoy.config.core.v3.HealthCheck.HttpHealthCheck.expected_statuses:type_name -> envoy.type.v3.Int64Range
+ 16, // 24: envoy.config.core.v3.HealthCheck.HttpHealthCheck.codec_client_type:type_name -> envoy.type.v3.CodecClientType
+ 17, // 25: envoy.config.core.v3.HealthCheck.HttpHealthCheck.service_name_matcher:type_name -> envoy.type.matcher.v3.StringMatcher
+ 2, // 26: envoy.config.core.v3.HealthCheck.TcpHealthCheck.send:type_name -> envoy.config.core.v3.HealthCheck.Payload
+ 2, // 27: envoy.config.core.v3.HealthCheck.TcpHealthCheck.receive:type_name -> envoy.config.core.v3.HealthCheck.Payload
+ 18, // 28: envoy.config.core.v3.HealthCheck.CustomHealthCheck.typed_config:type_name -> google.protobuf.Any
+ 13, // 29: envoy.config.core.v3.HealthCheck.CustomHealthCheck.hidden_envoy_deprecated_config:type_name -> google.protobuf.Struct
+ 30, // [30:30] is the sub-list for method output_type
+ 30, // [30:30] is the sub-list for method input_type
+ 30, // [30:30] is the sub-list for extension type_name
+ 30, // [30:30] is the sub-list for extension extendee
+ 0, // [0:30] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_core_v3_health_check_proto_init() }
+func file_envoy_config_core_v3_health_check_proto_init() {
+ if File_envoy_config_core_v3_health_check_proto != nil {
+ return
+ }
+ file_envoy_config_core_v3_base_proto_init()
+ file_envoy_config_core_v3_event_service_config_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_core_v3_health_check_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HealthCheck); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_health_check_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HealthCheck_Payload); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_health_check_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HealthCheck_HttpHealthCheck); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_health_check_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HealthCheck_TcpHealthCheck); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_health_check_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HealthCheck_RedisHealthCheck); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_health_check_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HealthCheck_GrpcHealthCheck); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_health_check_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HealthCheck_CustomHealthCheck); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_health_check_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HealthCheck_TlsOptions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_envoy_config_core_v3_health_check_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*HealthCheck_HttpHealthCheck_)(nil),
+ (*HealthCheck_TcpHealthCheck_)(nil),
+ (*HealthCheck_GrpcHealthCheck_)(nil),
+ (*HealthCheck_CustomHealthCheck_)(nil),
+ }
+ file_envoy_config_core_v3_health_check_proto_msgTypes[1].OneofWrappers = []interface{}{
+ (*HealthCheck_Payload_Text)(nil),
+ (*HealthCheck_Payload_Binary)(nil),
+ }
+ file_envoy_config_core_v3_health_check_proto_msgTypes[6].OneofWrappers = []interface{}{
+ (*HealthCheck_CustomHealthCheck_TypedConfig)(nil),
+ (*HealthCheck_CustomHealthCheck_HiddenEnvoyDeprecatedConfig)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_core_v3_health_check_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 8,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_core_v3_health_check_proto_goTypes,
+ DependencyIndexes: file_envoy_config_core_v3_health_check_proto_depIdxs,
+ EnumInfos: file_envoy_config_core_v3_health_check_proto_enumTypes,
+ MessageInfos: file_envoy_config_core_v3_health_check_proto_msgTypes,
+ }.Build()
+ File_envoy_config_core_v3_health_check_proto = out.File
+ file_envoy_config_core_v3_health_check_proto_rawDesc = nil
+ file_envoy_config_core_v3_health_check_proto_goTypes = nil
+ file_envoy_config_core_v3_health_check_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.validate.go
new file mode 100644
index 000000000..d322a437c
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.validate.go
@@ -0,0 +1,1117 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/core/v3/health_check.proto
+
+package envoy_config_core_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+
+ v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+
+ _ = v3.CodecClientType(0)
+)
+
+// Validate checks the field values on HealthCheck with the rules defined in
+// the proto definition for this message. If any rules are violated, an error
+// is returned.
+func (m *HealthCheck) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if m.GetTimeout() == nil {
+ return HealthCheckValidationError{
+ field: "Timeout",
+ reason: "value is required",
+ }
+ }
+
+ if d := m.GetTimeout(); d != nil {
+ dur, err := ptypes.Duration(d)
+ if err != nil {
+ return HealthCheckValidationError{
+ field: "Timeout",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ }
+
+ gt := time.Duration(0*time.Second + 0*time.Nanosecond)
+
+ if dur <= gt {
+ return HealthCheckValidationError{
+ field: "Timeout",
+ reason: "value must be greater than 0s",
+ }
+ }
+
+ }
+
+ if m.GetInterval() == nil {
+ return HealthCheckValidationError{
+ field: "Interval",
+ reason: "value is required",
+ }
+ }
+
+ if d := m.GetInterval(); d != nil {
+ dur, err := ptypes.Duration(d)
+ if err != nil {
+ return HealthCheckValidationError{
+ field: "Interval",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ }
+
+ gt := time.Duration(0*time.Second + 0*time.Nanosecond)
+
+ if dur <= gt {
+ return HealthCheckValidationError{
+ field: "Interval",
+ reason: "value must be greater than 0s",
+ }
+ }
+
+ }
+
+ if v, ok := interface{}(m.GetInitialJitter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheckValidationError{
+ field: "InitialJitter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if v, ok := interface{}(m.GetIntervalJitter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheckValidationError{
+ field: "IntervalJitter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for IntervalJitterPercent
+
+ if m.GetUnhealthyThreshold() == nil {
+ return HealthCheckValidationError{
+ field: "UnhealthyThreshold",
+ reason: "value is required",
+ }
+ }
+
+ if v, ok := interface{}(m.GetUnhealthyThreshold()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheckValidationError{
+ field: "UnhealthyThreshold",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if m.GetHealthyThreshold() == nil {
+ return HealthCheckValidationError{
+ field: "HealthyThreshold",
+ reason: "value is required",
+ }
+ }
+
+ if v, ok := interface{}(m.GetHealthyThreshold()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheckValidationError{
+ field: "HealthyThreshold",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if v, ok := interface{}(m.GetAltPort()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheckValidationError{
+ field: "AltPort",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if v, ok := interface{}(m.GetReuseConnection()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheckValidationError{
+ field: "ReuseConnection",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if d := m.GetNoTrafficInterval(); d != nil {
+ dur, err := ptypes.Duration(d)
+ if err != nil {
+ return HealthCheckValidationError{
+ field: "NoTrafficInterval",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ }
+
+ gt := time.Duration(0*time.Second + 0*time.Nanosecond)
+
+ if dur <= gt {
+ return HealthCheckValidationError{
+ field: "NoTrafficInterval",
+ reason: "value must be greater than 0s",
+ }
+ }
+
+ }
+
+ if d := m.GetNoTrafficHealthyInterval(); d != nil {
+ dur, err := ptypes.Duration(d)
+ if err != nil {
+ return HealthCheckValidationError{
+ field: "NoTrafficHealthyInterval",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ }
+
+ gt := time.Duration(0*time.Second + 0*time.Nanosecond)
+
+ if dur <= gt {
+ return HealthCheckValidationError{
+ field: "NoTrafficHealthyInterval",
+ reason: "value must be greater than 0s",
+ }
+ }
+
+ }
+
+ if d := m.GetUnhealthyInterval(); d != nil {
+ dur, err := ptypes.Duration(d)
+ if err != nil {
+ return HealthCheckValidationError{
+ field: "UnhealthyInterval",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ }
+
+ gt := time.Duration(0*time.Second + 0*time.Nanosecond)
+
+ if dur <= gt {
+ return HealthCheckValidationError{
+ field: "UnhealthyInterval",
+ reason: "value must be greater than 0s",
+ }
+ }
+
+ }
+
+ if d := m.GetUnhealthyEdgeInterval(); d != nil {
+ dur, err := ptypes.Duration(d)
+ if err != nil {
+ return HealthCheckValidationError{
+ field: "UnhealthyEdgeInterval",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ }
+
+ gt := time.Duration(0*time.Second + 0*time.Nanosecond)
+
+ if dur <= gt {
+ return HealthCheckValidationError{
+ field: "UnhealthyEdgeInterval",
+ reason: "value must be greater than 0s",
+ }
+ }
+
+ }
+
+ if d := m.GetHealthyEdgeInterval(); d != nil {
+ dur, err := ptypes.Duration(d)
+ if err != nil {
+ return HealthCheckValidationError{
+ field: "HealthyEdgeInterval",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ }
+
+ gt := time.Duration(0*time.Second + 0*time.Nanosecond)
+
+ if dur <= gt {
+ return HealthCheckValidationError{
+ field: "HealthyEdgeInterval",
+ reason: "value must be greater than 0s",
+ }
+ }
+
+ }
+
+ // no validation rules for EventLogPath
+
+ if v, ok := interface{}(m.GetEventService()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheckValidationError{
+ field: "EventService",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for AlwaysLogHealthCheckFailures
+
+ if v, ok := interface{}(m.GetTlsOptions()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheckValidationError{
+ field: "TlsOptions",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if v, ok := interface{}(m.GetTransportSocketMatchCriteria()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheckValidationError{
+ field: "TransportSocketMatchCriteria",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ switch m.HealthChecker.(type) {
+
+ case *HealthCheck_HttpHealthCheck_:
+
+ if v, ok := interface{}(m.GetHttpHealthCheck()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheckValidationError{
+ field: "HttpHealthCheck",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *HealthCheck_TcpHealthCheck_:
+
+ if v, ok := interface{}(m.GetTcpHealthCheck()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheckValidationError{
+ field: "TcpHealthCheck",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *HealthCheck_GrpcHealthCheck_:
+
+ if v, ok := interface{}(m.GetGrpcHealthCheck()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheckValidationError{
+ field: "GrpcHealthCheck",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *HealthCheck_CustomHealthCheck_:
+
+ if v, ok := interface{}(m.GetCustomHealthCheck()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheckValidationError{
+ field: "CustomHealthCheck",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ return HealthCheckValidationError{
+ field: "HealthChecker",
+ reason: "value is required",
+ }
+
+ }
+
+ return nil
+}
+
+// HealthCheckValidationError is the validation error returned by
+// HealthCheck.Validate if the designated constraints aren't met.
+type HealthCheckValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HealthCheckValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HealthCheckValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HealthCheckValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HealthCheckValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HealthCheckValidationError) ErrorName() string { return "HealthCheckValidationError" }
+
+// Error satisfies the builtin error interface
+func (e HealthCheckValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHealthCheck.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HealthCheckValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HealthCheckValidationError{}
+
+// Validate checks the field values on HealthCheck_Payload with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *HealthCheck_Payload) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ switch m.Payload.(type) {
+
+ case *HealthCheck_Payload_Text:
+
+ if utf8.RuneCountInString(m.GetText()) < 1 {
+ return HealthCheck_PayloadValidationError{
+ field: "Text",
+ reason: "value length must be at least 1 runes",
+ }
+ }
+
+ case *HealthCheck_Payload_Binary:
+ // no validation rules for Binary
+
+ default:
+ return HealthCheck_PayloadValidationError{
+ field: "Payload",
+ reason: "value is required",
+ }
+
+ }
+
+ return nil
+}
+
+// HealthCheck_PayloadValidationError is the validation error returned by
+// HealthCheck_Payload.Validate if the designated constraints aren't met.
+type HealthCheck_PayloadValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HealthCheck_PayloadValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HealthCheck_PayloadValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HealthCheck_PayloadValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HealthCheck_PayloadValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HealthCheck_PayloadValidationError) ErrorName() string {
+ return "HealthCheck_PayloadValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e HealthCheck_PayloadValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHealthCheck_Payload.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HealthCheck_PayloadValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HealthCheck_PayloadValidationError{}
+
+// Validate checks the field values on HealthCheck_HttpHealthCheck with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *HealthCheck_HttpHealthCheck) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if !_HealthCheck_HttpHealthCheck_Host_Pattern.MatchString(m.GetHost()) {
+ return HealthCheck_HttpHealthCheckValidationError{
+ field: "Host",
+ reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"",
+ }
+ }
+
+ if utf8.RuneCountInString(m.GetPath()) < 1 {
+ return HealthCheck_HttpHealthCheckValidationError{
+ field: "Path",
+ reason: "value length must be at least 1 runes",
+ }
+ }
+
+ if !_HealthCheck_HttpHealthCheck_Path_Pattern.MatchString(m.GetPath()) {
+ return HealthCheck_HttpHealthCheckValidationError{
+ field: "Path",
+ reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"",
+ }
+ }
+
+ if v, ok := interface{}(m.GetSend()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheck_HttpHealthCheckValidationError{
+ field: "Send",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if v, ok := interface{}(m.GetReceive()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheck_HttpHealthCheckValidationError{
+ field: "Receive",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(m.GetRequestHeadersToAdd()) > 1000 {
+ return HealthCheck_HttpHealthCheckValidationError{
+ field: "RequestHeadersToAdd",
+ reason: "value must contain no more than 1000 item(s)",
+ }
+ }
+
+ for idx, item := range m.GetRequestHeadersToAdd() {
+ _, _ = idx, item
+
+ if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheck_HttpHealthCheckValidationError{
+ field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetRequestHeadersToRemove() {
+ _, _ = idx, item
+
+ if !_HealthCheck_HttpHealthCheck_RequestHeadersToRemove_Pattern.MatchString(item) {
+ return HealthCheck_HttpHealthCheckValidationError{
+ field: fmt.Sprintf("RequestHeadersToRemove[%v]", idx),
+ reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"",
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetExpectedStatuses() {
+ _, _ = idx, item
+
+ if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheck_HttpHealthCheckValidationError{
+ field: fmt.Sprintf("ExpectedStatuses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if _, ok := v3.CodecClientType_name[int32(m.GetCodecClientType())]; !ok {
+ return HealthCheck_HttpHealthCheckValidationError{
+ field: "CodecClientType",
+ reason: "value must be one of the defined enum values",
+ }
+ }
+
+ if v, ok := interface{}(m.GetServiceNameMatcher()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheck_HttpHealthCheckValidationError{
+ field: "ServiceNameMatcher",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for HiddenEnvoyDeprecatedServiceName
+
+ // no validation rules for HiddenEnvoyDeprecatedUseHttp2
+
+ return nil
+}
+
+// HealthCheck_HttpHealthCheckValidationError is the validation error returned
+// by HealthCheck_HttpHealthCheck.Validate if the designated constraints
+// aren't met.
+type HealthCheck_HttpHealthCheckValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HealthCheck_HttpHealthCheckValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HealthCheck_HttpHealthCheckValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HealthCheck_HttpHealthCheckValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HealthCheck_HttpHealthCheckValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HealthCheck_HttpHealthCheckValidationError) ErrorName() string {
+ return "HealthCheck_HttpHealthCheckValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e HealthCheck_HttpHealthCheckValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHealthCheck_HttpHealthCheck.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HealthCheck_HttpHealthCheckValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HealthCheck_HttpHealthCheckValidationError{}
+
+var _HealthCheck_HttpHealthCheck_Host_Pattern = regexp.MustCompile("^[^\x00\n\r]*$")
+
+var _HealthCheck_HttpHealthCheck_Path_Pattern = regexp.MustCompile("^[^\x00\n\r]*$")
+
+var _HealthCheck_HttpHealthCheck_RequestHeadersToRemove_Pattern = regexp.MustCompile("^[^\x00\n\r]*$")
+
+// Validate checks the field values on HealthCheck_TcpHealthCheck with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *HealthCheck_TcpHealthCheck) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if v, ok := interface{}(m.GetSend()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheck_TcpHealthCheckValidationError{
+ field: "Send",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ for idx, item := range m.GetReceive() {
+ _, _ = idx, item
+
+ if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheck_TcpHealthCheckValidationError{
+ field: fmt.Sprintf("Receive[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// HealthCheck_TcpHealthCheckValidationError is the validation error returned
+// by HealthCheck_TcpHealthCheck.Validate if the designated constraints aren't met.
+type HealthCheck_TcpHealthCheckValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HealthCheck_TcpHealthCheckValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HealthCheck_TcpHealthCheckValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HealthCheck_TcpHealthCheckValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HealthCheck_TcpHealthCheckValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HealthCheck_TcpHealthCheckValidationError) ErrorName() string {
+ return "HealthCheck_TcpHealthCheckValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e HealthCheck_TcpHealthCheckValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHealthCheck_TcpHealthCheck.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HealthCheck_TcpHealthCheckValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HealthCheck_TcpHealthCheckValidationError{}
+
+// Validate checks the field values on HealthCheck_RedisHealthCheck with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *HealthCheck_RedisHealthCheck) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ // no validation rules for Key
+
+ return nil
+}
+
+// HealthCheck_RedisHealthCheckValidationError is the validation error returned
+// by HealthCheck_RedisHealthCheck.Validate if the designated constraints
+// aren't met.
+type HealthCheck_RedisHealthCheckValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HealthCheck_RedisHealthCheckValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HealthCheck_RedisHealthCheckValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HealthCheck_RedisHealthCheckValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HealthCheck_RedisHealthCheckValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HealthCheck_RedisHealthCheckValidationError) ErrorName() string {
+ return "HealthCheck_RedisHealthCheckValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e HealthCheck_RedisHealthCheckValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHealthCheck_RedisHealthCheck.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HealthCheck_RedisHealthCheckValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HealthCheck_RedisHealthCheckValidationError{}
+
+// Validate checks the field values on HealthCheck_GrpcHealthCheck with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *HealthCheck_GrpcHealthCheck) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ // no validation rules for ServiceName
+
+ if !_HealthCheck_GrpcHealthCheck_Authority_Pattern.MatchString(m.GetAuthority()) {
+ return HealthCheck_GrpcHealthCheckValidationError{
+ field: "Authority",
+ reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"",
+ }
+ }
+
+ return nil
+}
+
+// HealthCheck_GrpcHealthCheckValidationError is the validation error returned
+// by HealthCheck_GrpcHealthCheck.Validate if the designated constraints
+// aren't met.
+type HealthCheck_GrpcHealthCheckValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HealthCheck_GrpcHealthCheckValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HealthCheck_GrpcHealthCheckValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HealthCheck_GrpcHealthCheckValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HealthCheck_GrpcHealthCheckValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HealthCheck_GrpcHealthCheckValidationError) ErrorName() string {
+ return "HealthCheck_GrpcHealthCheckValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e HealthCheck_GrpcHealthCheckValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHealthCheck_GrpcHealthCheck.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HealthCheck_GrpcHealthCheckValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HealthCheck_GrpcHealthCheckValidationError{}
+
+var _HealthCheck_GrpcHealthCheck_Authority_Pattern = regexp.MustCompile("^[^\x00\n\r]*$")
+
+// Validate checks the field values on HealthCheck_CustomHealthCheck with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *HealthCheck_CustomHealthCheck) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if utf8.RuneCountInString(m.GetName()) < 1 {
+ return HealthCheck_CustomHealthCheckValidationError{
+ field: "Name",
+ reason: "value length must be at least 1 runes",
+ }
+ }
+
+ switch m.ConfigType.(type) {
+
+ case *HealthCheck_CustomHealthCheck_TypedConfig:
+
+ if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheck_CustomHealthCheckValidationError{
+ field: "TypedConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *HealthCheck_CustomHealthCheck_HiddenEnvoyDeprecatedConfig:
+
+ if v, ok := interface{}(m.GetHiddenEnvoyDeprecatedConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheck_CustomHealthCheckValidationError{
+ field: "HiddenEnvoyDeprecatedConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// HealthCheck_CustomHealthCheckValidationError is the validation error
+// returned by HealthCheck_CustomHealthCheck.Validate if the designated
+// constraints aren't met.
+type HealthCheck_CustomHealthCheckValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HealthCheck_CustomHealthCheckValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HealthCheck_CustomHealthCheckValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HealthCheck_CustomHealthCheckValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HealthCheck_CustomHealthCheckValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HealthCheck_CustomHealthCheckValidationError) ErrorName() string {
+ return "HealthCheck_CustomHealthCheckValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e HealthCheck_CustomHealthCheckValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHealthCheck_CustomHealthCheck.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HealthCheck_CustomHealthCheckValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HealthCheck_CustomHealthCheckValidationError{}
+
+// Validate checks the field values on HealthCheck_TlsOptions with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *HealthCheck_TlsOptions) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ return nil
+}
+
+// HealthCheck_TlsOptionsValidationError is the validation error returned by
+// HealthCheck_TlsOptions.Validate if the designated constraints aren't met.
+type HealthCheck_TlsOptionsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HealthCheck_TlsOptionsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HealthCheck_TlsOptionsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HealthCheck_TlsOptionsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HealthCheck_TlsOptionsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HealthCheck_TlsOptionsValidationError) ErrorName() string {
+ return "HealthCheck_TlsOptionsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e HealthCheck_TlsOptionsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHealthCheck_TlsOptions.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HealthCheck_TlsOptionsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HealthCheck_TlsOptionsValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.go
new file mode 100644
index 000000000..e614302dd
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.go
@@ -0,0 +1,237 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/config/core/v3/http_uri.proto
+
+package envoy_config_core_v3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ duration "github.com/golang/protobuf/ptypes/duration"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// Envoy external URI descriptor
+type HttpUri struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The HTTP server URI. It should be a full FQDN with protocol, host and path.
+ //
+ // Example:
+ //
+ // .. code-block:: yaml
+ //
+ // uri: https://www.googleapis.com/oauth2/v1/certs
+ //
+ Uri string `protobuf:"bytes,1,opt,name=uri,proto3" json:"uri,omitempty"`
+ // Specify how `uri` is to be fetched. Today, this requires an explicit
+ // cluster, but in the future we may support dynamic cluster creation or
+ // inline DNS resolution. See `issue
+ // `_.
+ //
+ // Types that are assignable to HttpUpstreamType:
+ // *HttpUri_Cluster
+ HttpUpstreamType isHttpUri_HttpUpstreamType `protobuf_oneof:"http_upstream_type"`
+ // Sets the maximum duration in milliseconds that a response can take to arrive upon request.
+ Timeout *duration.Duration `protobuf:"bytes,3,opt,name=timeout,proto3" json:"timeout,omitempty"`
+}
+
+func (x *HttpUri) Reset() {
+ *x = HttpUri{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_http_uri_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HttpUri) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HttpUri) ProtoMessage() {}
+
+func (x *HttpUri) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_http_uri_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HttpUri.ProtoReflect.Descriptor instead.
+func (*HttpUri) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_http_uri_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *HttpUri) GetUri() string {
+ if x != nil {
+ return x.Uri
+ }
+ return ""
+}
+
+func (m *HttpUri) GetHttpUpstreamType() isHttpUri_HttpUpstreamType {
+ if m != nil {
+ return m.HttpUpstreamType
+ }
+ return nil
+}
+
+func (x *HttpUri) GetCluster() string {
+ if x, ok := x.GetHttpUpstreamType().(*HttpUri_Cluster); ok {
+ return x.Cluster
+ }
+ return ""
+}
+
+func (x *HttpUri) GetTimeout() *duration.Duration {
+ if x != nil {
+ return x.Timeout
+ }
+ return nil
+}
+
+type isHttpUri_HttpUpstreamType interface {
+ isHttpUri_HttpUpstreamType()
+}
+
+type HttpUri_Cluster struct {
+ // A cluster is created in the Envoy "cluster_manager" config
+ // section. This field specifies the cluster name.
+ //
+ // Example:
+ //
+ // .. code-block:: yaml
+ //
+ // cluster: jwks_cluster
+ //
+ Cluster string `protobuf:"bytes,2,opt,name=cluster,proto3,oneof"`
+}
+
+func (*HttpUri_Cluster) isHttpUri_HttpUpstreamType() {}
+
+var File_envoy_config_core_v3_http_uri_proto protoreflect.FileDescriptor
+
+var file_envoy_config_core_v3_http_uri_proto_rawDesc = []byte{
+ 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x75, 0x72, 0x69, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70,
+ 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61,
+ 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76,
+ 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc7, 0x01, 0x0a, 0x07, 0x48, 0x74, 0x74, 0x70, 0x55,
+ 0x72, 0x69, 0x12, 0x19, 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x23, 0x0a,
+ 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07,
+ 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74,
+ 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0a,
+ 0xfa, 0x42, 0x07, 0xaa, 0x01, 0x04, 0x08, 0x01, 0x32, 0x00, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65,
+ 0x6f, 0x75, 0x74, 0x3a, 0x20, 0x9a, 0xc5, 0x88, 0x1e, 0x1b, 0x0a, 0x19, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74,
+ 0x74, 0x70, 0x55, 0x72, 0x69, 0x42, 0x19, 0x0a, 0x12, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x75, 0x70,
+ 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01,
+ 0x42, 0x3c, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78,
+ 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63,
+ 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x48, 0x74, 0x74, 0x70, 0x55, 0x72, 0x69, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_core_v3_http_uri_proto_rawDescOnce sync.Once
+ file_envoy_config_core_v3_http_uri_proto_rawDescData = file_envoy_config_core_v3_http_uri_proto_rawDesc
+)
+
+func file_envoy_config_core_v3_http_uri_proto_rawDescGZIP() []byte {
+ file_envoy_config_core_v3_http_uri_proto_rawDescOnce.Do(func() {
+ file_envoy_config_core_v3_http_uri_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_http_uri_proto_rawDescData)
+ })
+ return file_envoy_config_core_v3_http_uri_proto_rawDescData
+}
+
+var file_envoy_config_core_v3_http_uri_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_envoy_config_core_v3_http_uri_proto_goTypes = []interface{}{
+ (*HttpUri)(nil), // 0: envoy.config.core.v3.HttpUri
+ (*duration.Duration)(nil), // 1: google.protobuf.Duration
+}
+var file_envoy_config_core_v3_http_uri_proto_depIdxs = []int32{
+ 1, // 0: envoy.config.core.v3.HttpUri.timeout:type_name -> google.protobuf.Duration
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_core_v3_http_uri_proto_init() }
+func file_envoy_config_core_v3_http_uri_proto_init() {
+ if File_envoy_config_core_v3_http_uri_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_core_v3_http_uri_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HttpUri); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_envoy_config_core_v3_http_uri_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*HttpUri_Cluster)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_core_v3_http_uri_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_core_v3_http_uri_proto_goTypes,
+ DependencyIndexes: file_envoy_config_core_v3_http_uri_proto_depIdxs,
+ MessageInfos: file_envoy_config_core_v3_http_uri_proto_msgTypes,
+ }.Build()
+ File_envoy_config_core_v3_http_uri_proto = out.File
+ file_envoy_config_core_v3_http_uri_proto_rawDesc = nil
+ file_envoy_config_core_v3_http_uri_proto_goTypes = nil
+ file_envoy_config_core_v3_http_uri_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.validate.go
new file mode 100644
index 000000000..cfcf922f8
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.validate.go
@@ -0,0 +1,152 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/core/v3/http_uri.proto
+
+package envoy_config_core_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// Validate checks the field values on HttpUri with the rules defined in the
+// proto definition for this message. If any rules are violated, an error is returned.
+func (m *HttpUri) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if utf8.RuneCountInString(m.GetUri()) < 1 {
+ return HttpUriValidationError{
+ field: "Uri",
+ reason: "value length must be at least 1 runes",
+ }
+ }
+
+ if m.GetTimeout() == nil {
+ return HttpUriValidationError{
+ field: "Timeout",
+ reason: "value is required",
+ }
+ }
+
+ if d := m.GetTimeout(); d != nil {
+ dur, err := ptypes.Duration(d)
+ if err != nil {
+ return HttpUriValidationError{
+ field: "Timeout",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ }
+
+ gte := time.Duration(0*time.Second + 0*time.Nanosecond)
+
+ if dur < gte {
+ return HttpUriValidationError{
+ field: "Timeout",
+ reason: "value must be greater than or equal to 0s",
+ }
+ }
+
+ }
+
+ switch m.HttpUpstreamType.(type) {
+
+ case *HttpUri_Cluster:
+
+ if utf8.RuneCountInString(m.GetCluster()) < 1 {
+ return HttpUriValidationError{
+ field: "Cluster",
+ reason: "value length must be at least 1 runes",
+ }
+ }
+
+ default:
+ return HttpUriValidationError{
+ field: "HttpUpstreamType",
+ reason: "value is required",
+ }
+
+ }
+
+ return nil
+}
+
+// HttpUriValidationError is the validation error returned by HttpUri.Validate
+// if the designated constraints aren't met.
+type HttpUriValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HttpUriValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HttpUriValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HttpUriValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HttpUriValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HttpUriValidationError) ErrorName() string { return "HttpUriValidationError" }
+
+// Error satisfies the builtin error interface
+func (e HttpUriValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHttpUri.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HttpUriValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HttpUriValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.go
new file mode 100644
index 000000000..0340572fd
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.go
@@ -0,0 +1,1841 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/config/core/v3/protocol.proto
+
+package envoy_config_core_v3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/go-control-plane/envoy/annotations"
+ v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ duration "github.com/golang/protobuf/ptypes/duration"
+ wrappers "github.com/golang/protobuf/ptypes/wrappers"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// Action to take when Envoy receives client request with header names containing underscore
+// characters.
+// Underscore character is allowed in header names by the RFC-7230 and this behavior is implemented
+// as a security measure due to systems that treat '_' and '-' as interchangeable. Envoy by default allows client request headers with underscore
+// characters.
+type HttpProtocolOptions_HeadersWithUnderscoresAction int32
+
+const (
+ // Allow headers with underscores. This is the default behavior.
+ HttpProtocolOptions_ALLOW HttpProtocolOptions_HeadersWithUnderscoresAction = 0
+ // Reject client request. HTTP/1 requests are rejected with the 400 status. HTTP/2 requests
+ // end with the stream reset. The "httpN.requests_rejected_with_underscores_in_headers" counter
+ // is incremented for each rejected request.
+ HttpProtocolOptions_REJECT_REQUEST HttpProtocolOptions_HeadersWithUnderscoresAction = 1
+ // Drop the header with name containing underscores. The header is dropped before the filter chain is
+ // invoked and as such filters will not see dropped headers. The
+ // "httpN.dropped_headers_with_underscores" is incremented for each dropped header.
+ HttpProtocolOptions_DROP_HEADER HttpProtocolOptions_HeadersWithUnderscoresAction = 2
+)
+
+// Enum value maps for HttpProtocolOptions_HeadersWithUnderscoresAction.
+var (
+ HttpProtocolOptions_HeadersWithUnderscoresAction_name = map[int32]string{
+ 0: "ALLOW",
+ 1: "REJECT_REQUEST",
+ 2: "DROP_HEADER",
+ }
+ HttpProtocolOptions_HeadersWithUnderscoresAction_value = map[string]int32{
+ "ALLOW": 0,
+ "REJECT_REQUEST": 1,
+ "DROP_HEADER": 2,
+ }
+)
+
+func (x HttpProtocolOptions_HeadersWithUnderscoresAction) Enum() *HttpProtocolOptions_HeadersWithUnderscoresAction {
+ p := new(HttpProtocolOptions_HeadersWithUnderscoresAction)
+ *p = x
+ return p
+}
+
+func (x HttpProtocolOptions_HeadersWithUnderscoresAction) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (HttpProtocolOptions_HeadersWithUnderscoresAction) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_core_v3_protocol_proto_enumTypes[0].Descriptor()
+}
+
+func (HttpProtocolOptions_HeadersWithUnderscoresAction) Type() protoreflect.EnumType {
+ return &file_envoy_config_core_v3_protocol_proto_enumTypes[0]
+}
+
+func (x HttpProtocolOptions_HeadersWithUnderscoresAction) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use HttpProtocolOptions_HeadersWithUnderscoresAction.Descriptor instead.
+func (HttpProtocolOptions_HeadersWithUnderscoresAction) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{4, 0}
+}
+
+// [#not-implemented-hide:]
+type TcpProtocolOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *TcpProtocolOptions) Reset() {
+ *x = TcpProtocolOptions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TcpProtocolOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TcpProtocolOptions) ProtoMessage() {}
+
+func (x *TcpProtocolOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TcpProtocolOptions.ProtoReflect.Descriptor instead.
+func (*TcpProtocolOptions) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{0}
+}
+
+// QUIC protocol options which apply to both downstream and upstream connections.
+type QuicProtocolOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Maximum number of streams that the client can negotiate per connection. 100
+ // if not specified.
+ MaxConcurrentStreams *wrappers.UInt32Value `protobuf:"bytes,1,opt,name=max_concurrent_streams,json=maxConcurrentStreams,proto3" json:"max_concurrent_streams,omitempty"`
+ // `Initial stream-level flow-control receive window
+ // `_ size. Valid values range from
+ // 1 to 16777216 (2^24, maximum supported by QUICHE) and defaults to 65536 (2^16).
+ //
+ // NOTE: 16384 (2^14) is the minimum window size supported in Google QUIC. If configured smaller than it, we will use 16384 instead.
+ // QUICHE IETF Quic implementation supports 1 bytes window. We only support increasing the default window size now, so it's also the minimum.
+ //
+ // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the
+ // QUIC stream send and receive buffers. Once the buffer reaches this pointer, watermark callbacks will fire to
+ // stop the flow of data to the stream buffers.
+ InitialStreamWindowSize *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=initial_stream_window_size,json=initialStreamWindowSize,proto3" json:"initial_stream_window_size,omitempty"`
+ // Similar to *initial_stream_window_size*, but for connection-level
+ // flow-control. Valid values rage from 1 to 25165824 (24MB, maximum supported by QUICHE) and defaults to 65536 (2^16).
+ // window. Currently, this has the same minimum/default as *initial_stream_window_size*.
+ //
+ // NOTE: 16384 (2^14) is the minimum window size supported in Google QUIC. We only support increasing the default
+ // window size now, so it's also the minimum.
+ InitialConnectionWindowSize *wrappers.UInt32Value `protobuf:"bytes,3,opt,name=initial_connection_window_size,json=initialConnectionWindowSize,proto3" json:"initial_connection_window_size,omitempty"`
+}
+
+func (x *QuicProtocolOptions) Reset() {
+ *x = QuicProtocolOptions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *QuicProtocolOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QuicProtocolOptions) ProtoMessage() {}
+
+func (x *QuicProtocolOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use QuicProtocolOptions.ProtoReflect.Descriptor instead.
+func (*QuicProtocolOptions) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *QuicProtocolOptions) GetMaxConcurrentStreams() *wrappers.UInt32Value {
+ if x != nil {
+ return x.MaxConcurrentStreams
+ }
+ return nil
+}
+
+func (x *QuicProtocolOptions) GetInitialStreamWindowSize() *wrappers.UInt32Value {
+ if x != nil {
+ return x.InitialStreamWindowSize
+ }
+ return nil
+}
+
+func (x *QuicProtocolOptions) GetInitialConnectionWindowSize() *wrappers.UInt32Value {
+ if x != nil {
+ return x.InitialConnectionWindowSize
+ }
+ return nil
+}
+
+type UpstreamHttpProtocolOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Set transport socket `SNI `_ for new
+ // upstream connections based on the downstream HTTP host/authority header, as seen by the
+ // :ref:`router filter `.
+ AutoSni bool `protobuf:"varint,1,opt,name=auto_sni,json=autoSni,proto3" json:"auto_sni,omitempty"`
+ // Automatic validate upstream presented certificate for new upstream connections based on the
+ // downstream HTTP host/authority header, as seen by the
+ // :ref:`router filter `.
+ // This field is intended to set with `auto_sni` field.
+ AutoSanValidation bool `protobuf:"varint,2,opt,name=auto_san_validation,json=autoSanValidation,proto3" json:"auto_san_validation,omitempty"`
+}
+
+func (x *UpstreamHttpProtocolOptions) Reset() {
+ *x = UpstreamHttpProtocolOptions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UpstreamHttpProtocolOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpstreamHttpProtocolOptions) ProtoMessage() {}
+
+func (x *UpstreamHttpProtocolOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpstreamHttpProtocolOptions.ProtoReflect.Descriptor instead.
+func (*UpstreamHttpProtocolOptions) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *UpstreamHttpProtocolOptions) GetAutoSni() bool {
+ if x != nil {
+ return x.AutoSni
+ }
+ return false
+}
+
+func (x *UpstreamHttpProtocolOptions) GetAutoSanValidation() bool {
+ if x != nil {
+ return x.AutoSanValidation
+ }
+ return false
+}
+
+// Configures the alternate protocols cache which tracks alternate protocols that can be used to
+// make an HTTP connection to an origin server. See https://tools.ietf.org/html/rfc7838 for
+// HTTP Alternate Services and https://datatracker.ietf.org/doc/html/draft-ietf-dnsop-svcb-https-04
+// for the "HTTPS" DNS resource record.
+type AlternateProtocolsCacheOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name of the cache. Multiple named caches allow independent alternate protocols cache
+ // configurations to operate within a single Envoy process using different configurations. All
+ // alternate protocols cache options with the same name *must* be equal in all fields when
+ // referenced from different configuration components. Configuration will fail to load if this is
+ // not the case.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The maximum number of entries that the cache will hold. If not specified defaults to 1024.
+ //
+ // .. note:
+ //
+ // The implementation is approximate and enforced independently on each worker thread, thus
+ // it is possible for the maximum entries in the cache to go slightly above the configured
+ // value depending on timing. This is similar to how other circuit breakers work.
+ MaxEntries *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=max_entries,json=maxEntries,proto3" json:"max_entries,omitempty"`
+}
+
+func (x *AlternateProtocolsCacheOptions) Reset() {
+ *x = AlternateProtocolsCacheOptions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AlternateProtocolsCacheOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlternateProtocolsCacheOptions) ProtoMessage() {}
+
+func (x *AlternateProtocolsCacheOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlternateProtocolsCacheOptions.ProtoReflect.Descriptor instead.
+func (*AlternateProtocolsCacheOptions) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *AlternateProtocolsCacheOptions) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *AlternateProtocolsCacheOptions) GetMaxEntries() *wrappers.UInt32Value {
+ if x != nil {
+ return x.MaxEntries
+ }
+ return nil
+}
+
+// [#next-free-field: 6]
+type HttpProtocolOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The idle timeout for connections. The idle timeout is defined as the
+ // period in which there are no active requests. When the
+ // idle timeout is reached the connection will be closed. If the connection is an HTTP/2
+ // downstream connection a drain sequence will occur prior to closing the connection, see
+ // :ref:`drain_timeout
+ // `.
+ // Note that request based timeouts mean that HTTP/2 PINGs will not keep the connection alive.
+ // If not specified, this defaults to 1 hour. To disable idle timeouts explicitly set this to 0.
+ //
+ // .. warning::
+ // Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP
+ // FIN packets, etc.
+ //
+ // If the :ref:`overload action ` "envoy.overload_actions.reduce_timeouts"
+ // is configured, this timeout is scaled for downstream connections according to the value for
+ // :ref:`HTTP_DOWNSTREAM_CONNECTION_IDLE `.
+ IdleTimeout *duration.Duration `protobuf:"bytes,1,opt,name=idle_timeout,json=idleTimeout,proto3" json:"idle_timeout,omitempty"`
+ // The maximum duration of a connection. The duration is defined as a period since a connection
+ // was established. If not set, there is no max duration. When max_connection_duration is reached
+ // the connection will be closed. Drain sequence will occur prior to closing the connection if
+ // if's applicable. See :ref:`drain_timeout
+ // `.
+ // Note: not implemented for upstream connections.
+ MaxConnectionDuration *duration.Duration `protobuf:"bytes,3,opt,name=max_connection_duration,json=maxConnectionDuration,proto3" json:"max_connection_duration,omitempty"`
+ // The maximum number of headers. If unconfigured, the default
+ // maximum number of request headers allowed is 100. Requests that exceed this limit will receive
+ // a 431 response for HTTP/1.x and cause a stream reset for HTTP/2.
+ MaxHeadersCount *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=max_headers_count,json=maxHeadersCount,proto3" json:"max_headers_count,omitempty"`
+ // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be
+ // reset independent of any other timeouts. If not specified, this value is not set.
+ MaxStreamDuration *duration.Duration `protobuf:"bytes,4,opt,name=max_stream_duration,json=maxStreamDuration,proto3" json:"max_stream_duration,omitempty"`
+ // Action to take when a client request with a header name containing underscore characters is received.
+ // If this setting is not specified, the value defaults to ALLOW.
+ // Note: upstream responses are not affected by this setting.
+ HeadersWithUnderscoresAction HttpProtocolOptions_HeadersWithUnderscoresAction `protobuf:"varint,5,opt,name=headers_with_underscores_action,json=headersWithUnderscoresAction,proto3,enum=envoy.config.core.v3.HttpProtocolOptions_HeadersWithUnderscoresAction" json:"headers_with_underscores_action,omitempty"`
+}
+
+func (x *HttpProtocolOptions) Reset() {
+ *x = HttpProtocolOptions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HttpProtocolOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HttpProtocolOptions) ProtoMessage() {}
+
+func (x *HttpProtocolOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HttpProtocolOptions.ProtoReflect.Descriptor instead.
+func (*HttpProtocolOptions) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *HttpProtocolOptions) GetIdleTimeout() *duration.Duration {
+ if x != nil {
+ return x.IdleTimeout
+ }
+ return nil
+}
+
+func (x *HttpProtocolOptions) GetMaxConnectionDuration() *duration.Duration {
+ if x != nil {
+ return x.MaxConnectionDuration
+ }
+ return nil
+}
+
+func (x *HttpProtocolOptions) GetMaxHeadersCount() *wrappers.UInt32Value {
+ if x != nil {
+ return x.MaxHeadersCount
+ }
+ return nil
+}
+
+func (x *HttpProtocolOptions) GetMaxStreamDuration() *duration.Duration {
+ if x != nil {
+ return x.MaxStreamDuration
+ }
+ return nil
+}
+
+func (x *HttpProtocolOptions) GetHeadersWithUnderscoresAction() HttpProtocolOptions_HeadersWithUnderscoresAction {
+ if x != nil {
+ return x.HeadersWithUnderscoresAction
+ }
+ return HttpProtocolOptions_ALLOW
+}
+
+// [#next-free-field: 8]
+type Http1ProtocolOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Handle HTTP requests with absolute URLs in the requests. These requests
+ // are generally sent by clients to forward/explicit proxies. This allows clients to configure
+ // envoy as their HTTP proxy. In Unix, for example, this is typically done by setting the
+ // *http_proxy* environment variable.
+ AllowAbsoluteUrl *wrappers.BoolValue `protobuf:"bytes,1,opt,name=allow_absolute_url,json=allowAbsoluteUrl,proto3" json:"allow_absolute_url,omitempty"`
+ // Handle incoming HTTP/1.0 and HTTP 0.9 requests.
+ // This is off by default, and not fully standards compliant. There is support for pre-HTTP/1.1
+ // style connect logic, dechunking, and handling lack of client host iff
+ // *default_host_for_http_10* is configured.
+ AcceptHttp_10 bool `protobuf:"varint,2,opt,name=accept_http_10,json=acceptHttp10,proto3" json:"accept_http_10,omitempty"`
+ // A default host for HTTP/1.0 requests. This is highly suggested if *accept_http_10* is true as
+ // Envoy does not otherwise support HTTP/1.0 without a Host header.
+ // This is a no-op if *accept_http_10* is not true.
+ DefaultHostForHttp_10 string `protobuf:"bytes,3,opt,name=default_host_for_http_10,json=defaultHostForHttp10,proto3" json:"default_host_for_http_10,omitempty"`
+ // Describes how the keys for response headers should be formatted. By default, all header keys
+ // are lower cased.
+ HeaderKeyFormat *Http1ProtocolOptions_HeaderKeyFormat `protobuf:"bytes,4,opt,name=header_key_format,json=headerKeyFormat,proto3" json:"header_key_format,omitempty"`
+ // Enables trailers for HTTP/1. By default the HTTP/1 codec drops proxied trailers.
+ //
+ // .. attention::
+ //
+ // Note that this only happens when Envoy is chunk encoding which occurs when:
+ // - The request is HTTP/1.1.
+ // - Is neither a HEAD only request nor a HTTP Upgrade.
+ // - Not a response to a HEAD request.
+ // - The content length header is not present.
+ EnableTrailers bool `protobuf:"varint,5,opt,name=enable_trailers,json=enableTrailers,proto3" json:"enable_trailers,omitempty"`
+ // Allows Envoy to process requests/responses with both `Content-Length` and `Transfer-Encoding`
+ // headers set. By default such messages are rejected, but if option is enabled - Envoy will
+ // remove Content-Length header and process message.
+ // See `RFC7230, sec. 3.3.3 ` for details.
+ //
+ // .. attention::
+ // Enabling this option might lead to request smuggling vulnerability, especially if traffic
+ // is proxied via multiple layers of proxies.
+ AllowChunkedLength bool `protobuf:"varint,6,opt,name=allow_chunked_length,json=allowChunkedLength,proto3" json:"allow_chunked_length,omitempty"`
+ // Allows invalid HTTP messaging. When this option is false, then Envoy will terminate
+ // HTTP/1.1 connections upon receiving an invalid HTTP message. However,
+ // when this option is true, then Envoy will leave the HTTP/1.1 connection
+ // open where possible.
+ // If set, this overrides any HCM :ref:`stream_error_on_invalid_http_messaging
+ // `.
+ OverrideStreamErrorOnInvalidHttpMessage *wrappers.BoolValue `protobuf:"bytes,7,opt,name=override_stream_error_on_invalid_http_message,json=overrideStreamErrorOnInvalidHttpMessage,proto3" json:"override_stream_error_on_invalid_http_message,omitempty"`
+}
+
+func (x *Http1ProtocolOptions) Reset() {
+ *x = Http1ProtocolOptions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Http1ProtocolOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Http1ProtocolOptions) ProtoMessage() {}
+
+func (x *Http1ProtocolOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Http1ProtocolOptions.ProtoReflect.Descriptor instead.
+func (*Http1ProtocolOptions) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *Http1ProtocolOptions) GetAllowAbsoluteUrl() *wrappers.BoolValue {
+ if x != nil {
+ return x.AllowAbsoluteUrl
+ }
+ return nil
+}
+
+func (x *Http1ProtocolOptions) GetAcceptHttp_10() bool {
+ if x != nil {
+ return x.AcceptHttp_10
+ }
+ return false
+}
+
+func (x *Http1ProtocolOptions) GetDefaultHostForHttp_10() string {
+ if x != nil {
+ return x.DefaultHostForHttp_10
+ }
+ return ""
+}
+
+func (x *Http1ProtocolOptions) GetHeaderKeyFormat() *Http1ProtocolOptions_HeaderKeyFormat {
+ if x != nil {
+ return x.HeaderKeyFormat
+ }
+ return nil
+}
+
+func (x *Http1ProtocolOptions) GetEnableTrailers() bool {
+ if x != nil {
+ return x.EnableTrailers
+ }
+ return false
+}
+
+func (x *Http1ProtocolOptions) GetAllowChunkedLength() bool {
+ if x != nil {
+ return x.AllowChunkedLength
+ }
+ return false
+}
+
+func (x *Http1ProtocolOptions) GetOverrideStreamErrorOnInvalidHttpMessage() *wrappers.BoolValue {
+ if x != nil {
+ return x.OverrideStreamErrorOnInvalidHttpMessage
+ }
+ return nil
+}
+
+type KeepaliveSettings struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Send HTTP/2 PING frames at this period, in order to test that the connection is still alive.
+ Interval *duration.Duration `protobuf:"bytes,1,opt,name=interval,proto3" json:"interval,omitempty"`
+ // How long to wait for a response to a keepalive PING. If a response is not received within this
+ // time period, the connection will be aborted.
+ Timeout *duration.Duration `protobuf:"bytes,2,opt,name=timeout,proto3" json:"timeout,omitempty"`
+ // A random jitter amount as a percentage of interval that will be added to each interval.
+ // A value of zero means there will be no jitter.
+ // The default value is 15%.
+ IntervalJitter *v3.Percent `protobuf:"bytes,3,opt,name=interval_jitter,json=intervalJitter,proto3" json:"interval_jitter,omitempty"`
+}
+
+func (x *KeepaliveSettings) Reset() {
+ *x = KeepaliveSettings{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *KeepaliveSettings) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*KeepaliveSettings) ProtoMessage() {}
+
+func (x *KeepaliveSettings) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use KeepaliveSettings.ProtoReflect.Descriptor instead.
+func (*KeepaliveSettings) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *KeepaliveSettings) GetInterval() *duration.Duration {
+ if x != nil {
+ return x.Interval
+ }
+ return nil
+}
+
+func (x *KeepaliveSettings) GetTimeout() *duration.Duration {
+ if x != nil {
+ return x.Timeout
+ }
+ return nil
+}
+
+func (x *KeepaliveSettings) GetIntervalJitter() *v3.Percent {
+ if x != nil {
+ return x.IntervalJitter
+ }
+ return nil
+}
+
+// [#next-free-field: 16]
+type Http2ProtocolOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // `Maximum table size `_
+ // (in octets) that the encoder is permitted to use for the dynamic HPACK table. Valid values
+ // range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. 0 effectively disables header
+ // compression.
+ HpackTableSize *wrappers.UInt32Value `protobuf:"bytes,1,opt,name=hpack_table_size,json=hpackTableSize,proto3" json:"hpack_table_size,omitempty"`
+ // `Maximum concurrent streams `_
+ // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1)
+ // and defaults to 2147483647.
+ //
+ // For upstream connections, this also limits how many streams Envoy will initiate concurrently
+ // on a single connection. If the limit is reached, Envoy may queue requests or establish
+ // additional connections (as allowed per circuit breaker limits).
+ //
+ // This acts as an upper bound: Envoy will lower the max concurrent streams allowed on a given
+ // connection based on upstream settings. Config dumps will reflect the configured upper bound,
+ // not the per-connection negotiated limits.
+ MaxConcurrentStreams *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=max_concurrent_streams,json=maxConcurrentStreams,proto3" json:"max_concurrent_streams,omitempty"`
+ // `Initial stream-level flow-control window
+ // `_ size. Valid values range from 65535
+ // (2^16 - 1, HTTP/2 default) to 2147483647 (2^31 - 1, HTTP/2 maximum) and defaults to 268435456
+ // (256 * 1024 * 1024).
+ //
+ // NOTE: 65535 is the initial window size from HTTP/2 spec. We only support increasing the default
+ // window size now, so it's also the minimum.
+ //
+ // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the
+ // HTTP/2 codec buffers. Once the buffer reaches this pointer, watermark callbacks will fire to
+ // stop the flow of data to the codec buffers.
+ InitialStreamWindowSize *wrappers.UInt32Value `protobuf:"bytes,3,opt,name=initial_stream_window_size,json=initialStreamWindowSize,proto3" json:"initial_stream_window_size,omitempty"`
+ // Similar to *initial_stream_window_size*, but for connection-level flow-control
+ // window. Currently, this has the same minimum/maximum/default as *initial_stream_window_size*.
+ InitialConnectionWindowSize *wrappers.UInt32Value `protobuf:"bytes,4,opt,name=initial_connection_window_size,json=initialConnectionWindowSize,proto3" json:"initial_connection_window_size,omitempty"`
+ // Allows proxying Websocket and other upgrades over H2 connect.
+ AllowConnect bool `protobuf:"varint,5,opt,name=allow_connect,json=allowConnect,proto3" json:"allow_connect,omitempty"`
+ // [#not-implemented-hide:] Hiding until envoy has full metadata support.
+ // Still under implementation. DO NOT USE.
+ //
+ // Allows metadata. See [metadata
+ // docs](https://github.com/envoyproxy/envoy/blob/main/source/docs/h2_metadata.md) for more
+ // information.
+ AllowMetadata bool `protobuf:"varint,6,opt,name=allow_metadata,json=allowMetadata,proto3" json:"allow_metadata,omitempty"`
+ // Limit the number of pending outbound downstream frames of all types (frames that are waiting to
+ // be written into the socket). Exceeding this limit triggers flood mitigation and connection is
+ // terminated. The ``http2.outbound_flood`` stat tracks the number of terminated connections due
+ // to flood mitigation. The default limit is 10000.
+ // NOTE: flood and abuse mitigation for upstream connections is presently enabled by the
+ // `envoy.reloadable_features.upstream_http2_flood_checks` flag.
+ MaxOutboundFrames *wrappers.UInt32Value `protobuf:"bytes,7,opt,name=max_outbound_frames,json=maxOutboundFrames,proto3" json:"max_outbound_frames,omitempty"`
+ // Limit the number of pending outbound downstream frames of types PING, SETTINGS and RST_STREAM,
+ // preventing high memory utilization when receiving continuous stream of these frames. Exceeding
+ // this limit triggers flood mitigation and connection is terminated. The
+ // ``http2.outbound_control_flood`` stat tracks the number of terminated connections due to flood
+ // mitigation. The default limit is 1000.
+ // NOTE: flood and abuse mitigation for upstream connections is presently enabled by the
+ // `envoy.reloadable_features.upstream_http2_flood_checks` flag.
+ MaxOutboundControlFrames *wrappers.UInt32Value `protobuf:"bytes,8,opt,name=max_outbound_control_frames,json=maxOutboundControlFrames,proto3" json:"max_outbound_control_frames,omitempty"`
+ // Limit the number of consecutive inbound frames of types HEADERS, CONTINUATION and DATA with an
+ // empty payload and no end stream flag. Those frames have no legitimate use and are abusive, but
+ // might be a result of a broken HTTP/2 implementation. The `http2.inbound_empty_frames_flood``
+ // stat tracks the number of connections terminated due to flood mitigation.
+ // Setting this to 0 will terminate connection upon receiving first frame with an empty payload
+ // and no end stream flag. The default limit is 1.
+ // NOTE: flood and abuse mitigation for upstream connections is presently enabled by the
+ // `envoy.reloadable_features.upstream_http2_flood_checks` flag.
+ MaxConsecutiveInboundFramesWithEmptyPayload *wrappers.UInt32Value `protobuf:"bytes,9,opt,name=max_consecutive_inbound_frames_with_empty_payload,json=maxConsecutiveInboundFramesWithEmptyPayload,proto3" json:"max_consecutive_inbound_frames_with_empty_payload,omitempty"`
+ // Limit the number of inbound PRIORITY frames allowed per each opened stream. If the number
+ // of PRIORITY frames received over the lifetime of connection exceeds the value calculated
+ // using this formula::
+ //
+ // max_inbound_priority_frames_per_stream * (1 + opened_streams)
+ //
+ // the connection is terminated. For downstream connections the `opened_streams` is incremented when
+ // Envoy receives complete response headers from the upstream server. For upstream connection the
+ // `opened_streams` is incremented when Envoy send the HEADERS frame for a new stream. The
+ // ``http2.inbound_priority_frames_flood`` stat tracks
+ // the number of connections terminated due to flood mitigation. The default limit is 100.
+ // NOTE: flood and abuse mitigation for upstream connections is presently enabled by the
+ // `envoy.reloadable_features.upstream_http2_flood_checks` flag.
+ MaxInboundPriorityFramesPerStream *wrappers.UInt32Value `protobuf:"bytes,10,opt,name=max_inbound_priority_frames_per_stream,json=maxInboundPriorityFramesPerStream,proto3" json:"max_inbound_priority_frames_per_stream,omitempty"`
+ // Limit the number of inbound WINDOW_UPDATE frames allowed per DATA frame sent. If the number
+ // of WINDOW_UPDATE frames received over the lifetime of connection exceeds the value calculated
+ // using this formula::
+ //
+ // 5 + 2 * (opened_streams +
+ // max_inbound_window_update_frames_per_data_frame_sent * outbound_data_frames)
+ //
+ // the connection is terminated. For downstream connections the `opened_streams` is incremented when
+ // Envoy receives complete response headers from the upstream server. For upstream connections the
+ // `opened_streams` is incremented when Envoy sends the HEADERS frame for a new stream. The
+ // ``http2.inbound_priority_frames_flood`` stat tracks the number of connections terminated due to
+ // flood mitigation. The default max_inbound_window_update_frames_per_data_frame_sent value is 10.
+ // Setting this to 1 should be enough to support HTTP/2 implementations with basic flow control,
+ // but more complex implementations that try to estimate available bandwidth require at least 2.
+ // NOTE: flood and abuse mitigation for upstream connections is presently enabled by the
+ // `envoy.reloadable_features.upstream_http2_flood_checks` flag.
+ MaxInboundWindowUpdateFramesPerDataFrameSent *wrappers.UInt32Value `protobuf:"bytes,11,opt,name=max_inbound_window_update_frames_per_data_frame_sent,json=maxInboundWindowUpdateFramesPerDataFrameSent,proto3" json:"max_inbound_window_update_frames_per_data_frame_sent,omitempty"`
+ // Allows invalid HTTP messaging and headers. When this option is disabled (default), then
+ // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However,
+ // when this option is enabled, only the offending stream is terminated.
+ //
+ // This is overridden by HCM :ref:`stream_error_on_invalid_http_messaging
+ // `
+ // iff present.
+ //
+ // This is deprecated in favor of :ref:`override_stream_error_on_invalid_http_message
+ // `
+ //
+ // See `RFC7540, sec. 8.1 `_ for details.
+ //
+ // Deprecated: Do not use.
+ StreamErrorOnInvalidHttpMessaging bool `protobuf:"varint,12,opt,name=stream_error_on_invalid_http_messaging,json=streamErrorOnInvalidHttpMessaging,proto3" json:"stream_error_on_invalid_http_messaging,omitempty"`
+ // Allows invalid HTTP messaging and headers. When this option is disabled (default), then
+ // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However,
+ // when this option is enabled, only the offending stream is terminated.
+ //
+ // This overrides any HCM :ref:`stream_error_on_invalid_http_messaging
+ // `
+ //
+ // See `RFC7540, sec. 8.1 `_ for details.
+ OverrideStreamErrorOnInvalidHttpMessage *wrappers.BoolValue `protobuf:"bytes,14,opt,name=override_stream_error_on_invalid_http_message,json=overrideStreamErrorOnInvalidHttpMessage,proto3" json:"override_stream_error_on_invalid_http_message,omitempty"`
+ // [#not-implemented-hide:]
+ // Specifies SETTINGS frame parameters to be sent to the peer, with two exceptions:
+ //
+ // 1. SETTINGS_ENABLE_PUSH (0x2) is not configurable as HTTP/2 server push is not supported by
+ // Envoy.
+ //
+ // 2. SETTINGS_ENABLE_CONNECT_PROTOCOL (0x8) is only configurable through the named field
+ // 'allow_connect'.
+ //
+ // Note that custom parameters specified through this field can not also be set in the
+ // corresponding named parameters:
+ //
+ // .. code-block:: text
+ //
+ // ID Field Name
+ // ----------------
+ // 0x1 hpack_table_size
+ // 0x3 max_concurrent_streams
+ // 0x4 initial_stream_window_size
+ //
+ // Collisions will trigger config validation failure on load/update. Likewise, inconsistencies
+ // between custom parameters with the same identifier will trigger a failure.
+ //
+ // See `IANA HTTP/2 Settings
+ // `_ for
+ // standardized identifiers.
+ CustomSettingsParameters []*Http2ProtocolOptions_SettingsParameter `protobuf:"bytes,13,rep,name=custom_settings_parameters,json=customSettingsParameters,proto3" json:"custom_settings_parameters,omitempty"`
+ // Send HTTP/2 PING frames to verify that the connection is still healthy. If the remote peer
+ // does not respond within the configured timeout, the connection will be aborted.
+ ConnectionKeepalive *KeepaliveSettings `protobuf:"bytes,15,opt,name=connection_keepalive,json=connectionKeepalive,proto3" json:"connection_keepalive,omitempty"`
+}
+
+func (x *Http2ProtocolOptions) Reset() {
+ *x = Http2ProtocolOptions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Http2ProtocolOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Http2ProtocolOptions) ProtoMessage() {}
+
+func (x *Http2ProtocolOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Http2ProtocolOptions.ProtoReflect.Descriptor instead.
+func (*Http2ProtocolOptions) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *Http2ProtocolOptions) GetHpackTableSize() *wrappers.UInt32Value {
+ if x != nil {
+ return x.HpackTableSize
+ }
+ return nil
+}
+
+func (x *Http2ProtocolOptions) GetMaxConcurrentStreams() *wrappers.UInt32Value {
+ if x != nil {
+ return x.MaxConcurrentStreams
+ }
+ return nil
+}
+
+func (x *Http2ProtocolOptions) GetInitialStreamWindowSize() *wrappers.UInt32Value {
+ if x != nil {
+ return x.InitialStreamWindowSize
+ }
+ return nil
+}
+
+func (x *Http2ProtocolOptions) GetInitialConnectionWindowSize() *wrappers.UInt32Value {
+ if x != nil {
+ return x.InitialConnectionWindowSize
+ }
+ return nil
+}
+
+func (x *Http2ProtocolOptions) GetAllowConnect() bool {
+ if x != nil {
+ return x.AllowConnect
+ }
+ return false
+}
+
+func (x *Http2ProtocolOptions) GetAllowMetadata() bool {
+ if x != nil {
+ return x.AllowMetadata
+ }
+ return false
+}
+
+func (x *Http2ProtocolOptions) GetMaxOutboundFrames() *wrappers.UInt32Value {
+ if x != nil {
+ return x.MaxOutboundFrames
+ }
+ return nil
+}
+
+func (x *Http2ProtocolOptions) GetMaxOutboundControlFrames() *wrappers.UInt32Value {
+ if x != nil {
+ return x.MaxOutboundControlFrames
+ }
+ return nil
+}
+
+func (x *Http2ProtocolOptions) GetMaxConsecutiveInboundFramesWithEmptyPayload() *wrappers.UInt32Value {
+ if x != nil {
+ return x.MaxConsecutiveInboundFramesWithEmptyPayload
+ }
+ return nil
+}
+
+func (x *Http2ProtocolOptions) GetMaxInboundPriorityFramesPerStream() *wrappers.UInt32Value {
+ if x != nil {
+ return x.MaxInboundPriorityFramesPerStream
+ }
+ return nil
+}
+
+func (x *Http2ProtocolOptions) GetMaxInboundWindowUpdateFramesPerDataFrameSent() *wrappers.UInt32Value {
+ if x != nil {
+ return x.MaxInboundWindowUpdateFramesPerDataFrameSent
+ }
+ return nil
+}
+
+// Deprecated: Do not use.
+func (x *Http2ProtocolOptions) GetStreamErrorOnInvalidHttpMessaging() bool {
+ if x != nil {
+ return x.StreamErrorOnInvalidHttpMessaging
+ }
+ return false
+}
+
+func (x *Http2ProtocolOptions) GetOverrideStreamErrorOnInvalidHttpMessage() *wrappers.BoolValue {
+ if x != nil {
+ return x.OverrideStreamErrorOnInvalidHttpMessage
+ }
+ return nil
+}
+
+func (x *Http2ProtocolOptions) GetCustomSettingsParameters() []*Http2ProtocolOptions_SettingsParameter {
+ if x != nil {
+ return x.CustomSettingsParameters
+ }
+ return nil
+}
+
+func (x *Http2ProtocolOptions) GetConnectionKeepalive() *KeepaliveSettings {
+ if x != nil {
+ return x.ConnectionKeepalive
+ }
+ return nil
+}
+
+// [#not-implemented-hide:]
+type GrpcProtocolOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Http2ProtocolOptions *Http2ProtocolOptions `protobuf:"bytes,1,opt,name=http2_protocol_options,json=http2ProtocolOptions,proto3" json:"http2_protocol_options,omitempty"`
+}
+
+func (x *GrpcProtocolOptions) Reset() {
+ *x = GrpcProtocolOptions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GrpcProtocolOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcProtocolOptions) ProtoMessage() {}
+
+func (x *GrpcProtocolOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcProtocolOptions.ProtoReflect.Descriptor instead.
+func (*GrpcProtocolOptions) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *GrpcProtocolOptions) GetHttp2ProtocolOptions() *Http2ProtocolOptions {
+ if x != nil {
+ return x.Http2ProtocolOptions
+ }
+ return nil
+}
+
+// A message which allows using HTTP/3.
+type Http3ProtocolOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ QuicProtocolOptions *QuicProtocolOptions `protobuf:"bytes,1,opt,name=quic_protocol_options,json=quicProtocolOptions,proto3" json:"quic_protocol_options,omitempty"`
+ // Allows invalid HTTP messaging and headers. When this option is disabled (default), then
+ // the whole HTTP/3 connection is terminated upon receiving invalid HEADERS frame. However,
+ // when this option is enabled, only the offending stream is terminated.
+ //
+ // If set, this overrides any HCM :ref:`stream_error_on_invalid_http_messaging
+ // `.
+ OverrideStreamErrorOnInvalidHttpMessage *wrappers.BoolValue `protobuf:"bytes,2,opt,name=override_stream_error_on_invalid_http_message,json=overrideStreamErrorOnInvalidHttpMessage,proto3" json:"override_stream_error_on_invalid_http_message,omitempty"`
+}
+
+func (x *Http3ProtocolOptions) Reset() {
+ *x = Http3ProtocolOptions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Http3ProtocolOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Http3ProtocolOptions) ProtoMessage() {}
+
+func (x *Http3ProtocolOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Http3ProtocolOptions.ProtoReflect.Descriptor instead.
+func (*Http3ProtocolOptions) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *Http3ProtocolOptions) GetQuicProtocolOptions() *QuicProtocolOptions {
+ if x != nil {
+ return x.QuicProtocolOptions
+ }
+ return nil
+}
+
+func (x *Http3ProtocolOptions) GetOverrideStreamErrorOnInvalidHttpMessage() *wrappers.BoolValue {
+ if x != nil {
+ return x.OverrideStreamErrorOnInvalidHttpMessage
+ }
+ return nil
+}
+
+// [#next-free-field: 9]
+type Http1ProtocolOptions_HeaderKeyFormat struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to HeaderFormat:
+ // *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_
+ // *Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter
+ HeaderFormat isHttp1ProtocolOptions_HeaderKeyFormat_HeaderFormat `protobuf_oneof:"header_format"`
+}
+
+func (x *Http1ProtocolOptions_HeaderKeyFormat) Reset() {
+ *x = Http1ProtocolOptions_HeaderKeyFormat{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Http1ProtocolOptions_HeaderKeyFormat) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Http1ProtocolOptions_HeaderKeyFormat) ProtoMessage() {}
+
+func (x *Http1ProtocolOptions_HeaderKeyFormat) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Http1ProtocolOptions_HeaderKeyFormat.ProtoReflect.Descriptor instead.
+func (*Http1ProtocolOptions_HeaderKeyFormat) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{5, 0}
+}
+
+func (m *Http1ProtocolOptions_HeaderKeyFormat) GetHeaderFormat() isHttp1ProtocolOptions_HeaderKeyFormat_HeaderFormat {
+ if m != nil {
+ return m.HeaderFormat
+ }
+ return nil
+}
+
+func (x *Http1ProtocolOptions_HeaderKeyFormat) GetProperCaseWords() *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords {
+ if x, ok := x.GetHeaderFormat().(*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_); ok {
+ return x.ProperCaseWords
+ }
+ return nil
+}
+
+func (x *Http1ProtocolOptions_HeaderKeyFormat) GetStatefulFormatter() *TypedExtensionConfig {
+ if x, ok := x.GetHeaderFormat().(*Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter); ok {
+ return x.StatefulFormatter
+ }
+ return nil
+}
+
+type isHttp1ProtocolOptions_HeaderKeyFormat_HeaderFormat interface {
+ isHttp1ProtocolOptions_HeaderKeyFormat_HeaderFormat()
+}
+
+type Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_ struct {
+ // Formats the header by proper casing words: the first character and any character following
+ // a special character will be capitalized if it's an alpha character. For example,
+ // "content-type" becomes "Content-Type", and "foo$b#$are" becomes "Foo$B#$Are".
+ // Note that while this results in most headers following conventional casing, certain headers
+ // are not covered. For example, the "TE" header will be formatted as "Te".
+ ProperCaseWords *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords `protobuf:"bytes,1,opt,name=proper_case_words,json=properCaseWords,proto3,oneof"`
+}
+
+type Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter struct {
+ // Configuration for stateful formatter extensions that allow using received headers to
+ // affect the output of encoding headers. E.g., preserving case during proxying.
+ // [#extension-category: envoy.http.stateful_header_formatters]
+ StatefulFormatter *TypedExtensionConfig `protobuf:"bytes,8,opt,name=stateful_formatter,json=statefulFormatter,proto3,oneof"`
+}
+
+func (*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_) isHttp1ProtocolOptions_HeaderKeyFormat_HeaderFormat() {
+}
+
+func (*Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter) isHttp1ProtocolOptions_HeaderKeyFormat_HeaderFormat() {
+}
+
+type Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) Reset() {
+ *x = Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) ProtoMessage() {}
+
+func (x *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords.ProtoReflect.Descriptor instead.
+func (*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{5, 0, 0}
+}
+
+// Defines a parameter to be sent in the SETTINGS frame.
+// See `RFC7540, sec. 6.5.1 `_ for details.
+type Http2ProtocolOptions_SettingsParameter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The 16 bit parameter identifier.
+ Identifier *wrappers.UInt32Value `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"`
+ // The 32 bit parameter value.
+ Value *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *Http2ProtocolOptions_SettingsParameter) Reset() {
+ *x = Http2ProtocolOptions_SettingsParameter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Http2ProtocolOptions_SettingsParameter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Http2ProtocolOptions_SettingsParameter) ProtoMessage() {}
+
+func (x *Http2ProtocolOptions_SettingsParameter) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Http2ProtocolOptions_SettingsParameter.ProtoReflect.Descriptor instead.
+func (*Http2ProtocolOptions_SettingsParameter) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{7, 0}
+}
+
+func (x *Http2ProtocolOptions_SettingsParameter) GetIdentifier() *wrappers.UInt32Value {
+ if x != nil {
+ return x.Identifier
+ }
+ return nil
+}
+
+func (x *Http2ProtocolOptions_SettingsParameter) GetValue() *wrappers.UInt32Value {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+var File_envoy_config_core_v3_protocol_proto protoreflect.FileDescriptor
+
+var file_envoy_config_core_v3_protocol_proto_rawDesc = []byte{
+ 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x24, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76,
+ 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33,
+ 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f,
+ 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f,
+ 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f,
+ 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x41,
+ 0x0a, 0x12, 0x54, 0x63, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54,
+ 0x63, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x22, 0xc3, 0x02, 0x0a, 0x13, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+ 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x52, 0x0a, 0x16, 0x6d, 0x61, 0x78,
+ 0x5f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65,
+ 0x61, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74,
+ 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x63,
+ 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x67, 0x0a,
+ 0x1a, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f,
+ 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42,
+ 0x0c, 0xfa, 0x42, 0x09, 0x2a, 0x07, 0x18, 0x80, 0x80, 0x80, 0x08, 0x28, 0x01, 0x52, 0x17, 0x69,
+ 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x57, 0x69, 0x6e, 0x64,
+ 0x6f, 0x77, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x6f, 0x0a, 0x1e, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61,
+ 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77, 0x69, 0x6e,
+ 0x64, 0x6f, 0x77, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0c, 0xfa, 0x42,
+ 0x09, 0x2a, 0x07, 0x18, 0x80, 0x80, 0x80, 0x0c, 0x28, 0x01, 0x52, 0x1b, 0x69, 0x6e, 0x69, 0x74,
+ 0x69, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x69, 0x6e,
+ 0x64, 0x6f, 0x77, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x9e, 0x01, 0x0a, 0x1b, 0x55, 0x70, 0x73, 0x74,
+ 0x72, 0x65, 0x61, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c,
+ 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x75, 0x74, 0x6f, 0x5f,
+ 0x73, 0x6e, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x61, 0x75, 0x74, 0x6f, 0x53,
+ 0x6e, 0x69, 0x12, 0x2e, 0x0a, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x73, 0x61, 0x6e, 0x5f, 0x76,
+ 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x11, 0x61, 0x75, 0x74, 0x6f, 0x53, 0x61, 0x6e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x55, 0x70, 0x73,
+ 0x74, 0x72, 0x65, 0x61, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f,
+ 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x85, 0x01, 0x0a, 0x1e, 0x41, 0x6c, 0x74,
+ 0x65, 0x72, 0x6e, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x43,
+ 0x61, 0x63, 0x68, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1b, 0x0a, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02,
+ 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x5f,
+ 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04,
+ 0x2a, 0x02, 0x20, 0x00, 0x52, 0x0a, 0x6d, 0x61, 0x78, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73,
+ 0x22, 0xd2, 0x04, 0x0a, 0x13, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f,
+ 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x0c, 0x69, 0x64, 0x6c, 0x65,
+ 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x69, 0x64, 0x6c, 0x65, 0x54,
+ 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x51, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f,
+ 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x51, 0x0a, 0x11, 0x6d, 0x61, 0x78,
+ 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c,
+ 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x0f, 0x6d, 0x61, 0x78,
+ 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x49, 0x0a, 0x13,
+ 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44,
+ 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x8d, 0x01, 0x0a, 0x1f, 0x68, 0x65, 0x61, 0x64,
+ 0x65, 0x72, 0x73, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x75, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63,
+ 0x6f, 0x72, 0x65, 0x73, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x0e, 0x32, 0x46, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61,
+ 0x64, 0x65, 0x72, 0x73, 0x57, 0x69, 0x74, 0x68, 0x55, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f,
+ 0x72, 0x65, 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1c, 0x68, 0x65, 0x61, 0x64, 0x65,
+ 0x72, 0x73, 0x57, 0x69, 0x74, 0x68, 0x55, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, 0x72, 0x65,
+ 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4e, 0x0a, 0x1c, 0x48, 0x65, 0x61, 0x64, 0x65,
+ 0x72, 0x73, 0x57, 0x69, 0x74, 0x68, 0x55, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, 0x72, 0x65,
+ 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57,
+ 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x52, 0x45, 0x51,
+ 0x55, 0x45, 0x53, 0x54, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x52, 0x4f, 0x50, 0x5f, 0x48,
+ 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72,
+ 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xce, 0x07, 0x0a, 0x14, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x48,
+ 0x0a, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x62, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x65,
+ 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f,
+ 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x62, 0x73,
+ 0x6f, 0x6c, 0x75, 0x74, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x63, 0x63, 0x65,
+ 0x70, 0x74, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x31, 0x30, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x0c, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x48, 0x74, 0x74, 0x70, 0x31, 0x30, 0x12, 0x36,
+ 0x0a, 0x18, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x66,
+ 0x6f, 0x72, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x31, 0x30, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x14, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x46, 0x6f, 0x72,
+ 0x48, 0x74, 0x74, 0x70, 0x31, 0x30, 0x12, 0x66, 0x0a, 0x11, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72,
+ 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x65,
+ 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x0f, 0x68,
+ 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x27,
+ 0x0a, 0x0f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72,
+ 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x54,
+ 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x61, 0x6c, 0x6c, 0x6f, 0x77,
+ 0x5f, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18,
+ 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x68, 0x75, 0x6e,
+ 0x6b, 0x65, 0x64, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x7a, 0x0a, 0x2d, 0x6f, 0x76, 0x65,
+ 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, 0x72, 0x72,
+ 0x6f, 0x72, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x74,
+ 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x27, 0x6f, 0x76,
+ 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f,
+ 0x72, 0x4f, 0x6e, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65,
+ 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x9f, 0x03, 0x0a, 0x0f, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72,
+ 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x78, 0x0a, 0x11, 0x70, 0x72, 0x6f,
+ 0x70, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x73, 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70,
+ 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74,
+ 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, 0x57, 0x6f, 0x72, 0x64, 0x73,
+ 0x48, 0x00, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, 0x57, 0x6f,
+ 0x72, 0x64, 0x73, 0x12, 0x5b, 0x0a, 0x12, 0x73, 0x74, 0x61, 0x74, 0x65, 0x66, 0x75, 0x6c, 0x5f,
+ 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x74, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63,
+ 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65,
+ 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x11, 0x73,
+ 0x74, 0x61, 0x74, 0x65, 0x66, 0x75, 0x6c, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x74, 0x65, 0x72,
+ 0x1a, 0x60, 0x0a, 0x0f, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, 0x57, 0x6f,
+ 0x72, 0x64, 0x73, 0x3a, 0x4d, 0x9a, 0xc5, 0x88, 0x1e, 0x48, 0x0a, 0x46, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74,
+ 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d,
+ 0x61, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, 0x57, 0x6f, 0x72,
+ 0x64, 0x73, 0x3a, 0x3d, 0x9a, 0xc5, 0x88, 0x1e, 0x38, 0x0a, 0x36, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74,
+ 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61,
+ 0x74, 0x42, 0x14, 0x0a, 0x0d, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x66, 0x6f, 0x72, 0x6d,
+ 0x61, 0x74, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72,
+ 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe0, 0x01, 0x0a, 0x11, 0x4b, 0x65, 0x65, 0x70, 0x61,
+ 0x6c, 0x69, 0x76, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x45, 0x0a, 0x08,
+ 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0xaa, 0x01,
+ 0x08, 0x08, 0x01, 0x32, 0x04, 0x10, 0xc0, 0x84, 0x3d, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72,
+ 0x76, 0x61, 0x6c, 0x12, 0x43, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42,
+ 0x0e, 0xfa, 0x42, 0x0b, 0xaa, 0x01, 0x08, 0x08, 0x01, 0x32, 0x04, 0x10, 0xc0, 0x84, 0x3d, 0x52,
+ 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3f, 0x0a, 0x0f, 0x69, 0x6e, 0x74, 0x65,
+ 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76,
+ 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x69, 0x6e, 0x74, 0x65, 0x72,
+ 0x76, 0x61, 0x6c, 0x4a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x22, 0x81, 0x0e, 0x0a, 0x14, 0x48, 0x74,
+ 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x12, 0x46, 0x0a, 0x10, 0x68, 0x70, 0x61, 0x63, 0x6b, 0x5f, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55,
+ 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x68, 0x70, 0x61, 0x63,
+ 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x61, 0x0a, 0x16, 0x6d, 0x61,
+ 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72,
+ 0x65, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e,
+ 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x2a, 0x08, 0x18,
+ 0xff, 0xff, 0xff, 0xff, 0x07, 0x28, 0x01, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x63,
+ 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x6a, 0x0a,
+ 0x1a, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f,
+ 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42,
+ 0x0f, 0xfa, 0x42, 0x0c, 0x2a, 0x0a, 0x18, 0xff, 0xff, 0xff, 0xff, 0x07, 0x28, 0xff, 0xff, 0x03,
+ 0x52, 0x17, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x57,
+ 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x72, 0x0a, 0x1e, 0x69, 0x6e, 0x69,
+ 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42,
+ 0x0f, 0xfa, 0x42, 0x0c, 0x2a, 0x0a, 0x18, 0xff, 0xff, 0xff, 0xff, 0x07, 0x28, 0xff, 0xff, 0x03,
+ 0x52, 0x1b, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x23, 0x0a,
+ 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x6e, 0x65,
+ 0x63, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6d, 0x65, 0x74, 0x61,
+ 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x6c, 0x6f,
+ 0x77, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x55, 0x0a, 0x13, 0x6d, 0x61, 0x78,
+ 0x5f, 0x6f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73,
+ 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x11, 0x6d,
+ 0x61, 0x78, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73,
+ 0x12, 0x64, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64,
+ 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18,
+ 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x18, 0x6d, 0x61,
+ 0x78, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
+ 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x84, 0x01, 0x0a, 0x31, 0x6d, 0x61, 0x78, 0x5f, 0x63,
+ 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x62, 0x6f, 0x75,
+ 0x6e, 0x64, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x65,
+ 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x09, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x52, 0x2b, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65,
+ 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x57, 0x69, 0x74,
+ 0x68, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x6f, 0x0a,
+ 0x26, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x70, 0x72, 0x69,
+ 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72,
+ 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x21, 0x6d, 0x61, 0x78,
+ 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x46,
+ 0x72, 0x61, 0x6d, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x91,
+ 0x01, 0x0a, 0x34, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x77,
+ 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x66, 0x72, 0x61,
+ 0x6d, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x66, 0x72, 0x61,
+ 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04,
+ 0x2a, 0x02, 0x28, 0x01, 0x52, 0x2c, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64,
+ 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, 0x72, 0x61, 0x6d,
+ 0x65, 0x73, 0x50, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x65,
+ 0x6e, 0x74, 0x12, 0x5e, 0x0a, 0x26, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, 0x72, 0x72,
+ 0x6f, 0x72, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x74,
+ 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x18, 0x0c, 0x20, 0x01,
+ 0x28, 0x08, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x52,
+ 0x21, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, 0x49, 0x6e,
+ 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69,
+ 0x6e, 0x67, 0x12, 0x7a, 0x0a, 0x2d, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x73,
+ 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6f, 0x6e, 0x5f, 0x69,
+ 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x73, 0x73,
+ 0x61, 0x67, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x27, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x53,
+ 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, 0x49, 0x6e, 0x76, 0x61,
+ 0x6c, 0x69, 0x64, 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x7a,
+ 0x0a, 0x1a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
+ 0x73, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x0d, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x32, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53,
+ 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72,
+ 0x52, 0x18, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
+ 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x5a, 0x0a, 0x14, 0x63, 0x6f,
+ 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69,
+ 0x76, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e,
+ 0x4b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
+ 0x73, 0x52, 0x13, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x65,
+ 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x1a, 0xe5, 0x01, 0x0a, 0x11, 0x53, 0x65, 0x74, 0x74, 0x69,
+ 0x6e, 0x67, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x51, 0x0a, 0x0a,
+ 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x13,
+ 0xfa, 0x42, 0x08, 0x2a, 0x06, 0x18, 0xff, 0xff, 0x03, 0x28, 0x00, 0xfa, 0x42, 0x05, 0x8a, 0x01,
+ 0x02, 0x10, 0x01, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12,
+ 0x3c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42,
+ 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x3f, 0x9a,
+ 0xc5, 0x88, 0x1e, 0x3a, 0x0a, 0x38, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e,
+ 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x74,
+ 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x3a, 0x2d,
+ 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69,
+ 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa5, 0x01,
+ 0x0a, 0x13, 0x47, 0x72, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x60, 0x0a, 0x16, 0x68, 0x74, 0x74, 0x70, 0x32, 0x5f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74,
+ 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x52, 0x14, 0x68, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c,
+ 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72,
+ 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xf1, 0x01, 0x0a, 0x14, 0x48, 0x74, 0x74, 0x70, 0x33, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d,
+ 0x0a, 0x15, 0x71, 0x75, 0x69, 0x63, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f,
+ 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72,
+ 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f,
+ 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x71, 0x75, 0x69, 0x63, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x7a, 0x0a,
+ 0x2d, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d,
+ 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69,
+ 0x64, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x52, 0x27, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d,
+ 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x74,
+ 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x3d, 0x0a, 0x22, 0x69, 0x6f, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42,
+ 0x0d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
+ 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_core_v3_protocol_proto_rawDescOnce sync.Once
+ file_envoy_config_core_v3_protocol_proto_rawDescData = file_envoy_config_core_v3_protocol_proto_rawDesc
+)
+
+func file_envoy_config_core_v3_protocol_proto_rawDescGZIP() []byte {
+ file_envoy_config_core_v3_protocol_proto_rawDescOnce.Do(func() {
+ file_envoy_config_core_v3_protocol_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_protocol_proto_rawDescData)
+ })
+ return file_envoy_config_core_v3_protocol_proto_rawDescData
+}
+
+var file_envoy_config_core_v3_protocol_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_envoy_config_core_v3_protocol_proto_msgTypes = make([]protoimpl.MessageInfo, 13)
+var file_envoy_config_core_v3_protocol_proto_goTypes = []interface{}{
+ (HttpProtocolOptions_HeadersWithUnderscoresAction)(0), // 0: envoy.config.core.v3.HttpProtocolOptions.HeadersWithUnderscoresAction
+ (*TcpProtocolOptions)(nil), // 1: envoy.config.core.v3.TcpProtocolOptions
+ (*QuicProtocolOptions)(nil), // 2: envoy.config.core.v3.QuicProtocolOptions
+ (*UpstreamHttpProtocolOptions)(nil), // 3: envoy.config.core.v3.UpstreamHttpProtocolOptions
+ (*AlternateProtocolsCacheOptions)(nil), // 4: envoy.config.core.v3.AlternateProtocolsCacheOptions
+ (*HttpProtocolOptions)(nil), // 5: envoy.config.core.v3.HttpProtocolOptions
+ (*Http1ProtocolOptions)(nil), // 6: envoy.config.core.v3.Http1ProtocolOptions
+ (*KeepaliveSettings)(nil), // 7: envoy.config.core.v3.KeepaliveSettings
+ (*Http2ProtocolOptions)(nil), // 8: envoy.config.core.v3.Http2ProtocolOptions
+ (*GrpcProtocolOptions)(nil), // 9: envoy.config.core.v3.GrpcProtocolOptions
+ (*Http3ProtocolOptions)(nil), // 10: envoy.config.core.v3.Http3ProtocolOptions
+ (*Http1ProtocolOptions_HeaderKeyFormat)(nil), // 11: envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat
+ (*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords)(nil), // 12: envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.ProperCaseWords
+ (*Http2ProtocolOptions_SettingsParameter)(nil), // 13: envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter
+ (*wrappers.UInt32Value)(nil), // 14: google.protobuf.UInt32Value
+ (*duration.Duration)(nil), // 15: google.protobuf.Duration
+ (*wrappers.BoolValue)(nil), // 16: google.protobuf.BoolValue
+ (*v3.Percent)(nil), // 17: envoy.type.v3.Percent
+ (*TypedExtensionConfig)(nil), // 18: envoy.config.core.v3.TypedExtensionConfig
+}
+var file_envoy_config_core_v3_protocol_proto_depIdxs = []int32{
+ 14, // 0: envoy.config.core.v3.QuicProtocolOptions.max_concurrent_streams:type_name -> google.protobuf.UInt32Value
+ 14, // 1: envoy.config.core.v3.QuicProtocolOptions.initial_stream_window_size:type_name -> google.protobuf.UInt32Value
+ 14, // 2: envoy.config.core.v3.QuicProtocolOptions.initial_connection_window_size:type_name -> google.protobuf.UInt32Value
+ 14, // 3: envoy.config.core.v3.AlternateProtocolsCacheOptions.max_entries:type_name -> google.protobuf.UInt32Value
+ 15, // 4: envoy.config.core.v3.HttpProtocolOptions.idle_timeout:type_name -> google.protobuf.Duration
+ 15, // 5: envoy.config.core.v3.HttpProtocolOptions.max_connection_duration:type_name -> google.protobuf.Duration
+ 14, // 6: envoy.config.core.v3.HttpProtocolOptions.max_headers_count:type_name -> google.protobuf.UInt32Value
+ 15, // 7: envoy.config.core.v3.HttpProtocolOptions.max_stream_duration:type_name -> google.protobuf.Duration
+ 0, // 8: envoy.config.core.v3.HttpProtocolOptions.headers_with_underscores_action:type_name -> envoy.config.core.v3.HttpProtocolOptions.HeadersWithUnderscoresAction
+ 16, // 9: envoy.config.core.v3.Http1ProtocolOptions.allow_absolute_url:type_name -> google.protobuf.BoolValue
+ 11, // 10: envoy.config.core.v3.Http1ProtocolOptions.header_key_format:type_name -> envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat
+ 16, // 11: envoy.config.core.v3.Http1ProtocolOptions.override_stream_error_on_invalid_http_message:type_name -> google.protobuf.BoolValue
+ 15, // 12: envoy.config.core.v3.KeepaliveSettings.interval:type_name -> google.protobuf.Duration
+ 15, // 13: envoy.config.core.v3.KeepaliveSettings.timeout:type_name -> google.protobuf.Duration
+ 17, // 14: envoy.config.core.v3.KeepaliveSettings.interval_jitter:type_name -> envoy.type.v3.Percent
+ 14, // 15: envoy.config.core.v3.Http2ProtocolOptions.hpack_table_size:type_name -> google.protobuf.UInt32Value
+ 14, // 16: envoy.config.core.v3.Http2ProtocolOptions.max_concurrent_streams:type_name -> google.protobuf.UInt32Value
+ 14, // 17: envoy.config.core.v3.Http2ProtocolOptions.initial_stream_window_size:type_name -> google.protobuf.UInt32Value
+ 14, // 18: envoy.config.core.v3.Http2ProtocolOptions.initial_connection_window_size:type_name -> google.protobuf.UInt32Value
+ 14, // 19: envoy.config.core.v3.Http2ProtocolOptions.max_outbound_frames:type_name -> google.protobuf.UInt32Value
+ 14, // 20: envoy.config.core.v3.Http2ProtocolOptions.max_outbound_control_frames:type_name -> google.protobuf.UInt32Value
+ 14, // 21: envoy.config.core.v3.Http2ProtocolOptions.max_consecutive_inbound_frames_with_empty_payload:type_name -> google.protobuf.UInt32Value
+ 14, // 22: envoy.config.core.v3.Http2ProtocolOptions.max_inbound_priority_frames_per_stream:type_name -> google.protobuf.UInt32Value
+ 14, // 23: envoy.config.core.v3.Http2ProtocolOptions.max_inbound_window_update_frames_per_data_frame_sent:type_name -> google.protobuf.UInt32Value
+ 16, // 24: envoy.config.core.v3.Http2ProtocolOptions.override_stream_error_on_invalid_http_message:type_name -> google.protobuf.BoolValue
+ 13, // 25: envoy.config.core.v3.Http2ProtocolOptions.custom_settings_parameters:type_name -> envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter
+ 7, // 26: envoy.config.core.v3.Http2ProtocolOptions.connection_keepalive:type_name -> envoy.config.core.v3.KeepaliveSettings
+ 8, // 27: envoy.config.core.v3.GrpcProtocolOptions.http2_protocol_options:type_name -> envoy.config.core.v3.Http2ProtocolOptions
+ 2, // 28: envoy.config.core.v3.Http3ProtocolOptions.quic_protocol_options:type_name -> envoy.config.core.v3.QuicProtocolOptions
+ 16, // 29: envoy.config.core.v3.Http3ProtocolOptions.override_stream_error_on_invalid_http_message:type_name -> google.protobuf.BoolValue
+ 12, // 30: envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.proper_case_words:type_name -> envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.ProperCaseWords
+ 18, // 31: envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.stateful_formatter:type_name -> envoy.config.core.v3.TypedExtensionConfig
+ 14, // 32: envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter.identifier:type_name -> google.protobuf.UInt32Value
+ 14, // 33: envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter.value:type_name -> google.protobuf.UInt32Value
+ 34, // [34:34] is the sub-list for method output_type
+ 34, // [34:34] is the sub-list for method input_type
+ 34, // [34:34] is the sub-list for extension type_name
+ 34, // [34:34] is the sub-list for extension extendee
+ 0, // [0:34] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_core_v3_protocol_proto_init() }
+func file_envoy_config_core_v3_protocol_proto_init() {
+ if File_envoy_config_core_v3_protocol_proto != nil {
+ return
+ }
+ file_envoy_config_core_v3_extension_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_core_v3_protocol_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TcpProtocolOptions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_protocol_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*QuicProtocolOptions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_protocol_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UpstreamHttpProtocolOptions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_protocol_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AlternateProtocolsCacheOptions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_protocol_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HttpProtocolOptions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_protocol_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Http1ProtocolOptions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_protocol_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*KeepaliveSettings); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_protocol_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Http2ProtocolOptions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_protocol_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GrpcProtocolOptions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_protocol_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Http3ProtocolOptions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_protocol_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Http1ProtocolOptions_HeaderKeyFormat); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_protocol_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_protocol_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Http2ProtocolOptions_SettingsParameter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_envoy_config_core_v3_protocol_proto_msgTypes[10].OneofWrappers = []interface{}{
+ (*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_)(nil),
+ (*Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_core_v3_protocol_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 13,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_core_v3_protocol_proto_goTypes,
+ DependencyIndexes: file_envoy_config_core_v3_protocol_proto_depIdxs,
+ EnumInfos: file_envoy_config_core_v3_protocol_proto_enumTypes,
+ MessageInfos: file_envoy_config_core_v3_protocol_proto_msgTypes,
+ }.Build()
+ File_envoy_config_core_v3_protocol_proto = out.File
+ file_envoy_config_core_v3_protocol_proto_rawDesc = nil
+ file_envoy_config_core_v3_protocol_proto_goTypes = nil
+ file_envoy_config_core_v3_protocol_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.validate.go
new file mode 100644
index 000000000..b5eae3ee7
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.validate.go
@@ -0,0 +1,1353 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/core/v3/protocol.proto
+
+package envoy_config_core_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// Validate checks the field values on TcpProtocolOptions with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *TcpProtocolOptions) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ return nil
+}
+
+// TcpProtocolOptionsValidationError is the validation error returned by
+// TcpProtocolOptions.Validate if the designated constraints aren't met.
+type TcpProtocolOptionsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e TcpProtocolOptionsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e TcpProtocolOptionsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e TcpProtocolOptionsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e TcpProtocolOptionsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e TcpProtocolOptionsValidationError) ErrorName() string {
+ return "TcpProtocolOptionsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e TcpProtocolOptionsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sTcpProtocolOptions.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = TcpProtocolOptionsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = TcpProtocolOptionsValidationError{}
+
+// Validate checks the field values on QuicProtocolOptions with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *QuicProtocolOptions) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if v, ok := interface{}(m.GetMaxConcurrentStreams()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return QuicProtocolOptionsValidationError{
+ field: "MaxConcurrentStreams",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if wrapper := m.GetInitialStreamWindowSize(); wrapper != nil {
+
+ if val := wrapper.GetValue(); val < 1 || val > 16777216 {
+ return QuicProtocolOptionsValidationError{
+ field: "InitialStreamWindowSize",
+ reason: "value must be inside range [1, 16777216]",
+ }
+ }
+
+ }
+
+ if wrapper := m.GetInitialConnectionWindowSize(); wrapper != nil {
+
+ if val := wrapper.GetValue(); val < 1 || val > 25165824 {
+ return QuicProtocolOptionsValidationError{
+ field: "InitialConnectionWindowSize",
+ reason: "value must be inside range [1, 25165824]",
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// QuicProtocolOptionsValidationError is the validation error returned by
+// QuicProtocolOptions.Validate if the designated constraints aren't met.
+type QuicProtocolOptionsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e QuicProtocolOptionsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e QuicProtocolOptionsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e QuicProtocolOptionsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e QuicProtocolOptionsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e QuicProtocolOptionsValidationError) ErrorName() string {
+ return "QuicProtocolOptionsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e QuicProtocolOptionsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sQuicProtocolOptions.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = QuicProtocolOptionsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = QuicProtocolOptionsValidationError{}
+
+// Validate checks the field values on UpstreamHttpProtocolOptions with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *UpstreamHttpProtocolOptions) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ // no validation rules for AutoSni
+
+ // no validation rules for AutoSanValidation
+
+ return nil
+}
+
+// UpstreamHttpProtocolOptionsValidationError is the validation error returned
+// by UpstreamHttpProtocolOptions.Validate if the designated constraints
+// aren't met.
+type UpstreamHttpProtocolOptionsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e UpstreamHttpProtocolOptionsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e UpstreamHttpProtocolOptionsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e UpstreamHttpProtocolOptionsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e UpstreamHttpProtocolOptionsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e UpstreamHttpProtocolOptionsValidationError) ErrorName() string {
+ return "UpstreamHttpProtocolOptionsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e UpstreamHttpProtocolOptionsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sUpstreamHttpProtocolOptions.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = UpstreamHttpProtocolOptionsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = UpstreamHttpProtocolOptionsValidationError{}
+
+// Validate checks the field values on AlternateProtocolsCacheOptions with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *AlternateProtocolsCacheOptions) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if utf8.RuneCountInString(m.GetName()) < 1 {
+ return AlternateProtocolsCacheOptionsValidationError{
+ field: "Name",
+ reason: "value length must be at least 1 runes",
+ }
+ }
+
+ if wrapper := m.GetMaxEntries(); wrapper != nil {
+
+ if wrapper.GetValue() <= 0 {
+ return AlternateProtocolsCacheOptionsValidationError{
+ field: "MaxEntries",
+ reason: "value must be greater than 0",
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// AlternateProtocolsCacheOptionsValidationError is the validation error
+// returned by AlternateProtocolsCacheOptions.Validate if the designated
+// constraints aren't met.
+type AlternateProtocolsCacheOptionsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e AlternateProtocolsCacheOptionsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e AlternateProtocolsCacheOptionsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e AlternateProtocolsCacheOptionsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e AlternateProtocolsCacheOptionsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e AlternateProtocolsCacheOptionsValidationError) ErrorName() string {
+ return "AlternateProtocolsCacheOptionsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e AlternateProtocolsCacheOptionsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sAlternateProtocolsCacheOptions.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = AlternateProtocolsCacheOptionsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = AlternateProtocolsCacheOptionsValidationError{}
+
+// Validate checks the field values on HttpProtocolOptions with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *HttpProtocolOptions) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if v, ok := interface{}(m.GetIdleTimeout()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HttpProtocolOptionsValidationError{
+ field: "IdleTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if v, ok := interface{}(m.GetMaxConnectionDuration()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HttpProtocolOptionsValidationError{
+ field: "MaxConnectionDuration",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if wrapper := m.GetMaxHeadersCount(); wrapper != nil {
+
+ if wrapper.GetValue() < 1 {
+ return HttpProtocolOptionsValidationError{
+ field: "MaxHeadersCount",
+ reason: "value must be greater than or equal to 1",
+ }
+ }
+
+ }
+
+ if v, ok := interface{}(m.GetMaxStreamDuration()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HttpProtocolOptionsValidationError{
+ field: "MaxStreamDuration",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for HeadersWithUnderscoresAction
+
+ return nil
+}
+
+// HttpProtocolOptionsValidationError is the validation error returned by
+// HttpProtocolOptions.Validate if the designated constraints aren't met.
+type HttpProtocolOptionsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HttpProtocolOptionsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HttpProtocolOptionsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HttpProtocolOptionsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HttpProtocolOptionsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HttpProtocolOptionsValidationError) ErrorName() string {
+ return "HttpProtocolOptionsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e HttpProtocolOptionsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHttpProtocolOptions.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HttpProtocolOptionsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HttpProtocolOptionsValidationError{}
+
+// Validate checks the field values on Http1ProtocolOptions with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *Http1ProtocolOptions) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if v, ok := interface{}(m.GetAllowAbsoluteUrl()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Http1ProtocolOptionsValidationError{
+ field: "AllowAbsoluteUrl",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for AcceptHttp_10
+
+ // no validation rules for DefaultHostForHttp_10
+
+ if v, ok := interface{}(m.GetHeaderKeyFormat()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Http1ProtocolOptionsValidationError{
+ field: "HeaderKeyFormat",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for EnableTrailers
+
+ // no validation rules for AllowChunkedLength
+
+ if v, ok := interface{}(m.GetOverrideStreamErrorOnInvalidHttpMessage()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Http1ProtocolOptionsValidationError{
+ field: "OverrideStreamErrorOnInvalidHttpMessage",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ return nil
+}
+
+// Http1ProtocolOptionsValidationError is the validation error returned by
+// Http1ProtocolOptions.Validate if the designated constraints aren't met.
+type Http1ProtocolOptionsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Http1ProtocolOptionsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Http1ProtocolOptionsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Http1ProtocolOptionsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Http1ProtocolOptionsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Http1ProtocolOptionsValidationError) ErrorName() string {
+ return "Http1ProtocolOptionsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Http1ProtocolOptionsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHttp1ProtocolOptions.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Http1ProtocolOptionsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Http1ProtocolOptionsValidationError{}
+
+// Validate checks the field values on KeepaliveSettings with the rules defined
+// in the proto definition for this message. If any rules are violated, an
+// error is returned.
+func (m *KeepaliveSettings) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if m.GetInterval() == nil {
+ return KeepaliveSettingsValidationError{
+ field: "Interval",
+ reason: "value is required",
+ }
+ }
+
+ if d := m.GetInterval(); d != nil {
+ dur, err := ptypes.Duration(d)
+ if err != nil {
+ return KeepaliveSettingsValidationError{
+ field: "Interval",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ }
+
+ gte := time.Duration(0*time.Second + 1000000*time.Nanosecond)
+
+ if dur < gte {
+ return KeepaliveSettingsValidationError{
+ field: "Interval",
+ reason: "value must be greater than or equal to 1ms",
+ }
+ }
+
+ }
+
+ if m.GetTimeout() == nil {
+ return KeepaliveSettingsValidationError{
+ field: "Timeout",
+ reason: "value is required",
+ }
+ }
+
+ if d := m.GetTimeout(); d != nil {
+ dur, err := ptypes.Duration(d)
+ if err != nil {
+ return KeepaliveSettingsValidationError{
+ field: "Timeout",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ }
+
+ gte := time.Duration(0*time.Second + 1000000*time.Nanosecond)
+
+ if dur < gte {
+ return KeepaliveSettingsValidationError{
+ field: "Timeout",
+ reason: "value must be greater than or equal to 1ms",
+ }
+ }
+
+ }
+
+ if v, ok := interface{}(m.GetIntervalJitter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return KeepaliveSettingsValidationError{
+ field: "IntervalJitter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ return nil
+}
+
+// KeepaliveSettingsValidationError is the validation error returned by
+// KeepaliveSettings.Validate if the designated constraints aren't met.
+type KeepaliveSettingsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e KeepaliveSettingsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e KeepaliveSettingsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e KeepaliveSettingsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e KeepaliveSettingsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e KeepaliveSettingsValidationError) ErrorName() string {
+ return "KeepaliveSettingsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e KeepaliveSettingsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sKeepaliveSettings.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = KeepaliveSettingsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = KeepaliveSettingsValidationError{}
+
+// Validate checks the field values on Http2ProtocolOptions with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *Http2ProtocolOptions) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if v, ok := interface{}(m.GetHpackTableSize()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Http2ProtocolOptionsValidationError{
+ field: "HpackTableSize",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if wrapper := m.GetMaxConcurrentStreams(); wrapper != nil {
+
+ if val := wrapper.GetValue(); val < 1 || val > 2147483647 {
+ return Http2ProtocolOptionsValidationError{
+ field: "MaxConcurrentStreams",
+ reason: "value must be inside range [1, 2147483647]",
+ }
+ }
+
+ }
+
+ if wrapper := m.GetInitialStreamWindowSize(); wrapper != nil {
+
+ if val := wrapper.GetValue(); val < 65535 || val > 2147483647 {
+ return Http2ProtocolOptionsValidationError{
+ field: "InitialStreamWindowSize",
+ reason: "value must be inside range [65535, 2147483647]",
+ }
+ }
+
+ }
+
+ if wrapper := m.GetInitialConnectionWindowSize(); wrapper != nil {
+
+ if val := wrapper.GetValue(); val < 65535 || val > 2147483647 {
+ return Http2ProtocolOptionsValidationError{
+ field: "InitialConnectionWindowSize",
+ reason: "value must be inside range [65535, 2147483647]",
+ }
+ }
+
+ }
+
+ // no validation rules for AllowConnect
+
+ // no validation rules for AllowMetadata
+
+ if wrapper := m.GetMaxOutboundFrames(); wrapper != nil {
+
+ if wrapper.GetValue() < 1 {
+ return Http2ProtocolOptionsValidationError{
+ field: "MaxOutboundFrames",
+ reason: "value must be greater than or equal to 1",
+ }
+ }
+
+ }
+
+ if wrapper := m.GetMaxOutboundControlFrames(); wrapper != nil {
+
+ if wrapper.GetValue() < 1 {
+ return Http2ProtocolOptionsValidationError{
+ field: "MaxOutboundControlFrames",
+ reason: "value must be greater than or equal to 1",
+ }
+ }
+
+ }
+
+ if v, ok := interface{}(m.GetMaxConsecutiveInboundFramesWithEmptyPayload()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Http2ProtocolOptionsValidationError{
+ field: "MaxConsecutiveInboundFramesWithEmptyPayload",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if v, ok := interface{}(m.GetMaxInboundPriorityFramesPerStream()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Http2ProtocolOptionsValidationError{
+ field: "MaxInboundPriorityFramesPerStream",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if wrapper := m.GetMaxInboundWindowUpdateFramesPerDataFrameSent(); wrapper != nil {
+
+ if wrapper.GetValue() < 1 {
+ return Http2ProtocolOptionsValidationError{
+ field: "MaxInboundWindowUpdateFramesPerDataFrameSent",
+ reason: "value must be greater than or equal to 1",
+ }
+ }
+
+ }
+
+ // no validation rules for StreamErrorOnInvalidHttpMessaging
+
+ if v, ok := interface{}(m.GetOverrideStreamErrorOnInvalidHttpMessage()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Http2ProtocolOptionsValidationError{
+ field: "OverrideStreamErrorOnInvalidHttpMessage",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ for idx, item := range m.GetCustomSettingsParameters() {
+ _, _ = idx, item
+
+ if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Http2ProtocolOptionsValidationError{
+ field: fmt.Sprintf("CustomSettingsParameters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if v, ok := interface{}(m.GetConnectionKeepalive()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Http2ProtocolOptionsValidationError{
+ field: "ConnectionKeepalive",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ return nil
+}
+
+// Http2ProtocolOptionsValidationError is the validation error returned by
+// Http2ProtocolOptions.Validate if the designated constraints aren't met.
+type Http2ProtocolOptionsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Http2ProtocolOptionsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Http2ProtocolOptionsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Http2ProtocolOptionsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Http2ProtocolOptionsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Http2ProtocolOptionsValidationError) ErrorName() string {
+ return "Http2ProtocolOptionsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Http2ProtocolOptionsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHttp2ProtocolOptions.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Http2ProtocolOptionsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Http2ProtocolOptionsValidationError{}
+
+// Validate checks the field values on GrpcProtocolOptions with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *GrpcProtocolOptions) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if v, ok := interface{}(m.GetHttp2ProtocolOptions()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcProtocolOptionsValidationError{
+ field: "Http2ProtocolOptions",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ return nil
+}
+
+// GrpcProtocolOptionsValidationError is the validation error returned by
+// GrpcProtocolOptions.Validate if the designated constraints aren't met.
+type GrpcProtocolOptionsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e GrpcProtocolOptionsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e GrpcProtocolOptionsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e GrpcProtocolOptionsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e GrpcProtocolOptionsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e GrpcProtocolOptionsValidationError) ErrorName() string {
+ return "GrpcProtocolOptionsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e GrpcProtocolOptionsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sGrpcProtocolOptions.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = GrpcProtocolOptionsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = GrpcProtocolOptionsValidationError{}
+
+// Validate checks the field values on Http3ProtocolOptions with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *Http3ProtocolOptions) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if v, ok := interface{}(m.GetQuicProtocolOptions()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Http3ProtocolOptionsValidationError{
+ field: "QuicProtocolOptions",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if v, ok := interface{}(m.GetOverrideStreamErrorOnInvalidHttpMessage()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Http3ProtocolOptionsValidationError{
+ field: "OverrideStreamErrorOnInvalidHttpMessage",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ return nil
+}
+
+// Http3ProtocolOptionsValidationError is the validation error returned by
+// Http3ProtocolOptions.Validate if the designated constraints aren't met.
+type Http3ProtocolOptionsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Http3ProtocolOptionsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Http3ProtocolOptionsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Http3ProtocolOptionsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Http3ProtocolOptionsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Http3ProtocolOptionsValidationError) ErrorName() string {
+ return "Http3ProtocolOptionsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Http3ProtocolOptionsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHttp3ProtocolOptions.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Http3ProtocolOptionsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Http3ProtocolOptionsValidationError{}
+
+// Validate checks the field values on Http1ProtocolOptions_HeaderKeyFormat
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, an error is returned.
+func (m *Http1ProtocolOptions_HeaderKeyFormat) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ switch m.HeaderFormat.(type) {
+
+ case *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_:
+
+ if v, ok := interface{}(m.GetProperCaseWords()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Http1ProtocolOptions_HeaderKeyFormatValidationError{
+ field: "ProperCaseWords",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter:
+
+ if v, ok := interface{}(m.GetStatefulFormatter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Http1ProtocolOptions_HeaderKeyFormatValidationError{
+ field: "StatefulFormatter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ return Http1ProtocolOptions_HeaderKeyFormatValidationError{
+ field: "HeaderFormat",
+ reason: "value is required",
+ }
+
+ }
+
+ return nil
+}
+
+// Http1ProtocolOptions_HeaderKeyFormatValidationError is the validation error
+// returned by Http1ProtocolOptions_HeaderKeyFormat.Validate if the designated
+// constraints aren't met.
+type Http1ProtocolOptions_HeaderKeyFormatValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Http1ProtocolOptions_HeaderKeyFormatValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Http1ProtocolOptions_HeaderKeyFormatValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Http1ProtocolOptions_HeaderKeyFormatValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Http1ProtocolOptions_HeaderKeyFormatValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Http1ProtocolOptions_HeaderKeyFormatValidationError) ErrorName() string {
+ return "Http1ProtocolOptions_HeaderKeyFormatValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Http1ProtocolOptions_HeaderKeyFormatValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHttp1ProtocolOptions_HeaderKeyFormat.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Http1ProtocolOptions_HeaderKeyFormatValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Http1ProtocolOptions_HeaderKeyFormatValidationError{}
+
+// Validate checks the field values on
+// Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords with the rules defined
+// in the proto definition for this message. If any rules are violated, an
+// error is returned.
+func (m *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ return nil
+}
+
+// Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError is the
+// validation error returned by
+// Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords.Validate if the
+// designated constraints aren't met.
+type Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError) Field() string {
+ return e.field
+}
+
+// Reason function returns reason value.
+func (e Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError) Reason() string {
+ return e.reason
+}
+
+// Cause function returns cause value.
+func (e Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError) Cause() error {
+ return e.cause
+}
+
+// Key function returns key value.
+func (e Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError) ErrorName() string {
+ return "Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHttp1ProtocolOptions_HeaderKeyFormat_ProperCaseWords.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError{}
+
+// Validate checks the field values on Http2ProtocolOptions_SettingsParameter
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, an error is returned.
+func (m *Http2ProtocolOptions_SettingsParameter) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if wrapper := m.GetIdentifier(); wrapper != nil {
+
+ if val := wrapper.GetValue(); val < 0 || val > 65535 {
+ return Http2ProtocolOptions_SettingsParameterValidationError{
+ field: "Identifier",
+ reason: "value must be inside range [0, 65535]",
+ }
+ }
+
+ } else {
+ return Http2ProtocolOptions_SettingsParameterValidationError{
+ field: "Identifier",
+ reason: "value is required and must not be nil.",
+ }
+ }
+
+ if m.GetValue() == nil {
+ return Http2ProtocolOptions_SettingsParameterValidationError{
+ field: "Value",
+ reason: "value is required",
+ }
+ }
+
+ if v, ok := interface{}(m.GetValue()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Http2ProtocolOptions_SettingsParameterValidationError{
+ field: "Value",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ return nil
+}
+
+// Http2ProtocolOptions_SettingsParameterValidationError is the validation
+// error returned by Http2ProtocolOptions_SettingsParameter.Validate if the
+// designated constraints aren't met.
+type Http2ProtocolOptions_SettingsParameterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Http2ProtocolOptions_SettingsParameterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Http2ProtocolOptions_SettingsParameterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Http2ProtocolOptions_SettingsParameterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Http2ProtocolOptions_SettingsParameterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Http2ProtocolOptions_SettingsParameterValidationError) ErrorName() string {
+ return "Http2ProtocolOptions_SettingsParameterValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Http2ProtocolOptions_SettingsParameterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHttp2ProtocolOptions_SettingsParameter.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Http2ProtocolOptions_SettingsParameterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Http2ProtocolOptions_SettingsParameterValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.go
new file mode 100644
index 000000000..1c0f60cfc
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.go
@@ -0,0 +1,214 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/config/core/v3/proxy_protocol.proto
+
+package envoy_config_core_v3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+type ProxyProtocolConfig_Version int32
+
+const (
+ // PROXY protocol version 1. Human readable format.
+ ProxyProtocolConfig_V1 ProxyProtocolConfig_Version = 0
+ // PROXY protocol version 2. Binary format.
+ ProxyProtocolConfig_V2 ProxyProtocolConfig_Version = 1
+)
+
+// Enum value maps for ProxyProtocolConfig_Version.
+var (
+ ProxyProtocolConfig_Version_name = map[int32]string{
+ 0: "V1",
+ 1: "V2",
+ }
+ ProxyProtocolConfig_Version_value = map[string]int32{
+ "V1": 0,
+ "V2": 1,
+ }
+)
+
+func (x ProxyProtocolConfig_Version) Enum() *ProxyProtocolConfig_Version {
+ p := new(ProxyProtocolConfig_Version)
+ *p = x
+ return p
+}
+
+func (x ProxyProtocolConfig_Version) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ProxyProtocolConfig_Version) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_core_v3_proxy_protocol_proto_enumTypes[0].Descriptor()
+}
+
+func (ProxyProtocolConfig_Version) Type() protoreflect.EnumType {
+ return &file_envoy_config_core_v3_proxy_protocol_proto_enumTypes[0]
+}
+
+func (x ProxyProtocolConfig_Version) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ProxyProtocolConfig_Version.Descriptor instead.
+func (ProxyProtocolConfig_Version) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_proxy_protocol_proto_rawDescGZIP(), []int{0, 0}
+}
+
+type ProxyProtocolConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The PROXY protocol version to use. See https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt for details
+ Version ProxyProtocolConfig_Version `protobuf:"varint,1,opt,name=version,proto3,enum=envoy.config.core.v3.ProxyProtocolConfig_Version" json:"version,omitempty"`
+}
+
+func (x *ProxyProtocolConfig) Reset() {
+ *x = ProxyProtocolConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_proxy_protocol_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ProxyProtocolConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ProxyProtocolConfig) ProtoMessage() {}
+
+func (x *ProxyProtocolConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_proxy_protocol_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ProxyProtocolConfig.ProtoReflect.Descriptor instead.
+func (*ProxyProtocolConfig) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_proxy_protocol_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ProxyProtocolConfig) GetVersion() ProxyProtocolConfig_Version {
+ if x != nil {
+ return x.Version
+ }
+ return ProxyProtocolConfig_V1
+}
+
+var File_envoy_config_core_v3_proxy_protocol_proto protoreflect.FileDescriptor
+
+var file_envoy_config_core_v3_proxy_protocol_proto_rawDesc = []byte{
+ 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76,
+ 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x22, 0x7d, 0x0a, 0x13, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f,
+ 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4b, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e,
+ 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x19, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12,
+ 0x06, 0x0a, 0x02, 0x56, 0x31, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x56, 0x32, 0x10, 0x01, 0x42,
+ 0x42, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x12, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x63, 0x6f, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06,
+ 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_core_v3_proxy_protocol_proto_rawDescOnce sync.Once
+ file_envoy_config_core_v3_proxy_protocol_proto_rawDescData = file_envoy_config_core_v3_proxy_protocol_proto_rawDesc
+)
+
+func file_envoy_config_core_v3_proxy_protocol_proto_rawDescGZIP() []byte {
+ file_envoy_config_core_v3_proxy_protocol_proto_rawDescOnce.Do(func() {
+ file_envoy_config_core_v3_proxy_protocol_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_proxy_protocol_proto_rawDescData)
+ })
+ return file_envoy_config_core_v3_proxy_protocol_proto_rawDescData
+}
+
+var file_envoy_config_core_v3_proxy_protocol_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_envoy_config_core_v3_proxy_protocol_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_envoy_config_core_v3_proxy_protocol_proto_goTypes = []interface{}{
+ (ProxyProtocolConfig_Version)(0), // 0: envoy.config.core.v3.ProxyProtocolConfig.Version
+ (*ProxyProtocolConfig)(nil), // 1: envoy.config.core.v3.ProxyProtocolConfig
+}
+var file_envoy_config_core_v3_proxy_protocol_proto_depIdxs = []int32{
+ 0, // 0: envoy.config.core.v3.ProxyProtocolConfig.version:type_name -> envoy.config.core.v3.ProxyProtocolConfig.Version
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_core_v3_proxy_protocol_proto_init() }
+func file_envoy_config_core_v3_proxy_protocol_proto_init() {
+ if File_envoy_config_core_v3_proxy_protocol_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_core_v3_proxy_protocol_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ProxyProtocolConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_core_v3_proxy_protocol_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_core_v3_proxy_protocol_proto_goTypes,
+ DependencyIndexes: file_envoy_config_core_v3_proxy_protocol_proto_depIdxs,
+ EnumInfos: file_envoy_config_core_v3_proxy_protocol_proto_enumTypes,
+ MessageInfos: file_envoy_config_core_v3_proxy_protocol_proto_msgTypes,
+ }.Build()
+ File_envoy_config_core_v3_proxy_protocol_proto = out.File
+ file_envoy_config_core_v3_proxy_protocol_proto_rawDesc = nil
+ file_envoy_config_core_v3_proxy_protocol_proto_goTypes = nil
+ file_envoy_config_core_v3_proxy_protocol_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.validate.go
new file mode 100644
index 000000000..947259cb4
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.validate.go
@@ -0,0 +1,103 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/core/v3/proxy_protocol.proto
+
+package envoy_config_core_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// Validate checks the field values on ProxyProtocolConfig with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *ProxyProtocolConfig) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ // no validation rules for Version
+
+ return nil
+}
+
+// ProxyProtocolConfigValidationError is the validation error returned by
+// ProxyProtocolConfig.Validate if the designated constraints aren't met.
+type ProxyProtocolConfigValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ProxyProtocolConfigValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ProxyProtocolConfigValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ProxyProtocolConfigValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ProxyProtocolConfigValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ProxyProtocolConfigValidationError) ErrorName() string {
+ return "ProxyProtocolConfigValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ProxyProtocolConfigValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sProxyProtocolConfig.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ProxyProtocolConfigValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ProxyProtocolConfigValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.go
new file mode 100644
index 000000000..855dfa920
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.go
@@ -0,0 +1,316 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/config/core/v3/socket_option.proto
+
+package envoy_config_core_v3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+type SocketOption_SocketState int32
+
+const (
+ // Socket options are applied after socket creation but before binding the socket to a port
+ SocketOption_STATE_PREBIND SocketOption_SocketState = 0
+ // Socket options are applied after binding the socket to a port but before calling listen()
+ SocketOption_STATE_BOUND SocketOption_SocketState = 1
+ // Socket options are applied after calling listen()
+ SocketOption_STATE_LISTENING SocketOption_SocketState = 2
+)
+
+// Enum value maps for SocketOption_SocketState.
+var (
+ SocketOption_SocketState_name = map[int32]string{
+ 0: "STATE_PREBIND",
+ 1: "STATE_BOUND",
+ 2: "STATE_LISTENING",
+ }
+ SocketOption_SocketState_value = map[string]int32{
+ "STATE_PREBIND": 0,
+ "STATE_BOUND": 1,
+ "STATE_LISTENING": 2,
+ }
+)
+
+func (x SocketOption_SocketState) Enum() *SocketOption_SocketState {
+ p := new(SocketOption_SocketState)
+ *p = x
+ return p
+}
+
+func (x SocketOption_SocketState) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SocketOption_SocketState) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_core_v3_socket_option_proto_enumTypes[0].Descriptor()
+}
+
+func (SocketOption_SocketState) Type() protoreflect.EnumType {
+ return &file_envoy_config_core_v3_socket_option_proto_enumTypes[0]
+}
+
+func (x SocketOption_SocketState) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use SocketOption_SocketState.Descriptor instead.
+func (SocketOption_SocketState) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_socket_option_proto_rawDescGZIP(), []int{0, 0}
+}
+
+// Generic socket option message. This would be used to set socket options that
+// might not exist in upstream kernels or precompiled Envoy binaries.
+// [#next-free-field: 7]
+type SocketOption struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // An optional name to give this socket option for debugging, etc.
+ // Uniqueness is not required and no special meaning is assumed.
+ Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+ // Corresponding to the level value passed to setsockopt, such as IPPROTO_TCP
+ Level int64 `protobuf:"varint,2,opt,name=level,proto3" json:"level,omitempty"`
+ // The numeric name as passed to setsockopt
+ Name int64 `protobuf:"varint,3,opt,name=name,proto3" json:"name,omitempty"`
+ // Types that are assignable to Value:
+ // *SocketOption_IntValue
+ // *SocketOption_BufValue
+ Value isSocketOption_Value `protobuf_oneof:"value"`
+ // The state in which the option will be applied. When used in BindConfig
+ // STATE_PREBIND is currently the only valid value.
+ State SocketOption_SocketState `protobuf:"varint,6,opt,name=state,proto3,enum=envoy.config.core.v3.SocketOption_SocketState" json:"state,omitempty"`
+}
+
+func (x *SocketOption) Reset() {
+ *x = SocketOption{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_socket_option_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SocketOption) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SocketOption) ProtoMessage() {}
+
+func (x *SocketOption) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_socket_option_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SocketOption.ProtoReflect.Descriptor instead.
+func (*SocketOption) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_socket_option_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *SocketOption) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+func (x *SocketOption) GetLevel() int64 {
+ if x != nil {
+ return x.Level
+ }
+ return 0
+}
+
+func (x *SocketOption) GetName() int64 {
+ if x != nil {
+ return x.Name
+ }
+ return 0
+}
+
+func (m *SocketOption) GetValue() isSocketOption_Value {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (x *SocketOption) GetIntValue() int64 {
+ if x, ok := x.GetValue().(*SocketOption_IntValue); ok {
+ return x.IntValue
+ }
+ return 0
+}
+
+func (x *SocketOption) GetBufValue() []byte {
+ if x, ok := x.GetValue().(*SocketOption_BufValue); ok {
+ return x.BufValue
+ }
+ return nil
+}
+
+func (x *SocketOption) GetState() SocketOption_SocketState {
+ if x != nil {
+ return x.State
+ }
+ return SocketOption_STATE_PREBIND
+}
+
+type isSocketOption_Value interface {
+ isSocketOption_Value()
+}
+
+type SocketOption_IntValue struct {
+ // Because many sockopts take an int value.
+ IntValue int64 `protobuf:"varint,4,opt,name=int_value,json=intValue,proto3,oneof"`
+}
+
+type SocketOption_BufValue struct {
+ // Otherwise it's a byte buffer.
+ BufValue []byte `protobuf:"bytes,5,opt,name=buf_value,json=bufValue,proto3,oneof"`
+}
+
+func (*SocketOption_IntValue) isSocketOption_Value() {}
+
+func (*SocketOption_BufValue) isSocketOption_Value() {}
+
+var File_envoy_config_core_v3_socket_option_proto protoreflect.FileDescriptor
+
+var file_envoy_config_core_v3_socket_option_proto_rawDesc = []byte{
+ 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33,
+ 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe5, 0x02, 0x0a, 0x0c,
+ 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b,
+ 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14,
+ 0x0a, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6c,
+ 0x65, 0x76, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x08, 0x69,
+ 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1d, 0x0a, 0x09, 0x62, 0x75, 0x66, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x08, 0x62, 0x75,
+ 0x66, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x4e, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18,
+ 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63,
+ 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74,
+ 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52,
+ 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0x46, 0x0a, 0x0b, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74,
+ 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x11, 0x0a, 0x0d, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x50,
+ 0x52, 0x45, 0x42, 0x49, 0x4e, 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x41, 0x54,
+ 0x45, 0x5f, 0x42, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41,
+ 0x54, 0x45, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x45, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x3a, 0x25,
+ 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69,
+ 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x03,
+ 0xf8, 0x42, 0x01, 0x42, 0x41, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70,
+ 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x11, 0x53, 0x6f, 0x63, 0x6b, 0x65,
+ 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0xba, 0x80,
+ 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_core_v3_socket_option_proto_rawDescOnce sync.Once
+ file_envoy_config_core_v3_socket_option_proto_rawDescData = file_envoy_config_core_v3_socket_option_proto_rawDesc
+)
+
+func file_envoy_config_core_v3_socket_option_proto_rawDescGZIP() []byte {
+ file_envoy_config_core_v3_socket_option_proto_rawDescOnce.Do(func() {
+ file_envoy_config_core_v3_socket_option_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_socket_option_proto_rawDescData)
+ })
+ return file_envoy_config_core_v3_socket_option_proto_rawDescData
+}
+
+var file_envoy_config_core_v3_socket_option_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_envoy_config_core_v3_socket_option_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_envoy_config_core_v3_socket_option_proto_goTypes = []interface{}{
+ (SocketOption_SocketState)(0), // 0: envoy.config.core.v3.SocketOption.SocketState
+ (*SocketOption)(nil), // 1: envoy.config.core.v3.SocketOption
+}
+var file_envoy_config_core_v3_socket_option_proto_depIdxs = []int32{
+ 0, // 0: envoy.config.core.v3.SocketOption.state:type_name -> envoy.config.core.v3.SocketOption.SocketState
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_core_v3_socket_option_proto_init() }
+func file_envoy_config_core_v3_socket_option_proto_init() {
+ if File_envoy_config_core_v3_socket_option_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_core_v3_socket_option_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SocketOption); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_envoy_config_core_v3_socket_option_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*SocketOption_IntValue)(nil),
+ (*SocketOption_BufValue)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_core_v3_socket_option_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_core_v3_socket_option_proto_goTypes,
+ DependencyIndexes: file_envoy_config_core_v3_socket_option_proto_depIdxs,
+ EnumInfos: file_envoy_config_core_v3_socket_option_proto_enumTypes,
+ MessageInfos: file_envoy_config_core_v3_socket_option_proto_msgTypes,
+ }.Build()
+ File_envoy_config_core_v3_socket_option_proto = out.File
+ file_envoy_config_core_v3_socket_option_proto_rawDesc = nil
+ file_envoy_config_core_v3_socket_option_proto_goTypes = nil
+ file_envoy_config_core_v3_socket_option_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.validate.go
new file mode 100644
index 000000000..b4497936f
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.validate.go
@@ -0,0 +1,128 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/core/v3/socket_option.proto
+
+package envoy_config_core_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// Validate checks the field values on SocketOption with the rules defined in
+// the proto definition for this message. If any rules are violated, an error
+// is returned.
+func (m *SocketOption) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ // no validation rules for Description
+
+ // no validation rules for Level
+
+ // no validation rules for Name
+
+ if _, ok := SocketOption_SocketState_name[int32(m.GetState())]; !ok {
+ return SocketOptionValidationError{
+ field: "State",
+ reason: "value must be one of the defined enum values",
+ }
+ }
+
+ switch m.Value.(type) {
+
+ case *SocketOption_IntValue:
+ // no validation rules for IntValue
+
+ case *SocketOption_BufValue:
+ // no validation rules for BufValue
+
+ default:
+ return SocketOptionValidationError{
+ field: "Value",
+ reason: "value is required",
+ }
+
+ }
+
+ return nil
+}
+
+// SocketOptionValidationError is the validation error returned by
+// SocketOption.Validate if the designated constraints aren't met.
+type SocketOptionValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e SocketOptionValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e SocketOptionValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e SocketOptionValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e SocketOptionValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e SocketOptionValidationError) ErrorName() string { return "SocketOptionValidationError" }
+
+// Error satisfies the builtin error interface
+func (e SocketOptionValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sSocketOption.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = SocketOptionValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = SocketOptionValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.go
new file mode 100644
index 000000000..3b0182697
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.go
@@ -0,0 +1,355 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/config/core/v3/substitution_format_string.proto
+
+package envoy_config_core_v3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/go-control-plane/envoy/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ _struct "github.com/golang/protobuf/ptypes/struct"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// Configuration to use multiple :ref:`command operators `
+// to generate a new string in either plain text or JSON format.
+// [#next-free-field: 7]
+type SubstitutionFormatString struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Format:
+ // *SubstitutionFormatString_TextFormat
+ // *SubstitutionFormatString_JsonFormat
+ // *SubstitutionFormatString_TextFormatSource
+ Format isSubstitutionFormatString_Format `protobuf_oneof:"format"`
+ // If set to true, when command operators are evaluated to null,
+ //
+ // * for ``text_format``, the output of the empty operator is changed from ``-`` to an
+ // empty string, so that empty values are omitted entirely.
+ // * for ``json_format`` the keys with null values are omitted in the output structure.
+ OmitEmptyValues bool `protobuf:"varint,3,opt,name=omit_empty_values,json=omitEmptyValues,proto3" json:"omit_empty_values,omitempty"`
+ // Specify a *content_type* field.
+ // If this field is not set then ``text/plain`` is used for *text_format* and
+ // ``application/json`` is used for *json_format*.
+ //
+ // .. validated-code-block:: yaml
+ // :type-name: envoy.config.core.v3.SubstitutionFormatString
+ //
+ // content_type: "text/html; charset=UTF-8"
+ //
+ ContentType string `protobuf:"bytes,4,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"`
+ // Specifies a collection of Formatter plugins that can be called from the access log configuration.
+ // See the formatters extensions documentation for details.
+ Formatters []*TypedExtensionConfig `protobuf:"bytes,6,rep,name=formatters,proto3" json:"formatters,omitempty"`
+}
+
+func (x *SubstitutionFormatString) Reset() {
+ *x = SubstitutionFormatString{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_substitution_format_string_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SubstitutionFormatString) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SubstitutionFormatString) ProtoMessage() {}
+
+func (x *SubstitutionFormatString) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_substitution_format_string_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SubstitutionFormatString.ProtoReflect.Descriptor instead.
+func (*SubstitutionFormatString) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_substitution_format_string_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *SubstitutionFormatString) GetFormat() isSubstitutionFormatString_Format {
+ if m != nil {
+ return m.Format
+ }
+ return nil
+}
+
+// Deprecated: Do not use.
+func (x *SubstitutionFormatString) GetTextFormat() string {
+ if x, ok := x.GetFormat().(*SubstitutionFormatString_TextFormat); ok {
+ return x.TextFormat
+ }
+ return ""
+}
+
+func (x *SubstitutionFormatString) GetJsonFormat() *_struct.Struct {
+ if x, ok := x.GetFormat().(*SubstitutionFormatString_JsonFormat); ok {
+ return x.JsonFormat
+ }
+ return nil
+}
+
+func (x *SubstitutionFormatString) GetTextFormatSource() *DataSource {
+ if x, ok := x.GetFormat().(*SubstitutionFormatString_TextFormatSource); ok {
+ return x.TextFormatSource
+ }
+ return nil
+}
+
+func (x *SubstitutionFormatString) GetOmitEmptyValues() bool {
+ if x != nil {
+ return x.OmitEmptyValues
+ }
+ return false
+}
+
+func (x *SubstitutionFormatString) GetContentType() string {
+ if x != nil {
+ return x.ContentType
+ }
+ return ""
+}
+
+func (x *SubstitutionFormatString) GetFormatters() []*TypedExtensionConfig {
+ if x != nil {
+ return x.Formatters
+ }
+ return nil
+}
+
+type isSubstitutionFormatString_Format interface {
+ isSubstitutionFormatString_Format()
+}
+
+type SubstitutionFormatString_TextFormat struct {
+ // Specify a format with command operators to form a text string.
+ // Its details is described in :ref:`format string`.
+ //
+ // For example, setting ``text_format`` like below,
+ //
+ // .. validated-code-block:: yaml
+ // :type-name: envoy.config.core.v3.SubstitutionFormatString
+ //
+ // text_format: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n"
+ //
+ // generates plain text similar to:
+ //
+ // .. code-block:: text
+ //
+ // upstream connect error:503:path=/foo
+ //
+ // Deprecated in favor of :ref:`text_format_source `. To migrate text format strings, use the :ref:`inline_string ` field.
+ //
+ // Deprecated: Do not use.
+ TextFormat string `protobuf:"bytes,1,opt,name=text_format,json=textFormat,proto3,oneof"`
+}
+
+type SubstitutionFormatString_JsonFormat struct {
+ // Specify a format with command operators to form a JSON string.
+ // Its details is described in :ref:`format dictionary`.
+ // Values are rendered as strings, numbers, or boolean values as appropriate.
+ // Nested JSON objects may be produced by some command operators (e.g. FILTER_STATE or DYNAMIC_METADATA).
+ // See the documentation for a specific command operator for details.
+ //
+ // .. validated-code-block:: yaml
+ // :type-name: envoy.config.core.v3.SubstitutionFormatString
+ //
+ // json_format:
+ // status: "%RESPONSE_CODE%"
+ // message: "%LOCAL_REPLY_BODY%"
+ //
+ // The following JSON object would be created:
+ //
+ // .. code-block:: json
+ //
+ // {
+ // "status": 500,
+ // "message": "My error message"
+ // }
+ //
+ JsonFormat *_struct.Struct `protobuf:"bytes,2,opt,name=json_format,json=jsonFormat,proto3,oneof"`
+}
+
+type SubstitutionFormatString_TextFormatSource struct {
+ // Specify a format with command operators to form a text string.
+ // Its details is described in :ref:`format string`.
+ //
+ // For example, setting ``text_format`` like below,
+ //
+ // .. validated-code-block:: yaml
+ // :type-name: envoy.config.core.v3.SubstitutionFormatString
+ //
+ // text_format_source:
+ // inline_string: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n"
+ //
+ // generates plain text similar to:
+ //
+ // .. code-block:: text
+ //
+ // upstream connect error:503:path=/foo
+ //
+ TextFormatSource *DataSource `protobuf:"bytes,5,opt,name=text_format_source,json=textFormatSource,proto3,oneof"`
+}
+
+func (*SubstitutionFormatString_TextFormat) isSubstitutionFormatString_Format() {}
+
+func (*SubstitutionFormatString_JsonFormat) isSubstitutionFormatString_Format() {}
+
+func (*SubstitutionFormatString_TextFormatSource) isSubstitutionFormatString_Format() {}
+
+var File_envoy_config_core_v3_substitution_format_string_proto protoreflect.FileDescriptor
+
+var file_envoy_config_core_v3_substitution_format_string_proto_rawDesc = []byte{
+ 0x0a, 0x35, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65,
+ 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72,
+ 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
+ 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22,
+ 0x8c, 0x03, 0x0a, 0x18, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x69, 0x6f, 0x6e,
+ 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x2e, 0x0a, 0x0b,
+ 0x74, 0x65, 0x78, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x48, 0x00,
+ 0x52, 0x0a, 0x74, 0x65, 0x78, 0x74, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x44, 0x0a, 0x0b,
+ 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a,
+ 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d,
+ 0x61, 0x74, 0x12, 0x50, 0x0a, 0x12, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61,
+ 0x74, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x48, 0x00, 0x52, 0x10, 0x74, 0x65, 0x78, 0x74, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x53, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x6d, 0x69, 0x74, 0x5f, 0x65, 0x6d, 0x70,
+ 0x74, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x0f, 0x6f, 0x6d, 0x69, 0x74, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73,
+ 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54,
+ 0x79, 0x70, 0x65, 0x12, 0x4a, 0x0a, 0x0a, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x74, 0x65, 0x72,
+ 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54,
+ 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x52, 0x0a, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x74, 0x65, 0x72, 0x73, 0x42,
+ 0x0d, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x4d,
+ 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72,
+ 0x65, 0x2e, 0x76, 0x33, 0x42, 0x1d, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x69,
+ 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x50, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_core_v3_substitution_format_string_proto_rawDescOnce sync.Once
+ file_envoy_config_core_v3_substitution_format_string_proto_rawDescData = file_envoy_config_core_v3_substitution_format_string_proto_rawDesc
+)
+
+func file_envoy_config_core_v3_substitution_format_string_proto_rawDescGZIP() []byte {
+ file_envoy_config_core_v3_substitution_format_string_proto_rawDescOnce.Do(func() {
+ file_envoy_config_core_v3_substitution_format_string_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_substitution_format_string_proto_rawDescData)
+ })
+ return file_envoy_config_core_v3_substitution_format_string_proto_rawDescData
+}
+
+var file_envoy_config_core_v3_substitution_format_string_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_envoy_config_core_v3_substitution_format_string_proto_goTypes = []interface{}{
+ (*SubstitutionFormatString)(nil), // 0: envoy.config.core.v3.SubstitutionFormatString
+ (*_struct.Struct)(nil), // 1: google.protobuf.Struct
+ (*DataSource)(nil), // 2: envoy.config.core.v3.DataSource
+ (*TypedExtensionConfig)(nil), // 3: envoy.config.core.v3.TypedExtensionConfig
+}
+var file_envoy_config_core_v3_substitution_format_string_proto_depIdxs = []int32{
+ 1, // 0: envoy.config.core.v3.SubstitutionFormatString.json_format:type_name -> google.protobuf.Struct
+ 2, // 1: envoy.config.core.v3.SubstitutionFormatString.text_format_source:type_name -> envoy.config.core.v3.DataSource
+ 3, // 2: envoy.config.core.v3.SubstitutionFormatString.formatters:type_name -> envoy.config.core.v3.TypedExtensionConfig
+ 3, // [3:3] is the sub-list for method output_type
+ 3, // [3:3] is the sub-list for method input_type
+ 3, // [3:3] is the sub-list for extension type_name
+ 3, // [3:3] is the sub-list for extension extendee
+ 0, // [0:3] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_core_v3_substitution_format_string_proto_init() }
+func file_envoy_config_core_v3_substitution_format_string_proto_init() {
+ if File_envoy_config_core_v3_substitution_format_string_proto != nil {
+ return
+ }
+ file_envoy_config_core_v3_base_proto_init()
+ file_envoy_config_core_v3_extension_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_core_v3_substitution_format_string_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SubstitutionFormatString); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_envoy_config_core_v3_substitution_format_string_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*SubstitutionFormatString_TextFormat)(nil),
+ (*SubstitutionFormatString_JsonFormat)(nil),
+ (*SubstitutionFormatString_TextFormatSource)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_core_v3_substitution_format_string_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_core_v3_substitution_format_string_proto_goTypes,
+ DependencyIndexes: file_envoy_config_core_v3_substitution_format_string_proto_depIdxs,
+ MessageInfos: file_envoy_config_core_v3_substitution_format_string_proto_msgTypes,
+ }.Build()
+ File_envoy_config_core_v3_substitution_format_string_proto = out.File
+ file_envoy_config_core_v3_substitution_format_string_proto_rawDesc = nil
+ file_envoy_config_core_v3_substitution_format_string_proto_goTypes = nil
+ file_envoy_config_core_v3_substitution_format_string_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.validate.go
new file mode 100644
index 000000000..374fdedcf
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.validate.go
@@ -0,0 +1,164 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/core/v3/substitution_format_string.proto
+
+package envoy_config_core_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// Validate checks the field values on SubstitutionFormatString with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *SubstitutionFormatString) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ // no validation rules for OmitEmptyValues
+
+ // no validation rules for ContentType
+
+ for idx, item := range m.GetFormatters() {
+ _, _ = idx, item
+
+ if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return SubstitutionFormatStringValidationError{
+ field: fmt.Sprintf("Formatters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ switch m.Format.(type) {
+
+ case *SubstitutionFormatString_TextFormat:
+ // no validation rules for TextFormat
+
+ case *SubstitutionFormatString_JsonFormat:
+
+ if m.GetJsonFormat() == nil {
+ return SubstitutionFormatStringValidationError{
+ field: "JsonFormat",
+ reason: "value is required",
+ }
+ }
+
+ if v, ok := interface{}(m.GetJsonFormat()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return SubstitutionFormatStringValidationError{
+ field: "JsonFormat",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *SubstitutionFormatString_TextFormatSource:
+
+ if v, ok := interface{}(m.GetTextFormatSource()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return SubstitutionFormatStringValidationError{
+ field: "TextFormatSource",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ return SubstitutionFormatStringValidationError{
+ field: "Format",
+ reason: "value is required",
+ }
+
+ }
+
+ return nil
+}
+
+// SubstitutionFormatStringValidationError is the validation error returned by
+// SubstitutionFormatString.Validate if the designated constraints aren't met.
+type SubstitutionFormatStringValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e SubstitutionFormatStringValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e SubstitutionFormatStringValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e SubstitutionFormatStringValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e SubstitutionFormatStringValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e SubstitutionFormatStringValidationError) ErrorName() string {
+ return "SubstitutionFormatStringValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e SubstitutionFormatStringValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sSubstitutionFormatString.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = SubstitutionFormatStringValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = SubstitutionFormatStringValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config.pb.go
new file mode 100644
index 000000000..d31449f4b
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config.pb.go
@@ -0,0 +1,191 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/config/core/v3/udp_socket_config.proto
+
+package envoy_config_core_v3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ wrappers "github.com/golang/protobuf/ptypes/wrappers"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// Generic UDP socket configuration.
+type UdpSocketConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The maximum size of received UDP datagrams. Using a larger size will cause Envoy to allocate
+ // more memory per socket. Received datagrams above this size will be dropped. If not set
+ // defaults to 1500 bytes.
+ MaxRxDatagramSize *wrappers.UInt64Value `protobuf:"bytes,1,opt,name=max_rx_datagram_size,json=maxRxDatagramSize,proto3" json:"max_rx_datagram_size,omitempty"`
+ // Configures whether Generic Receive Offload (GRO)
+ // _ is preferred when reading from the
+ // UDP socket. The default is context dependent and is documented where UdpSocketConfig is used.
+ // This option affects performance but not functionality. If GRO is not supported by the operating
+ // system, non-GRO receive will be used.
+ PreferGro *wrappers.BoolValue `protobuf:"bytes,2,opt,name=prefer_gro,json=preferGro,proto3" json:"prefer_gro,omitempty"`
+}
+
+func (x *UdpSocketConfig) Reset() {
+ *x = UdpSocketConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_udp_socket_config_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UdpSocketConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UdpSocketConfig) ProtoMessage() {}
+
+func (x *UdpSocketConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_udp_socket_config_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UdpSocketConfig.ProtoReflect.Descriptor instead.
+func (*UdpSocketConfig) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_udp_socket_config_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *UdpSocketConfig) GetMaxRxDatagramSize() *wrappers.UInt64Value {
+ if x != nil {
+ return x.MaxRxDatagramSize
+ }
+ return nil
+}
+
+func (x *UdpSocketConfig) GetPreferGro() *wrappers.BoolValue {
+ if x != nil {
+ return x.PreferGro
+ }
+ return nil
+}
+
+var File_envoy_config_core_v3_udp_socket_config_proto protoreflect.FileDescriptor
+
+var file_envoy_config_core_v3_udp_socket_config_proto_rawDesc = []byte{
+ 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x75, 0x64, 0x70, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65,
+ 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72,
+ 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61,
+ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa8, 0x01, 0x0a,
+ 0x0f, 0x55, 0x64, 0x70, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x12, 0x5a, 0x0a, 0x14, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x78, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x67,
+ 0x72, 0x61, 0x6d, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0xfa, 0x42,
+ 0x08, 0x32, 0x06, 0x10, 0x80, 0x80, 0x04, 0x20, 0x00, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x52, 0x78,
+ 0x44, 0x61, 0x74, 0x61, 0x67, 0x72, 0x61, 0x6d, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x39, 0x0a, 0x0a,
+ 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x5f, 0x67, 0x72, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x70, 0x72,
+ 0x65, 0x66, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x42, 0x44, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x14, 0x55,
+ 0x64, 0x70, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x50, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_core_v3_udp_socket_config_proto_rawDescOnce sync.Once
+ file_envoy_config_core_v3_udp_socket_config_proto_rawDescData = file_envoy_config_core_v3_udp_socket_config_proto_rawDesc
+)
+
+func file_envoy_config_core_v3_udp_socket_config_proto_rawDescGZIP() []byte {
+ file_envoy_config_core_v3_udp_socket_config_proto_rawDescOnce.Do(func() {
+ file_envoy_config_core_v3_udp_socket_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_udp_socket_config_proto_rawDescData)
+ })
+ return file_envoy_config_core_v3_udp_socket_config_proto_rawDescData
+}
+
+var file_envoy_config_core_v3_udp_socket_config_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_envoy_config_core_v3_udp_socket_config_proto_goTypes = []interface{}{
+ (*UdpSocketConfig)(nil), // 0: envoy.config.core.v3.UdpSocketConfig
+ (*wrappers.UInt64Value)(nil), // 1: google.protobuf.UInt64Value
+ (*wrappers.BoolValue)(nil), // 2: google.protobuf.BoolValue
+}
+var file_envoy_config_core_v3_udp_socket_config_proto_depIdxs = []int32{
+ 1, // 0: envoy.config.core.v3.UdpSocketConfig.max_rx_datagram_size:type_name -> google.protobuf.UInt64Value
+ 2, // 1: envoy.config.core.v3.UdpSocketConfig.prefer_gro:type_name -> google.protobuf.BoolValue
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_core_v3_udp_socket_config_proto_init() }
+func file_envoy_config_core_v3_udp_socket_config_proto_init() {
+ if File_envoy_config_core_v3_udp_socket_config_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_core_v3_udp_socket_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UdpSocketConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_core_v3_udp_socket_config_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_core_v3_udp_socket_config_proto_goTypes,
+ DependencyIndexes: file_envoy_config_core_v3_udp_socket_config_proto_depIdxs,
+ MessageInfos: file_envoy_config_core_v3_udp_socket_config_proto_msgTypes,
+ }.Build()
+ File_envoy_config_core_v3_udp_socket_config_proto = out.File
+ file_envoy_config_core_v3_udp_socket_config_proto_rawDesc = nil
+ file_envoy_config_core_v3_udp_socket_config_proto_goTypes = nil
+ file_envoy_config_core_v3_udp_socket_config_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config.pb.validate.go
new file mode 100644
index 000000000..e0a14fced
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config.pb.validate.go
@@ -0,0 +1,120 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/core/v3/udp_socket_config.proto
+
+package envoy_config_core_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// Validate checks the field values on UdpSocketConfig with the rules defined
+// in the proto definition for this message. If any rules are violated, an
+// error is returned.
+func (m *UdpSocketConfig) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if wrapper := m.GetMaxRxDatagramSize(); wrapper != nil {
+
+ if val := wrapper.GetValue(); val <= 0 || val >= 65536 {
+ return UdpSocketConfigValidationError{
+ field: "MaxRxDatagramSize",
+ reason: "value must be inside range (0, 65536)",
+ }
+ }
+
+ }
+
+ if v, ok := interface{}(m.GetPreferGro()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return UdpSocketConfigValidationError{
+ field: "PreferGro",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ return nil
+}
+
+// UdpSocketConfigValidationError is the validation error returned by
+// UdpSocketConfig.Validate if the designated constraints aren't met.
+type UdpSocketConfigValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e UdpSocketConfigValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e UdpSocketConfigValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e UdpSocketConfigValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e UdpSocketConfigValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e UdpSocketConfigValidationError) ErrorName() string { return "UdpSocketConfigValidationError" }
+
+// Error satisfies the builtin error interface
+func (e UdpSocketConfigValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sUdpSocketConfig.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = UdpSocketConfigValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = UdpSocketConfigValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads.pb.go
new file mode 100644
index 000000000..8d7504380
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads.pb.go
@@ -0,0 +1,368 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/service/discovery/v3/ads.proto
+
+package envoy_service_discovery_v3
+
+import (
+ context "context"
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ proto "github.com/golang/protobuf/proto"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing
+// services: https://github.com/google/protobuf/issues/4221
+type AdsDummy struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *AdsDummy) Reset() {
+ *x = AdsDummy{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_service_discovery_v3_ads_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AdsDummy) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AdsDummy) ProtoMessage() {}
+
+func (x *AdsDummy) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_service_discovery_v3_ads_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AdsDummy.ProtoReflect.Descriptor instead.
+func (*AdsDummy) Descriptor() ([]byte, []int) {
+ return file_envoy_service_discovery_v3_ads_proto_rawDescGZIP(), []int{0}
+}
+
+var File_envoy_service_discovery_v3_ads_proto protoreflect.FileDescriptor
+
+var file_envoy_service_discovery_v3_ads_proto_rawDesc = []byte{
+ 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f,
+ 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, 0x73,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e,
+ 0x76, 0x33, 0x1a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x2f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2f, 0x76, 0x33, 0x2f, 0x64,
+ 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d,
+ 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75,
+ 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f,
+ 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x22, 0x36, 0x0a, 0x08, 0x41, 0x64, 0x73, 0x44, 0x75, 0x6d, 0x6d, 0x79, 0x3a, 0x2a, 0x9a, 0xc5,
+ 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x32, 0x2e,
+ 0x41, 0x64, 0x73, 0x44, 0x75, 0x6d, 0x6d, 0x79, 0x32, 0xa6, 0x02, 0x0a, 0x1a, 0x41, 0x67, 0x67,
+ 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x64, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x7e, 0x0a, 0x19, 0x53, 0x74, 0x72, 0x65, 0x61,
+ 0x6d, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x73, 0x12, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76,
+ 0x33, 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e,
+ 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x87, 0x01, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x74,
+ 0x61, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x73, 0x12, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76,
+ 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72,
+ 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76,
+ 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30,
+ 0x01, 0x42, 0x41, 0x0a, 0x28, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f,
+ 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x42, 0x08, 0x41,
+ 0x64, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x88, 0x01, 0x01, 0xba, 0x80, 0xc8, 0xd1,
+ 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_service_discovery_v3_ads_proto_rawDescOnce sync.Once
+ file_envoy_service_discovery_v3_ads_proto_rawDescData = file_envoy_service_discovery_v3_ads_proto_rawDesc
+)
+
+func file_envoy_service_discovery_v3_ads_proto_rawDescGZIP() []byte {
+ file_envoy_service_discovery_v3_ads_proto_rawDescOnce.Do(func() {
+ file_envoy_service_discovery_v3_ads_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_service_discovery_v3_ads_proto_rawDescData)
+ })
+ return file_envoy_service_discovery_v3_ads_proto_rawDescData
+}
+
+var file_envoy_service_discovery_v3_ads_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_envoy_service_discovery_v3_ads_proto_goTypes = []interface{}{
+ (*AdsDummy)(nil), // 0: envoy.service.discovery.v3.AdsDummy
+ (*DiscoveryRequest)(nil), // 1: envoy.service.discovery.v3.DiscoveryRequest
+ (*DeltaDiscoveryRequest)(nil), // 2: envoy.service.discovery.v3.DeltaDiscoveryRequest
+ (*DiscoveryResponse)(nil), // 3: envoy.service.discovery.v3.DiscoveryResponse
+ (*DeltaDiscoveryResponse)(nil), // 4: envoy.service.discovery.v3.DeltaDiscoveryResponse
+}
+var file_envoy_service_discovery_v3_ads_proto_depIdxs = []int32{
+ 1, // 0: envoy.service.discovery.v3.AggregatedDiscoveryService.StreamAggregatedResources:input_type -> envoy.service.discovery.v3.DiscoveryRequest
+ 2, // 1: envoy.service.discovery.v3.AggregatedDiscoveryService.DeltaAggregatedResources:input_type -> envoy.service.discovery.v3.DeltaDiscoveryRequest
+ 3, // 2: envoy.service.discovery.v3.AggregatedDiscoveryService.StreamAggregatedResources:output_type -> envoy.service.discovery.v3.DiscoveryResponse
+ 4, // 3: envoy.service.discovery.v3.AggregatedDiscoveryService.DeltaAggregatedResources:output_type -> envoy.service.discovery.v3.DeltaDiscoveryResponse
+ 2, // [2:4] is the sub-list for method output_type
+ 0, // [0:2] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_envoy_service_discovery_v3_ads_proto_init() }
+func file_envoy_service_discovery_v3_ads_proto_init() {
+ if File_envoy_service_discovery_v3_ads_proto != nil {
+ return
+ }
+ file_envoy_service_discovery_v3_discovery_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_service_discovery_v3_ads_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AdsDummy); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_service_discovery_v3_ads_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_envoy_service_discovery_v3_ads_proto_goTypes,
+ DependencyIndexes: file_envoy_service_discovery_v3_ads_proto_depIdxs,
+ MessageInfos: file_envoy_service_discovery_v3_ads_proto_msgTypes,
+ }.Build()
+ File_envoy_service_discovery_v3_ads_proto = out.File
+ file_envoy_service_discovery_v3_ads_proto_rawDesc = nil
+ file_envoy_service_discovery_v3_ads_proto_goTypes = nil
+ file_envoy_service_discovery_v3_ads_proto_depIdxs = nil
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConnInterface
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion6
+
+// AggregatedDiscoveryServiceClient is the client API for AggregatedDiscoveryService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type AggregatedDiscoveryServiceClient interface {
+ // This is a gRPC-only API.
+ StreamAggregatedResources(ctx context.Context, opts ...grpc.CallOption) (AggregatedDiscoveryService_StreamAggregatedResourcesClient, error)
+ DeltaAggregatedResources(ctx context.Context, opts ...grpc.CallOption) (AggregatedDiscoveryService_DeltaAggregatedResourcesClient, error)
+}
+
+type aggregatedDiscoveryServiceClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewAggregatedDiscoveryServiceClient(cc grpc.ClientConnInterface) AggregatedDiscoveryServiceClient {
+ return &aggregatedDiscoveryServiceClient{cc}
+}
+
+func (c *aggregatedDiscoveryServiceClient) StreamAggregatedResources(ctx context.Context, opts ...grpc.CallOption) (AggregatedDiscoveryService_StreamAggregatedResourcesClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_AggregatedDiscoveryService_serviceDesc.Streams[0], "/envoy.service.discovery.v3.AggregatedDiscoveryService/StreamAggregatedResources", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &aggregatedDiscoveryServiceStreamAggregatedResourcesClient{stream}
+ return x, nil
+}
+
+type AggregatedDiscoveryService_StreamAggregatedResourcesClient interface {
+ Send(*DiscoveryRequest) error
+ Recv() (*DiscoveryResponse, error)
+ grpc.ClientStream
+}
+
+type aggregatedDiscoveryServiceStreamAggregatedResourcesClient struct {
+ grpc.ClientStream
+}
+
+func (x *aggregatedDiscoveryServiceStreamAggregatedResourcesClient) Send(m *DiscoveryRequest) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *aggregatedDiscoveryServiceStreamAggregatedResourcesClient) Recv() (*DiscoveryResponse, error) {
+ m := new(DiscoveryResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *aggregatedDiscoveryServiceClient) DeltaAggregatedResources(ctx context.Context, opts ...grpc.CallOption) (AggregatedDiscoveryService_DeltaAggregatedResourcesClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_AggregatedDiscoveryService_serviceDesc.Streams[1], "/envoy.service.discovery.v3.AggregatedDiscoveryService/DeltaAggregatedResources", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &aggregatedDiscoveryServiceDeltaAggregatedResourcesClient{stream}
+ return x, nil
+}
+
+type AggregatedDiscoveryService_DeltaAggregatedResourcesClient interface {
+ Send(*DeltaDiscoveryRequest) error
+ Recv() (*DeltaDiscoveryResponse, error)
+ grpc.ClientStream
+}
+
+type aggregatedDiscoveryServiceDeltaAggregatedResourcesClient struct {
+ grpc.ClientStream
+}
+
+func (x *aggregatedDiscoveryServiceDeltaAggregatedResourcesClient) Send(m *DeltaDiscoveryRequest) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *aggregatedDiscoveryServiceDeltaAggregatedResourcesClient) Recv() (*DeltaDiscoveryResponse, error) {
+ m := new(DeltaDiscoveryResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+// AggregatedDiscoveryServiceServer is the server API for AggregatedDiscoveryService service.
+type AggregatedDiscoveryServiceServer interface {
+ // This is a gRPC-only API.
+ StreamAggregatedResources(AggregatedDiscoveryService_StreamAggregatedResourcesServer) error
+ DeltaAggregatedResources(AggregatedDiscoveryService_DeltaAggregatedResourcesServer) error
+}
+
+// UnimplementedAggregatedDiscoveryServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedAggregatedDiscoveryServiceServer struct {
+}
+
+func (*UnimplementedAggregatedDiscoveryServiceServer) StreamAggregatedResources(AggregatedDiscoveryService_StreamAggregatedResourcesServer) error {
+ return status.Errorf(codes.Unimplemented, "method StreamAggregatedResources not implemented")
+}
+func (*UnimplementedAggregatedDiscoveryServiceServer) DeltaAggregatedResources(AggregatedDiscoveryService_DeltaAggregatedResourcesServer) error {
+ return status.Errorf(codes.Unimplemented, "method DeltaAggregatedResources not implemented")
+}
+
+func RegisterAggregatedDiscoveryServiceServer(s *grpc.Server, srv AggregatedDiscoveryServiceServer) {
+ s.RegisterService(&_AggregatedDiscoveryService_serviceDesc, srv)
+}
+
+func _AggregatedDiscoveryService_StreamAggregatedResources_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(AggregatedDiscoveryServiceServer).StreamAggregatedResources(&aggregatedDiscoveryServiceStreamAggregatedResourcesServer{stream})
+}
+
+type AggregatedDiscoveryService_StreamAggregatedResourcesServer interface {
+ Send(*DiscoveryResponse) error
+ Recv() (*DiscoveryRequest, error)
+ grpc.ServerStream
+}
+
+type aggregatedDiscoveryServiceStreamAggregatedResourcesServer struct {
+ grpc.ServerStream
+}
+
+func (x *aggregatedDiscoveryServiceStreamAggregatedResourcesServer) Send(m *DiscoveryResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *aggregatedDiscoveryServiceStreamAggregatedResourcesServer) Recv() (*DiscoveryRequest, error) {
+ m := new(DiscoveryRequest)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func _AggregatedDiscoveryService_DeltaAggregatedResources_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(AggregatedDiscoveryServiceServer).DeltaAggregatedResources(&aggregatedDiscoveryServiceDeltaAggregatedResourcesServer{stream})
+}
+
+type AggregatedDiscoveryService_DeltaAggregatedResourcesServer interface {
+ Send(*DeltaDiscoveryResponse) error
+ Recv() (*DeltaDiscoveryRequest, error)
+ grpc.ServerStream
+}
+
+type aggregatedDiscoveryServiceDeltaAggregatedResourcesServer struct {
+ grpc.ServerStream
+}
+
+func (x *aggregatedDiscoveryServiceDeltaAggregatedResourcesServer) Send(m *DeltaDiscoveryResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *aggregatedDiscoveryServiceDeltaAggregatedResourcesServer) Recv() (*DeltaDiscoveryRequest, error) {
+ m := new(DeltaDiscoveryRequest)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+var _AggregatedDiscoveryService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "envoy.service.discovery.v3.AggregatedDiscoveryService",
+ HandlerType: (*AggregatedDiscoveryServiceServer)(nil),
+ Methods: []grpc.MethodDesc{},
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "StreamAggregatedResources",
+ Handler: _AggregatedDiscoveryService_StreamAggregatedResources_Handler,
+ ServerStreams: true,
+ ClientStreams: true,
+ },
+ {
+ StreamName: "DeltaAggregatedResources",
+ Handler: _AggregatedDiscoveryService_DeltaAggregatedResources_Handler,
+ ServerStreams: true,
+ ClientStreams: true,
+ },
+ },
+ Metadata: "envoy/service/discovery/v3/ads.proto",
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads.pb.validate.go
new file mode 100644
index 000000000..14f3d05a4
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads.pb.validate.go
@@ -0,0 +1,98 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/service/discovery/v3/ads.proto
+
+package envoy_service_discovery_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// Validate checks the field values on AdsDummy with the rules defined in the
+// proto definition for this message. If any rules are violated, an error is returned.
+func (m *AdsDummy) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ return nil
+}
+
+// AdsDummyValidationError is the validation error returned by
+// AdsDummy.Validate if the designated constraints aren't met.
+type AdsDummyValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e AdsDummyValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e AdsDummyValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e AdsDummyValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e AdsDummyValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e AdsDummyValidationError) ErrorName() string { return "AdsDummyValidationError" }
+
+// Error satisfies the builtin error interface
+func (e AdsDummyValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sAdsDummy.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = AdsDummyValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = AdsDummyValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.go
new file mode 100644
index 000000000..ae35621c2
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.go
@@ -0,0 +1,981 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/service/discovery/v3/discovery.proto
+
+package envoy_service_discovery_v3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
+ proto "github.com/golang/protobuf/proto"
+ any "github.com/golang/protobuf/ptypes/any"
+ duration "github.com/golang/protobuf/ptypes/duration"
+ status "google.golang.org/genproto/googleapis/rpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// A DiscoveryRequest requests a set of versioned resources of the same type for
+// a given Envoy node on some API.
+// [#next-free-field: 7]
+type DiscoveryRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The version_info provided in the request messages will be the version_info
+ // received with the most recent successfully processed response or empty on
+ // the first request. It is expected that no new request is sent after a
+ // response is received until the Envoy instance is ready to ACK/NACK the new
+ // configuration. ACK/NACK takes place by returning the new API config version
+ // as applied or the previous API config version respectively. Each type_url
+ // (see below) has an independent version associated with it.
+ VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"`
+ // The node making the request.
+ Node *v3.Node `protobuf:"bytes,2,opt,name=node,proto3" json:"node,omitempty"`
+ // List of resources to subscribe to, e.g. list of cluster names or a route
+ // configuration name. If this is empty, all resources for the API are
+ // returned. LDS/CDS may have empty resource_names, which will cause all
+ // resources for the Envoy instance to be returned. The LDS and CDS responses
+ // will then imply a number of resources that need to be fetched via EDS/RDS,
+ // which will be explicitly enumerated in resource_names.
+ ResourceNames []string `protobuf:"bytes,3,rep,name=resource_names,json=resourceNames,proto3" json:"resource_names,omitempty"`
+ // Type of the resource that is being requested, e.g.
+ // "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". This is implicit
+ // in requests made via singleton xDS APIs such as CDS, LDS, etc. but is
+ // required for ADS.
+ TypeUrl string `protobuf:"bytes,4,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
+ // nonce corresponding to DiscoveryResponse being ACK/NACKed. See above
+ // discussion on version_info and the DiscoveryResponse nonce comment. This
+ // may be empty only if 1) this is a non-persistent-stream xDS such as HTTP,
+ // or 2) the client has not yet accepted an update in this xDS stream (unlike
+ // delta, where it is populated only for new explicit ACKs).
+ ResponseNonce string `protobuf:"bytes,5,opt,name=response_nonce,json=responseNonce,proto3" json:"response_nonce,omitempty"`
+ // This is populated when the previous :ref:`DiscoveryResponse `
+ // failed to update configuration. The *message* field in *error_details* provides the Envoy
+ // internal exception related to the failure. It is only intended for consumption during manual
+ // debugging, the string provided is not guaranteed to be stable across Envoy versions.
+ ErrorDetail *status.Status `protobuf:"bytes,6,opt,name=error_detail,json=errorDetail,proto3" json:"error_detail,omitempty"`
+}
+
+func (x *DiscoveryRequest) Reset() {
+ *x = DiscoveryRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DiscoveryRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DiscoveryRequest) ProtoMessage() {}
+
+func (x *DiscoveryRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DiscoveryRequest.ProtoReflect.Descriptor instead.
+func (*DiscoveryRequest) Descriptor() ([]byte, []int) {
+ return file_envoy_service_discovery_v3_discovery_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *DiscoveryRequest) GetVersionInfo() string {
+ if x != nil {
+ return x.VersionInfo
+ }
+ return ""
+}
+
+func (x *DiscoveryRequest) GetNode() *v3.Node {
+ if x != nil {
+ return x.Node
+ }
+ return nil
+}
+
+func (x *DiscoveryRequest) GetResourceNames() []string {
+ if x != nil {
+ return x.ResourceNames
+ }
+ return nil
+}
+
+func (x *DiscoveryRequest) GetTypeUrl() string {
+ if x != nil {
+ return x.TypeUrl
+ }
+ return ""
+}
+
+func (x *DiscoveryRequest) GetResponseNonce() string {
+ if x != nil {
+ return x.ResponseNonce
+ }
+ return ""
+}
+
+func (x *DiscoveryRequest) GetErrorDetail() *status.Status {
+ if x != nil {
+ return x.ErrorDetail
+ }
+ return nil
+}
+
+// [#next-free-field: 7]
+type DiscoveryResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The version of the response data.
+ VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"`
+ // The response resources. These resources are typed and depend on the API being called.
+ Resources []*any.Any `protobuf:"bytes,2,rep,name=resources,proto3" json:"resources,omitempty"`
+ // [#not-implemented-hide:]
+ // Canary is used to support two Envoy command line flags:
+ //
+ // * --terminate-on-canary-transition-failure. When set, Envoy is able to
+ // terminate if it detects that configuration is stuck at canary. Consider
+ // this example sequence of updates:
+ // - Management server applies a canary config successfully.
+ // - Management server rolls back to a production config.
+ // - Envoy rejects the new production config.
+ // Since there is no sensible way to continue receiving configuration
+ // updates, Envoy will then terminate and apply production config from a
+ // clean slate.
+ // * --dry-run-canary. When set, a canary response will never be applied, only
+ // validated via a dry run.
+ Canary bool `protobuf:"varint,3,opt,name=canary,proto3" json:"canary,omitempty"`
+ // Type URL for resources. Identifies the xDS API when muxing over ADS.
+ // Must be consistent with the type_url in the 'resources' repeated Any (if non-empty).
+ TypeUrl string `protobuf:"bytes,4,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
+ // For gRPC based subscriptions, the nonce provides a way to explicitly ack a
+ // specific DiscoveryResponse in a following DiscoveryRequest. Additional
+ // messages may have been sent by Envoy to the management server for the
+ // previous version on the stream prior to this DiscoveryResponse, that were
+ // unprocessed at response send time. The nonce allows the management server
+ // to ignore any further DiscoveryRequests for the previous version until a
+ // DiscoveryRequest bearing the nonce. The nonce is optional and is not
+ // required for non-stream based xDS implementations.
+ Nonce string `protobuf:"bytes,5,opt,name=nonce,proto3" json:"nonce,omitempty"`
+ // The control plane instance that sent the response.
+ ControlPlane *v3.ControlPlane `protobuf:"bytes,6,opt,name=control_plane,json=controlPlane,proto3" json:"control_plane,omitempty"`
+}
+
+func (x *DiscoveryResponse) Reset() {
+ *x = DiscoveryResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DiscoveryResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DiscoveryResponse) ProtoMessage() {}
+
+func (x *DiscoveryResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DiscoveryResponse.ProtoReflect.Descriptor instead.
+func (*DiscoveryResponse) Descriptor() ([]byte, []int) {
+ return file_envoy_service_discovery_v3_discovery_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *DiscoveryResponse) GetVersionInfo() string {
+ if x != nil {
+ return x.VersionInfo
+ }
+ return ""
+}
+
+func (x *DiscoveryResponse) GetResources() []*any.Any {
+ if x != nil {
+ return x.Resources
+ }
+ return nil
+}
+
+func (x *DiscoveryResponse) GetCanary() bool {
+ if x != nil {
+ return x.Canary
+ }
+ return false
+}
+
+func (x *DiscoveryResponse) GetTypeUrl() string {
+ if x != nil {
+ return x.TypeUrl
+ }
+ return ""
+}
+
+func (x *DiscoveryResponse) GetNonce() string {
+ if x != nil {
+ return x.Nonce
+ }
+ return ""
+}
+
+func (x *DiscoveryResponse) GetControlPlane() *v3.ControlPlane {
+ if x != nil {
+ return x.ControlPlane
+ }
+ return nil
+}
+
+// DeltaDiscoveryRequest and DeltaDiscoveryResponse are used in a new gRPC
+// endpoint for Delta xDS.
+//
+// With Delta xDS, the DeltaDiscoveryResponses do not need to include a full
+// snapshot of the tracked resources. Instead, DeltaDiscoveryResponses are a
+// diff to the state of a xDS client.
+// In Delta XDS there are per-resource versions, which allow tracking state at
+// the resource granularity.
+// An xDS Delta session is always in the context of a gRPC bidirectional
+// stream. This allows the xDS server to keep track of the state of xDS clients
+// connected to it.
+//
+// In Delta xDS the nonce field is required and used to pair
+// DeltaDiscoveryResponse to a DeltaDiscoveryRequest ACK or NACK.
+// Optionally, a response message level system_version_info is present for
+// debugging purposes only.
+//
+// DeltaDiscoveryRequest plays two independent roles. Any DeltaDiscoveryRequest
+// can be either or both of: [1] informing the server of what resources the
+// client has gained/lost interest in (using resource_names_subscribe and
+// resource_names_unsubscribe), or [2] (N)ACKing an earlier resource update from
+// the server (using response_nonce, with presence of error_detail making it a NACK).
+// Additionally, the first message (for a given type_url) of a reconnected gRPC stream
+// has a third role: informing the server of the resources (and their versions)
+// that the client already possesses, using the initial_resource_versions field.
+//
+// As with state-of-the-world, when multiple resource types are multiplexed (ADS),
+// all requests/acknowledgments/updates are logically walled off by type_url:
+// a Cluster ACK exists in a completely separate world from a prior Route NACK.
+// In particular, initial_resource_versions being sent at the "start" of every
+// gRPC stream actually entails a message for each type_url, each with its own
+// initial_resource_versions.
+// [#next-free-field: 8]
+type DeltaDiscoveryRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The node making the request.
+ Node *v3.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
+ // Type of the resource that is being requested, e.g.
+ // "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". This does not need to be set if
+ // resources are only referenced via *xds_resource_subscribe* and
+ // *xds_resources_unsubscribe*.
+ TypeUrl string `protobuf:"bytes,2,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
+ // DeltaDiscoveryRequests allow the client to add or remove individual
+ // resources to the set of tracked resources in the context of a stream.
+ // All resource names in the resource_names_subscribe list are added to the
+ // set of tracked resources and all resource names in the resource_names_unsubscribe
+ // list are removed from the set of tracked resources.
+ //
+ // *Unlike* state-of-the-world xDS, an empty resource_names_subscribe or
+ // resource_names_unsubscribe list simply means that no resources are to be
+ // added or removed to the resource list.
+ // *Like* state-of-the-world xDS, the server must send updates for all tracked
+ // resources, but can also send updates for resources the client has not subscribed to.
+ //
+ // NOTE: the server must respond with all resources listed in resource_names_subscribe,
+ // even if it believes the client has the most recent version of them. The reason:
+ // the client may have dropped them, but then regained interest before it had a chance
+ // to send the unsubscribe message. See DeltaSubscriptionStateTest.RemoveThenAdd.
+ //
+ // These two fields can be set in any DeltaDiscoveryRequest, including ACKs
+ // and initial_resource_versions.
+ //
+ // A list of Resource names to add to the list of tracked resources.
+ ResourceNamesSubscribe []string `protobuf:"bytes,3,rep,name=resource_names_subscribe,json=resourceNamesSubscribe,proto3" json:"resource_names_subscribe,omitempty"`
+ // A list of Resource names to remove from the list of tracked resources.
+ ResourceNamesUnsubscribe []string `protobuf:"bytes,4,rep,name=resource_names_unsubscribe,json=resourceNamesUnsubscribe,proto3" json:"resource_names_unsubscribe,omitempty"`
+ // Informs the server of the versions of the resources the xDS client knows of, to enable the
+ // client to continue the same logical xDS session even in the face of gRPC stream reconnection.
+ // It will not be populated: [1] in the very first stream of a session, since the client will
+ // not yet have any resources, [2] in any message after the first in a stream (for a given
+ // type_url), since the server will already be correctly tracking the client's state.
+ // (In ADS, the first message *of each type_url* of a reconnected stream populates this map.)
+ // The map's keys are names of xDS resources known to the xDS client.
+ // The map's values are opaque resource versions.
+ InitialResourceVersions map[string]string `protobuf:"bytes,5,rep,name=initial_resource_versions,json=initialResourceVersions,proto3" json:"initial_resource_versions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // When the DeltaDiscoveryRequest is a ACK or NACK message in response
+ // to a previous DeltaDiscoveryResponse, the response_nonce must be the
+ // nonce in the DeltaDiscoveryResponse.
+ // Otherwise (unlike in DiscoveryRequest) response_nonce must be omitted.
+ ResponseNonce string `protobuf:"bytes,6,opt,name=response_nonce,json=responseNonce,proto3" json:"response_nonce,omitempty"`
+ // This is populated when the previous :ref:`DiscoveryResponse `
+ // failed to update configuration. The *message* field in *error_details*
+ // provides the Envoy internal exception related to the failure.
+ ErrorDetail *status.Status `protobuf:"bytes,7,opt,name=error_detail,json=errorDetail,proto3" json:"error_detail,omitempty"`
+}
+
+func (x *DeltaDiscoveryRequest) Reset() {
+ *x = DeltaDiscoveryRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DeltaDiscoveryRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeltaDiscoveryRequest) ProtoMessage() {}
+
+func (x *DeltaDiscoveryRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeltaDiscoveryRequest.ProtoReflect.Descriptor instead.
+func (*DeltaDiscoveryRequest) Descriptor() ([]byte, []int) {
+ return file_envoy_service_discovery_v3_discovery_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *DeltaDiscoveryRequest) GetNode() *v3.Node {
+ if x != nil {
+ return x.Node
+ }
+ return nil
+}
+
+func (x *DeltaDiscoveryRequest) GetTypeUrl() string {
+ if x != nil {
+ return x.TypeUrl
+ }
+ return ""
+}
+
+func (x *DeltaDiscoveryRequest) GetResourceNamesSubscribe() []string {
+ if x != nil {
+ return x.ResourceNamesSubscribe
+ }
+ return nil
+}
+
+func (x *DeltaDiscoveryRequest) GetResourceNamesUnsubscribe() []string {
+ if x != nil {
+ return x.ResourceNamesUnsubscribe
+ }
+ return nil
+}
+
+func (x *DeltaDiscoveryRequest) GetInitialResourceVersions() map[string]string {
+ if x != nil {
+ return x.InitialResourceVersions
+ }
+ return nil
+}
+
+func (x *DeltaDiscoveryRequest) GetResponseNonce() string {
+ if x != nil {
+ return x.ResponseNonce
+ }
+ return ""
+}
+
+func (x *DeltaDiscoveryRequest) GetErrorDetail() *status.Status {
+ if x != nil {
+ return x.ErrorDetail
+ }
+ return nil
+}
+
+// [#next-free-field: 8]
+type DeltaDiscoveryResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The version of the response data (used for debugging).
+ SystemVersionInfo string `protobuf:"bytes,1,opt,name=system_version_info,json=systemVersionInfo,proto3" json:"system_version_info,omitempty"`
+ // The response resources. These are typed resources, whose types must match
+ // the type_url field.
+ Resources []*Resource `protobuf:"bytes,2,rep,name=resources,proto3" json:"resources,omitempty"`
+ // Type URL for resources. Identifies the xDS API when muxing over ADS.
+ // Must be consistent with the type_url in the Any within 'resources' if 'resources' is non-empty.
+ TypeUrl string `protobuf:"bytes,4,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
+ // Resources names of resources that have be deleted and to be removed from the xDS Client.
+ // Removed resources for missing resources can be ignored.
+ RemovedResources []string `protobuf:"bytes,6,rep,name=removed_resources,json=removedResources,proto3" json:"removed_resources,omitempty"`
+ // The nonce provides a way for DeltaDiscoveryRequests to uniquely
+ // reference a DeltaDiscoveryResponse when (N)ACKing. The nonce is required.
+ Nonce string `protobuf:"bytes,5,opt,name=nonce,proto3" json:"nonce,omitempty"`
+ // [#not-implemented-hide:]
+ // The control plane instance that sent the response.
+ ControlPlane *v3.ControlPlane `protobuf:"bytes,7,opt,name=control_plane,json=controlPlane,proto3" json:"control_plane,omitempty"`
+}
+
+func (x *DeltaDiscoveryResponse) Reset() {
+ *x = DeltaDiscoveryResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DeltaDiscoveryResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeltaDiscoveryResponse) ProtoMessage() {}
+
+func (x *DeltaDiscoveryResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeltaDiscoveryResponse.ProtoReflect.Descriptor instead.
+func (*DeltaDiscoveryResponse) Descriptor() ([]byte, []int) {
+ return file_envoy_service_discovery_v3_discovery_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *DeltaDiscoveryResponse) GetSystemVersionInfo() string {
+ if x != nil {
+ return x.SystemVersionInfo
+ }
+ return ""
+}
+
+func (x *DeltaDiscoveryResponse) GetResources() []*Resource {
+ if x != nil {
+ return x.Resources
+ }
+ return nil
+}
+
+func (x *DeltaDiscoveryResponse) GetTypeUrl() string {
+ if x != nil {
+ return x.TypeUrl
+ }
+ return ""
+}
+
+func (x *DeltaDiscoveryResponse) GetRemovedResources() []string {
+ if x != nil {
+ return x.RemovedResources
+ }
+ return nil
+}
+
+func (x *DeltaDiscoveryResponse) GetNonce() string {
+ if x != nil {
+ return x.Nonce
+ }
+ return ""
+}
+
+func (x *DeltaDiscoveryResponse) GetControlPlane() *v3.ControlPlane {
+ if x != nil {
+ return x.ControlPlane
+ }
+ return nil
+}
+
+// [#next-free-field: 8]
+type Resource struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The resource's name, to distinguish it from others of the same type of resource.
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ // The aliases are a list of other names that this resource can go by.
+ Aliases []string `protobuf:"bytes,4,rep,name=aliases,proto3" json:"aliases,omitempty"`
+ // The resource level version. It allows xDS to track the state of individual
+ // resources.
+ Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
+ // The resource being tracked.
+ Resource *any.Any `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource,omitempty"`
+ // Time-to-live value for the resource. For each resource, a timer is started. The timer is
+ // reset each time the resource is received with a new TTL. If the resource is received with
+ // no TTL set, the timer is removed for the resource. Upon expiration of the timer, the
+ // configuration for the resource will be removed.
+ //
+ // The TTL can be refreshed or changed by sending a response that doesn't change the resource
+ // version. In this case the resource field does not need to be populated, which allows for
+ // light-weight "heartbeat" updates to keep a resource with a TTL alive.
+ //
+ // The TTL feature is meant to support configurations that should be removed in the event of
+ // a management server failure. For example, the feature may be used for fault injection
+ // testing where the fault injection should be terminated in the event that Envoy loses contact
+ // with the management server.
+ Ttl *duration.Duration `protobuf:"bytes,6,opt,name=ttl,proto3" json:"ttl,omitempty"`
+ // Cache control properties for the resource.
+ // [#not-implemented-hide:]
+ CacheControl *Resource_CacheControl `protobuf:"bytes,7,opt,name=cache_control,json=cacheControl,proto3" json:"cache_control,omitempty"`
+}
+
+func (x *Resource) Reset() {
+ *x = Resource{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Resource) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Resource) ProtoMessage() {}
+
+func (x *Resource) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Resource.ProtoReflect.Descriptor instead.
+func (*Resource) Descriptor() ([]byte, []int) {
+ return file_envoy_service_discovery_v3_discovery_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *Resource) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Resource) GetAliases() []string {
+ if x != nil {
+ return x.Aliases
+ }
+ return nil
+}
+
+func (x *Resource) GetVersion() string {
+ if x != nil {
+ return x.Version
+ }
+ return ""
+}
+
+func (x *Resource) GetResource() *any.Any {
+ if x != nil {
+ return x.Resource
+ }
+ return nil
+}
+
+func (x *Resource) GetTtl() *duration.Duration {
+ if x != nil {
+ return x.Ttl
+ }
+ return nil
+}
+
+func (x *Resource) GetCacheControl() *Resource_CacheControl {
+ if x != nil {
+ return x.CacheControl
+ }
+ return nil
+}
+
+// Cache control properties for the resource.
+// [#not-implemented-hide:]
+type Resource_CacheControl struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // If true, xDS proxies may not cache this resource.
+ // Note that this does not apply to clients other than xDS proxies, which must cache resources
+ // for their own use, regardless of the value of this field.
+ DoNotCache bool `protobuf:"varint,1,opt,name=do_not_cache,json=doNotCache,proto3" json:"do_not_cache,omitempty"`
+}
+
+func (x *Resource_CacheControl) Reset() {
+ *x = Resource_CacheControl{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Resource_CacheControl) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Resource_CacheControl) ProtoMessage() {}
+
+func (x *Resource_CacheControl) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Resource_CacheControl.ProtoReflect.Descriptor instead.
+func (*Resource_CacheControl) Descriptor() ([]byte, []int) {
+ return file_envoy_service_discovery_v3_discovery_proto_rawDescGZIP(), []int{4, 0}
+}
+
+func (x *Resource_CacheControl) GetDoNotCache() bool {
+ if x != nil {
+ return x.DoNotCache
+ }
+ return false
+}
+
+var File_envoy_service_discovery_v3_discovery_proto protoreflect.FileDescriptor
+
+var file_envoy_service_discovery_v3_discovery_proto_rawDesc = []byte{
+ 0x0a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f,
+ 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2f, 0x76, 0x33, 0x2f, 0x64, 0x69, 0x73,
+ 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1a, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63,
+ 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62,
+ 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63,
+ 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75,
+ 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f,
+ 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64,
+ 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22,
+ 0xab, 0x02, 0x0a, 0x10, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2e, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x64,
+ 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52,
+ 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x19,
+ 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4e, 0x6f, 0x6e, 0x63, 0x65,
+ 0x12, 0x35, 0x0a, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c,
+ 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0b, 0x65, 0x72, 0x72, 0x6f,
+ 0x72, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x3a, 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x69, 0x73,
+ 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xa3, 0x02,
+ 0x0a, 0x11, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69,
+ 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x32, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52,
+ 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x61,
+ 0x6e, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x63, 0x61, 0x6e, 0x61,
+ 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x14, 0x0a,
+ 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x6f,
+ 0x6e, 0x63, 0x65, 0x12, 0x47, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x5f, 0x70,
+ 0x6c, 0x61, 0x6e, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76,
+ 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x50, 0x6c, 0x61, 0x6e, 0x65, 0x52, 0x0c,
+ 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x50, 0x6c, 0x61, 0x6e, 0x65, 0x3a, 0x25, 0x9a, 0xc5,
+ 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76,
+ 0x32, 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0xbc, 0x04, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x44, 0x69, 0x73,
+ 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a,
+ 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e,
+ 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x19, 0x0a,
+ 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x38, 0x0a, 0x18, 0x72, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x63,
+ 0x72, 0x69, 0x62, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x16, 0x72, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69,
+ 0x62, 0x65, 0x12, 0x3c, 0x0a, 0x1a, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e,
+ 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x75, 0x6e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65,
+ 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x18, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x55, 0x6e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65,
+ 0x12, 0x8a, 0x01, 0x0a, 0x19, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76,
+ 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x52,
+ 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x52, 0x17, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a,
+ 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18,
+ 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4e,
+ 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x35, 0x0a, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x64, 0x65,
+ 0x74, 0x61, 0x69, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0b,
+ 0x65, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x1a, 0x4a, 0x0a, 0x1c, 0x49,
+ 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x56, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b,
+ 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c,
+ 0x74, 0x61, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x22, 0xdf, 0x02, 0x0a, 0x16, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x44, 0x69, 0x73, 0x63,
+ 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a,
+ 0x13, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x73, 0x79, 0x73, 0x74,
+ 0x65, 0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x42, 0x0a,
+ 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x73, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x2b, 0x0a, 0x11,
+ 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64,
+ 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x6e,
+ 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x12,
+ 0x47, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x65,
+ 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x50, 0x6c, 0x61, 0x6e, 0x65, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74,
+ 0x72, 0x6f, 0x6c, 0x50, 0x6c, 0x61, 0x6e, 0x65, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, 0x0a,
+ 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65,
+ 0x6c, 0x74, 0x61, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xd9, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73,
+ 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12,
+ 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x08, 0x72, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e,
+ 0x79, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x2b, 0x0a, 0x03, 0x74,
+ 0x74, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x56, 0x0a, 0x0d, 0x63, 0x61, 0x63, 0x68,
+ 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e,
+ 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x52, 0x0c, 0x63, 0x61, 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
+ 0x1a, 0x30, 0x0a, 0x0c, 0x43, 0x61, 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
+ 0x12, 0x20, 0x0a, 0x0c, 0x64, 0x6f, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x63, 0x61, 0x63, 0x68, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x6f, 0x4e, 0x6f, 0x74, 0x43, 0x61, 0x63,
+ 0x68, 0x65, 0x3a, 0x1c, 0x9a, 0xc5, 0x88, 0x1e, 0x17, 0x0a, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x42, 0x44, 0x0a, 0x28, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78,
+ 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e,
+ 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x44, 0x69,
+ 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0xba, 0x80,
+ 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_service_discovery_v3_discovery_proto_rawDescOnce sync.Once
+ file_envoy_service_discovery_v3_discovery_proto_rawDescData = file_envoy_service_discovery_v3_discovery_proto_rawDesc
+)
+
+func file_envoy_service_discovery_v3_discovery_proto_rawDescGZIP() []byte {
+ file_envoy_service_discovery_v3_discovery_proto_rawDescOnce.Do(func() {
+ file_envoy_service_discovery_v3_discovery_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_service_discovery_v3_discovery_proto_rawDescData)
+ })
+ return file_envoy_service_discovery_v3_discovery_proto_rawDescData
+}
+
+var file_envoy_service_discovery_v3_discovery_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
+var file_envoy_service_discovery_v3_discovery_proto_goTypes = []interface{}{
+ (*DiscoveryRequest)(nil), // 0: envoy.service.discovery.v3.DiscoveryRequest
+ (*DiscoveryResponse)(nil), // 1: envoy.service.discovery.v3.DiscoveryResponse
+ (*DeltaDiscoveryRequest)(nil), // 2: envoy.service.discovery.v3.DeltaDiscoveryRequest
+ (*DeltaDiscoveryResponse)(nil), // 3: envoy.service.discovery.v3.DeltaDiscoveryResponse
+ (*Resource)(nil), // 4: envoy.service.discovery.v3.Resource
+ nil, // 5: envoy.service.discovery.v3.DeltaDiscoveryRequest.InitialResourceVersionsEntry
+ (*Resource_CacheControl)(nil), // 6: envoy.service.discovery.v3.Resource.CacheControl
+ (*v3.Node)(nil), // 7: envoy.config.core.v3.Node
+ (*status.Status)(nil), // 8: google.rpc.Status
+ (*any.Any)(nil), // 9: google.protobuf.Any
+ (*v3.ControlPlane)(nil), // 10: envoy.config.core.v3.ControlPlane
+ (*duration.Duration)(nil), // 11: google.protobuf.Duration
+}
+var file_envoy_service_discovery_v3_discovery_proto_depIdxs = []int32{
+ 7, // 0: envoy.service.discovery.v3.DiscoveryRequest.node:type_name -> envoy.config.core.v3.Node
+ 8, // 1: envoy.service.discovery.v3.DiscoveryRequest.error_detail:type_name -> google.rpc.Status
+ 9, // 2: envoy.service.discovery.v3.DiscoveryResponse.resources:type_name -> google.protobuf.Any
+ 10, // 3: envoy.service.discovery.v3.DiscoveryResponse.control_plane:type_name -> envoy.config.core.v3.ControlPlane
+ 7, // 4: envoy.service.discovery.v3.DeltaDiscoveryRequest.node:type_name -> envoy.config.core.v3.Node
+ 5, // 5: envoy.service.discovery.v3.DeltaDiscoveryRequest.initial_resource_versions:type_name -> envoy.service.discovery.v3.DeltaDiscoveryRequest.InitialResourceVersionsEntry
+ 8, // 6: envoy.service.discovery.v3.DeltaDiscoveryRequest.error_detail:type_name -> google.rpc.Status
+ 4, // 7: envoy.service.discovery.v3.DeltaDiscoveryResponse.resources:type_name -> envoy.service.discovery.v3.Resource
+ 10, // 8: envoy.service.discovery.v3.DeltaDiscoveryResponse.control_plane:type_name -> envoy.config.core.v3.ControlPlane
+ 9, // 9: envoy.service.discovery.v3.Resource.resource:type_name -> google.protobuf.Any
+ 11, // 10: envoy.service.discovery.v3.Resource.ttl:type_name -> google.protobuf.Duration
+ 6, // 11: envoy.service.discovery.v3.Resource.cache_control:type_name -> envoy.service.discovery.v3.Resource.CacheControl
+ 12, // [12:12] is the sub-list for method output_type
+ 12, // [12:12] is the sub-list for method input_type
+ 12, // [12:12] is the sub-list for extension type_name
+ 12, // [12:12] is the sub-list for extension extendee
+ 0, // [0:12] is the sub-list for field type_name
+}
+
+func init() { file_envoy_service_discovery_v3_discovery_proto_init() }
+func file_envoy_service_discovery_v3_discovery_proto_init() {
+ if File_envoy_service_discovery_v3_discovery_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_service_discovery_v3_discovery_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DiscoveryRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_service_discovery_v3_discovery_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DiscoveryResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_service_discovery_v3_discovery_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DeltaDiscoveryRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_service_discovery_v3_discovery_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DeltaDiscoveryResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_service_discovery_v3_discovery_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Resource); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_service_discovery_v3_discovery_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Resource_CacheControl); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_service_discovery_v3_discovery_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 7,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_service_discovery_v3_discovery_proto_goTypes,
+ DependencyIndexes: file_envoy_service_discovery_v3_discovery_proto_depIdxs,
+ MessageInfos: file_envoy_service_discovery_v3_discovery_proto_msgTypes,
+ }.Build()
+ File_envoy_service_discovery_v3_discovery_proto = out.File
+ file_envoy_service_discovery_v3_discovery_proto_rawDesc = nil
+ file_envoy_service_discovery_v3_discovery_proto_goTypes = nil
+ file_envoy_service_discovery_v3_discovery_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.validate.go
new file mode 100644
index 000000000..3d2d3556d
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.validate.go
@@ -0,0 +1,583 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/service/discovery/v3/discovery.proto
+
+package envoy_service_discovery_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// Validate checks the field values on DiscoveryRequest with the rules defined
+// in the proto definition for this message. If any rules are violated, an
+// error is returned.
+func (m *DiscoveryRequest) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ // no validation rules for VersionInfo
+
+ if v, ok := interface{}(m.GetNode()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return DiscoveryRequestValidationError{
+ field: "Node",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for TypeUrl
+
+ // no validation rules for ResponseNonce
+
+ if v, ok := interface{}(m.GetErrorDetail()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return DiscoveryRequestValidationError{
+ field: "ErrorDetail",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ return nil
+}
+
+// DiscoveryRequestValidationError is the validation error returned by
+// DiscoveryRequest.Validate if the designated constraints aren't met.
+type DiscoveryRequestValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e DiscoveryRequestValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e DiscoveryRequestValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e DiscoveryRequestValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e DiscoveryRequestValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e DiscoveryRequestValidationError) ErrorName() string { return "DiscoveryRequestValidationError" }
+
+// Error satisfies the builtin error interface
+func (e DiscoveryRequestValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sDiscoveryRequest.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = DiscoveryRequestValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = DiscoveryRequestValidationError{}
+
+// Validate checks the field values on DiscoveryResponse with the rules defined
+// in the proto definition for this message. If any rules are violated, an
+// error is returned.
+func (m *DiscoveryResponse) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ // no validation rules for VersionInfo
+
+ for idx, item := range m.GetResources() {
+ _, _ = idx, item
+
+ if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return DiscoveryResponseValidationError{
+ field: fmt.Sprintf("Resources[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ // no validation rules for Canary
+
+ // no validation rules for TypeUrl
+
+ // no validation rules for Nonce
+
+ if v, ok := interface{}(m.GetControlPlane()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return DiscoveryResponseValidationError{
+ field: "ControlPlane",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ return nil
+}
+
+// DiscoveryResponseValidationError is the validation error returned by
+// DiscoveryResponse.Validate if the designated constraints aren't met.
+type DiscoveryResponseValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e DiscoveryResponseValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e DiscoveryResponseValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e DiscoveryResponseValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e DiscoveryResponseValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e DiscoveryResponseValidationError) ErrorName() string {
+ return "DiscoveryResponseValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e DiscoveryResponseValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sDiscoveryResponse.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = DiscoveryResponseValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = DiscoveryResponseValidationError{}
+
+// Validate checks the field values on DeltaDiscoveryRequest with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *DeltaDiscoveryRequest) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if v, ok := interface{}(m.GetNode()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return DeltaDiscoveryRequestValidationError{
+ field: "Node",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for TypeUrl
+
+ // no validation rules for InitialResourceVersions
+
+ // no validation rules for ResponseNonce
+
+ if v, ok := interface{}(m.GetErrorDetail()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return DeltaDiscoveryRequestValidationError{
+ field: "ErrorDetail",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ return nil
+}
+
+// DeltaDiscoveryRequestValidationError is the validation error returned by
+// DeltaDiscoveryRequest.Validate if the designated constraints aren't met.
+type DeltaDiscoveryRequestValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e DeltaDiscoveryRequestValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e DeltaDiscoveryRequestValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e DeltaDiscoveryRequestValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e DeltaDiscoveryRequestValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e DeltaDiscoveryRequestValidationError) ErrorName() string {
+ return "DeltaDiscoveryRequestValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e DeltaDiscoveryRequestValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sDeltaDiscoveryRequest.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = DeltaDiscoveryRequestValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = DeltaDiscoveryRequestValidationError{}
+
+// Validate checks the field values on DeltaDiscoveryResponse with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *DeltaDiscoveryResponse) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ // no validation rules for SystemVersionInfo
+
+ for idx, item := range m.GetResources() {
+ _, _ = idx, item
+
+ if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return DeltaDiscoveryResponseValidationError{
+ field: fmt.Sprintf("Resources[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ // no validation rules for TypeUrl
+
+ // no validation rules for Nonce
+
+ if v, ok := interface{}(m.GetControlPlane()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return DeltaDiscoveryResponseValidationError{
+ field: "ControlPlane",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ return nil
+}
+
+// DeltaDiscoveryResponseValidationError is the validation error returned by
+// DeltaDiscoveryResponse.Validate if the designated constraints aren't met.
+type DeltaDiscoveryResponseValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e DeltaDiscoveryResponseValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e DeltaDiscoveryResponseValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e DeltaDiscoveryResponseValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e DeltaDiscoveryResponseValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e DeltaDiscoveryResponseValidationError) ErrorName() string {
+ return "DeltaDiscoveryResponseValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e DeltaDiscoveryResponseValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sDeltaDiscoveryResponse.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = DeltaDiscoveryResponseValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = DeltaDiscoveryResponseValidationError{}
+
+// Validate checks the field values on Resource with the rules defined in the
+// proto definition for this message. If any rules are violated, an error is returned.
+func (m *Resource) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ // no validation rules for Name
+
+ // no validation rules for Version
+
+ if v, ok := interface{}(m.GetResource()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ResourceValidationError{
+ field: "Resource",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if v, ok := interface{}(m.GetTtl()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ResourceValidationError{
+ field: "Ttl",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if v, ok := interface{}(m.GetCacheControl()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ResourceValidationError{
+ field: "CacheControl",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ return nil
+}
+
+// ResourceValidationError is the validation error returned by
+// Resource.Validate if the designated constraints aren't met.
+type ResourceValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ResourceValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ResourceValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ResourceValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ResourceValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ResourceValidationError) ErrorName() string { return "ResourceValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ResourceValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sResource.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ResourceValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ResourceValidationError{}
+
+// Validate checks the field values on Resource_CacheControl with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *Resource_CacheControl) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ // no validation rules for DoNotCache
+
+ return nil
+}
+
+// Resource_CacheControlValidationError is the validation error returned by
+// Resource_CacheControl.Validate if the designated constraints aren't met.
+type Resource_CacheControlValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Resource_CacheControlValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Resource_CacheControlValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Resource_CacheControlValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Resource_CacheControlValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Resource_CacheControlValidationError) ErrorName() string {
+ return "Resource_CacheControlValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Resource_CacheControlValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sResource_CacheControl.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Resource_CacheControlValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Resource_CacheControlValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.go
new file mode 100644
index 000000000..7166d7ac3
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.go
@@ -0,0 +1,379 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/type/matcher/v3/http_inputs.proto
+
+package envoy_type_matcher_v3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// Match input indicates that matching should be done on a specific request header.
+// The resulting input string will be all headers for the given key joined by a comma,
+// e.g. if the request contains two 'foo' headers with value 'bar' and 'baz', the input
+// string will be 'bar,baz'.
+// [#comment:TODO(snowp): Link to unified matching docs.]
+type HttpRequestHeaderMatchInput struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The request header to match on.
+ HeaderName string `protobuf:"bytes,1,opt,name=header_name,json=headerName,proto3" json:"header_name,omitempty"`
+}
+
+func (x *HttpRequestHeaderMatchInput) Reset() {
+ *x = HttpRequestHeaderMatchInput{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HttpRequestHeaderMatchInput) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HttpRequestHeaderMatchInput) ProtoMessage() {}
+
+func (x *HttpRequestHeaderMatchInput) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HttpRequestHeaderMatchInput.ProtoReflect.Descriptor instead.
+func (*HttpRequestHeaderMatchInput) Descriptor() ([]byte, []int) {
+ return file_envoy_type_matcher_v3_http_inputs_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *HttpRequestHeaderMatchInput) GetHeaderName() string {
+ if x != nil {
+ return x.HeaderName
+ }
+ return ""
+}
+
+// Match input indicates that matching should be done on a specific request trailer.
+// The resulting input string will be all headers for the given key joined by a comma,
+// e.g. if the request contains two 'foo' headers with value 'bar' and 'baz', the input
+// string will be 'bar,baz'.
+// [#comment:TODO(snowp): Link to unified matching docs.]
+type HttpRequestTrailerMatchInput struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The request trailer to match on.
+ HeaderName string `protobuf:"bytes,1,opt,name=header_name,json=headerName,proto3" json:"header_name,omitempty"`
+}
+
+func (x *HttpRequestTrailerMatchInput) Reset() {
+ *x = HttpRequestTrailerMatchInput{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HttpRequestTrailerMatchInput) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HttpRequestTrailerMatchInput) ProtoMessage() {}
+
+func (x *HttpRequestTrailerMatchInput) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HttpRequestTrailerMatchInput.ProtoReflect.Descriptor instead.
+func (*HttpRequestTrailerMatchInput) Descriptor() ([]byte, []int) {
+ return file_envoy_type_matcher_v3_http_inputs_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *HttpRequestTrailerMatchInput) GetHeaderName() string {
+ if x != nil {
+ return x.HeaderName
+ }
+ return ""
+}
+
+// Match input indicating that matching should be done on a specific response header.
+// The resulting input string will be all headers for the given key joined by a comma,
+// e.g. if the response contains two 'foo' headers with value 'bar' and 'baz', the input
+// string will be 'bar,baz'.
+// [#comment:TODO(snowp): Link to unified matching docs.]
+type HttpResponseHeaderMatchInput struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The response header to match on.
+ HeaderName string `protobuf:"bytes,1,opt,name=header_name,json=headerName,proto3" json:"header_name,omitempty"`
+}
+
+func (x *HttpResponseHeaderMatchInput) Reset() {
+ *x = HttpResponseHeaderMatchInput{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HttpResponseHeaderMatchInput) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HttpResponseHeaderMatchInput) ProtoMessage() {}
+
+func (x *HttpResponseHeaderMatchInput) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HttpResponseHeaderMatchInput.ProtoReflect.Descriptor instead.
+func (*HttpResponseHeaderMatchInput) Descriptor() ([]byte, []int) {
+ return file_envoy_type_matcher_v3_http_inputs_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *HttpResponseHeaderMatchInput) GetHeaderName() string {
+ if x != nil {
+ return x.HeaderName
+ }
+ return ""
+}
+
+// Match input indicates that matching should be done on a specific response trailer.
+// The resulting input string will be all headers for the given key joined by a comma,
+// e.g. if the request contains two 'foo' headers with value 'bar' and 'baz', the input
+// string will be 'bar,baz'.
+// [#comment:TODO(snowp): Link to unified matching docs.]
+type HttpResponseTrailerMatchInput struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The response trailer to match on.
+ HeaderName string `protobuf:"bytes,1,opt,name=header_name,json=headerName,proto3" json:"header_name,omitempty"`
+}
+
+func (x *HttpResponseTrailerMatchInput) Reset() {
+ *x = HttpResponseTrailerMatchInput{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HttpResponseTrailerMatchInput) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HttpResponseTrailerMatchInput) ProtoMessage() {}
+
+func (x *HttpResponseTrailerMatchInput) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HttpResponseTrailerMatchInput.ProtoReflect.Descriptor instead.
+func (*HttpResponseTrailerMatchInput) Descriptor() ([]byte, []int) {
+ return file_envoy_type_matcher_v3_http_inputs_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *HttpResponseTrailerMatchInput) GetHeaderName() string {
+ if x != nil {
+ return x.HeaderName
+ }
+ return ""
+}
+
+var File_envoy_type_matcher_v3_http_inputs_proto protoreflect.FileDescriptor
+
+var file_envoy_type_matcher_v3_http_inputs_proto_rawDesc = []byte{
+ 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x69, 0x6e, 0x70,
+ 0x75, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33,
+ 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61,
+ 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4b, 0x0a, 0x1b, 0x48, 0x74, 0x74, 0x70,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74,
+ 0x63, 0x68, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x2c, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65,
+ 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42,
+ 0x08, 0x72, 0x06, 0xc0, 0x01, 0x01, 0xc8, 0x01, 0x00, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65,
+ 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x4c, 0x0a, 0x1c, 0x48, 0x74, 0x74, 0x70, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68,
+ 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x2c, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72,
+ 0x06, 0xc0, 0x01, 0x01, 0xc8, 0x01, 0x00, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e,
+ 0x61, 0x6d, 0x65, 0x22, 0x4c, 0x0a, 0x1c, 0x48, 0x74, 0x74, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x6e,
+ 0x70, 0x75, 0x74, 0x12, 0x2c, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0,
+ 0x01, 0x01, 0xc8, 0x01, 0x00, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d,
+ 0x65, 0x22, 0x4d, 0x0a, 0x1d, 0x48, 0x74, 0x74, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x6e, 0x70,
+ 0x75, 0x74, 0x12, 0x2c, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0, 0x01,
+ 0x01, 0xc8, 0x01, 0x00, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65,
+ 0x42, 0x40, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78,
+ 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x48, 0x74, 0x74, 0x70, 0x49, 0x6e, 0x70,
+ 0x75, 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02,
+ 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_type_matcher_v3_http_inputs_proto_rawDescOnce sync.Once
+ file_envoy_type_matcher_v3_http_inputs_proto_rawDescData = file_envoy_type_matcher_v3_http_inputs_proto_rawDesc
+)
+
+func file_envoy_type_matcher_v3_http_inputs_proto_rawDescGZIP() []byte {
+ file_envoy_type_matcher_v3_http_inputs_proto_rawDescOnce.Do(func() {
+ file_envoy_type_matcher_v3_http_inputs_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_v3_http_inputs_proto_rawDescData)
+ })
+ return file_envoy_type_matcher_v3_http_inputs_proto_rawDescData
+}
+
+var file_envoy_type_matcher_v3_http_inputs_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
+var file_envoy_type_matcher_v3_http_inputs_proto_goTypes = []interface{}{
+ (*HttpRequestHeaderMatchInput)(nil), // 0: envoy.type.matcher.v3.HttpRequestHeaderMatchInput
+ (*HttpRequestTrailerMatchInput)(nil), // 1: envoy.type.matcher.v3.HttpRequestTrailerMatchInput
+ (*HttpResponseHeaderMatchInput)(nil), // 2: envoy.type.matcher.v3.HttpResponseHeaderMatchInput
+ (*HttpResponseTrailerMatchInput)(nil), // 3: envoy.type.matcher.v3.HttpResponseTrailerMatchInput
+}
+var file_envoy_type_matcher_v3_http_inputs_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_envoy_type_matcher_v3_http_inputs_proto_init() }
+func file_envoy_type_matcher_v3_http_inputs_proto_init() {
+ if File_envoy_type_matcher_v3_http_inputs_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HttpRequestHeaderMatchInput); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HttpRequestTrailerMatchInput); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HttpResponseHeaderMatchInput); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HttpResponseTrailerMatchInput); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_type_matcher_v3_http_inputs_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 4,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_type_matcher_v3_http_inputs_proto_goTypes,
+ DependencyIndexes: file_envoy_type_matcher_v3_http_inputs_proto_depIdxs,
+ MessageInfos: file_envoy_type_matcher_v3_http_inputs_proto_msgTypes,
+ }.Build()
+ File_envoy_type_matcher_v3_http_inputs_proto = out.File
+ file_envoy_type_matcher_v3_http_inputs_proto_rawDesc = nil
+ file_envoy_type_matcher_v3_http_inputs_proto_goTypes = nil
+ file_envoy_type_matcher_v3_http_inputs_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.validate.go
new file mode 100644
index 000000000..2ab0e6a93
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.validate.go
@@ -0,0 +1,342 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/type/matcher/v3/http_inputs.proto
+
+package envoy_type_matcher_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// Validate checks the field values on HttpRequestHeaderMatchInput with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *HttpRequestHeaderMatchInput) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if !_HttpRequestHeaderMatchInput_HeaderName_Pattern.MatchString(m.GetHeaderName()) {
+ return HttpRequestHeaderMatchInputValidationError{
+ field: "HeaderName",
+ reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"",
+ }
+ }
+
+ return nil
+}
+
+// HttpRequestHeaderMatchInputValidationError is the validation error returned
+// by HttpRequestHeaderMatchInput.Validate if the designated constraints
+// aren't met.
+type HttpRequestHeaderMatchInputValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HttpRequestHeaderMatchInputValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HttpRequestHeaderMatchInputValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HttpRequestHeaderMatchInputValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HttpRequestHeaderMatchInputValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HttpRequestHeaderMatchInputValidationError) ErrorName() string {
+ return "HttpRequestHeaderMatchInputValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e HttpRequestHeaderMatchInputValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHttpRequestHeaderMatchInput.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HttpRequestHeaderMatchInputValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HttpRequestHeaderMatchInputValidationError{}
+
+var _HttpRequestHeaderMatchInput_HeaderName_Pattern = regexp.MustCompile("^[^\x00\n\r]*$")
+
+// Validate checks the field values on HttpRequestTrailerMatchInput with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *HttpRequestTrailerMatchInput) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if !_HttpRequestTrailerMatchInput_HeaderName_Pattern.MatchString(m.GetHeaderName()) {
+ return HttpRequestTrailerMatchInputValidationError{
+ field: "HeaderName",
+ reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"",
+ }
+ }
+
+ return nil
+}
+
+// HttpRequestTrailerMatchInputValidationError is the validation error returned
+// by HttpRequestTrailerMatchInput.Validate if the designated constraints
+// aren't met.
+type HttpRequestTrailerMatchInputValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HttpRequestTrailerMatchInputValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HttpRequestTrailerMatchInputValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HttpRequestTrailerMatchInputValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HttpRequestTrailerMatchInputValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HttpRequestTrailerMatchInputValidationError) ErrorName() string {
+ return "HttpRequestTrailerMatchInputValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e HttpRequestTrailerMatchInputValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHttpRequestTrailerMatchInput.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HttpRequestTrailerMatchInputValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HttpRequestTrailerMatchInputValidationError{}
+
+var _HttpRequestTrailerMatchInput_HeaderName_Pattern = regexp.MustCompile("^[^\x00\n\r]*$")
+
+// Validate checks the field values on HttpResponseHeaderMatchInput with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *HttpResponseHeaderMatchInput) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if !_HttpResponseHeaderMatchInput_HeaderName_Pattern.MatchString(m.GetHeaderName()) {
+ return HttpResponseHeaderMatchInputValidationError{
+ field: "HeaderName",
+ reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"",
+ }
+ }
+
+ return nil
+}
+
+// HttpResponseHeaderMatchInputValidationError is the validation error returned
+// by HttpResponseHeaderMatchInput.Validate if the designated constraints
+// aren't met.
+type HttpResponseHeaderMatchInputValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HttpResponseHeaderMatchInputValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HttpResponseHeaderMatchInputValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HttpResponseHeaderMatchInputValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HttpResponseHeaderMatchInputValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HttpResponseHeaderMatchInputValidationError) ErrorName() string {
+ return "HttpResponseHeaderMatchInputValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e HttpResponseHeaderMatchInputValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHttpResponseHeaderMatchInput.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HttpResponseHeaderMatchInputValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HttpResponseHeaderMatchInputValidationError{}
+
+var _HttpResponseHeaderMatchInput_HeaderName_Pattern = regexp.MustCompile("^[^\x00\n\r]*$")
+
+// Validate checks the field values on HttpResponseTrailerMatchInput with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *HttpResponseTrailerMatchInput) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if !_HttpResponseTrailerMatchInput_HeaderName_Pattern.MatchString(m.GetHeaderName()) {
+ return HttpResponseTrailerMatchInputValidationError{
+ field: "HeaderName",
+ reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"",
+ }
+ }
+
+ return nil
+}
+
+// HttpResponseTrailerMatchInputValidationError is the validation error
+// returned by HttpResponseTrailerMatchInput.Validate if the designated
+// constraints aren't met.
+type HttpResponseTrailerMatchInputValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HttpResponseTrailerMatchInputValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HttpResponseTrailerMatchInputValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HttpResponseTrailerMatchInputValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HttpResponseTrailerMatchInputValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HttpResponseTrailerMatchInputValidationError) ErrorName() string {
+ return "HttpResponseTrailerMatchInputValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e HttpResponseTrailerMatchInputValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHttpResponseTrailerMatchInput.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HttpResponseTrailerMatchInputValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HttpResponseTrailerMatchInputValidationError{}
+
+var _HttpResponseTrailerMatchInput_HeaderName_Pattern = regexp.MustCompile("^[^\x00\n\r]*$")
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.go
new file mode 100644
index 000000000..a2c8e56b0
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.go
@@ -0,0 +1,292 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/type/matcher/v3/metadata.proto
+
+package envoy_type_matcher_v3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// [#next-major-version: MetadataMatcher should use StructMatcher]
+type MetadataMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The filter name to retrieve the Struct from the Metadata.
+ Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"`
+ // The path to retrieve the Value from the Struct.
+ Path []*MetadataMatcher_PathSegment `protobuf:"bytes,2,rep,name=path,proto3" json:"path,omitempty"`
+ // The MetadataMatcher is matched if the value retrieved by path is matched to this value.
+ Value *ValueMatcher `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *MetadataMatcher) Reset() {
+ *x = MetadataMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_type_matcher_v3_metadata_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MetadataMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MetadataMatcher) ProtoMessage() {}
+
+func (x *MetadataMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_type_matcher_v3_metadata_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MetadataMatcher.ProtoReflect.Descriptor instead.
+func (*MetadataMatcher) Descriptor() ([]byte, []int) {
+ return file_envoy_type_matcher_v3_metadata_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *MetadataMatcher) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+func (x *MetadataMatcher) GetPath() []*MetadataMatcher_PathSegment {
+ if x != nil {
+ return x.Path
+ }
+ return nil
+}
+
+func (x *MetadataMatcher) GetValue() *ValueMatcher {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+// Specifies the segment in a path to retrieve value from Metadata.
+// Note: Currently it's not supported to retrieve a value from a list in Metadata. This means that
+// if the segment key refers to a list, it has to be the last segment in a path.
+type MetadataMatcher_PathSegment struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Segment:
+ // *MetadataMatcher_PathSegment_Key
+ Segment isMetadataMatcher_PathSegment_Segment `protobuf_oneof:"segment"`
+}
+
+func (x *MetadataMatcher_PathSegment) Reset() {
+ *x = MetadataMatcher_PathSegment{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_type_matcher_v3_metadata_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MetadataMatcher_PathSegment) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MetadataMatcher_PathSegment) ProtoMessage() {}
+
+func (x *MetadataMatcher_PathSegment) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_type_matcher_v3_metadata_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MetadataMatcher_PathSegment.ProtoReflect.Descriptor instead.
+func (*MetadataMatcher_PathSegment) Descriptor() ([]byte, []int) {
+ return file_envoy_type_matcher_v3_metadata_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (m *MetadataMatcher_PathSegment) GetSegment() isMetadataMatcher_PathSegment_Segment {
+ if m != nil {
+ return m.Segment
+ }
+ return nil
+}
+
+func (x *MetadataMatcher_PathSegment) GetKey() string {
+ if x, ok := x.GetSegment().(*MetadataMatcher_PathSegment_Key); ok {
+ return x.Key
+ }
+ return ""
+}
+
+type isMetadataMatcher_PathSegment_Segment interface {
+ isMetadataMatcher_PathSegment_Segment()
+}
+
+type MetadataMatcher_PathSegment_Key struct {
+ // If specified, use the key to retrieve the value in a Struct.
+ Key string `protobuf:"bytes,1,opt,name=key,proto3,oneof"`
+}
+
+func (*MetadataMatcher_PathSegment_Key) isMetadataMatcher_PathSegment_Segment() {}
+
+var File_envoy_type_matcher_v3_metadata_proto protoreflect.FileDescriptor
+
+var file_envoy_type_matcher_v3_metadata_proto_rawDesc = []byte{
+ 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79,
+ 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x21, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe7, 0x02, 0x0a, 0x0f,
+ 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12,
+ 0x1f, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72,
+ 0x12, 0x50, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63,
+ 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d,
+ 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x53, 0x65, 0x67, 0x6d, 0x65,
+ 0x6e, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x04, 0x70, 0x61,
+ 0x74, 0x68, 0x12, 0x43, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d,
+ 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01,
+ 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x71, 0x0a, 0x0b, 0x50, 0x61, 0x74, 0x68, 0x53,
+ 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x03,
+ 0x6b, 0x65, 0x79, 0x3a, 0x35, 0x9a, 0xc5, 0x88, 0x1e, 0x30, 0x0a, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d,
+ 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x50,
+ 0x61, 0x74, 0x68, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x0e, 0x0a, 0x07, 0x73, 0x65,
+ 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e,
+ 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x61,
+ 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x3e, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70,
+ 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0d, 0x4d, 0x65,
+ 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0xba, 0x80, 0xc8,
+ 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_type_matcher_v3_metadata_proto_rawDescOnce sync.Once
+ file_envoy_type_matcher_v3_metadata_proto_rawDescData = file_envoy_type_matcher_v3_metadata_proto_rawDesc
+)
+
+func file_envoy_type_matcher_v3_metadata_proto_rawDescGZIP() []byte {
+ file_envoy_type_matcher_v3_metadata_proto_rawDescOnce.Do(func() {
+ file_envoy_type_matcher_v3_metadata_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_v3_metadata_proto_rawDescData)
+ })
+ return file_envoy_type_matcher_v3_metadata_proto_rawDescData
+}
+
+var file_envoy_type_matcher_v3_metadata_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_envoy_type_matcher_v3_metadata_proto_goTypes = []interface{}{
+ (*MetadataMatcher)(nil), // 0: envoy.type.matcher.v3.MetadataMatcher
+ (*MetadataMatcher_PathSegment)(nil), // 1: envoy.type.matcher.v3.MetadataMatcher.PathSegment
+ (*ValueMatcher)(nil), // 2: envoy.type.matcher.v3.ValueMatcher
+}
+var file_envoy_type_matcher_v3_metadata_proto_depIdxs = []int32{
+ 1, // 0: envoy.type.matcher.v3.MetadataMatcher.path:type_name -> envoy.type.matcher.v3.MetadataMatcher.PathSegment
+ 2, // 1: envoy.type.matcher.v3.MetadataMatcher.value:type_name -> envoy.type.matcher.v3.ValueMatcher
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_envoy_type_matcher_v3_metadata_proto_init() }
+func file_envoy_type_matcher_v3_metadata_proto_init() {
+ if File_envoy_type_matcher_v3_metadata_proto != nil {
+ return
+ }
+ file_envoy_type_matcher_v3_value_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_type_matcher_v3_metadata_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MetadataMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_type_matcher_v3_metadata_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MetadataMatcher_PathSegment); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_envoy_type_matcher_v3_metadata_proto_msgTypes[1].OneofWrappers = []interface{}{
+ (*MetadataMatcher_PathSegment_Key)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_type_matcher_v3_metadata_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_type_matcher_v3_metadata_proto_goTypes,
+ DependencyIndexes: file_envoy_type_matcher_v3_metadata_proto_depIdxs,
+ MessageInfos: file_envoy_type_matcher_v3_metadata_proto_msgTypes,
+ }.Build()
+ File_envoy_type_matcher_v3_metadata_proto = out.File
+ file_envoy_type_matcher_v3_metadata_proto_rawDesc = nil
+ file_envoy_type_matcher_v3_metadata_proto_goTypes = nil
+ file_envoy_type_matcher_v3_metadata_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.validate.go
new file mode 100644
index 000000000..caef318b6
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.validate.go
@@ -0,0 +1,232 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/type/matcher/v3/metadata.proto
+
+package envoy_type_matcher_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// Validate checks the field values on MetadataMatcher with the rules defined
+// in the proto definition for this message. If any rules are violated, an
+// error is returned.
+func (m *MetadataMatcher) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if utf8.RuneCountInString(m.GetFilter()) < 1 {
+ return MetadataMatcherValidationError{
+ field: "Filter",
+ reason: "value length must be at least 1 runes",
+ }
+ }
+
+ if len(m.GetPath()) < 1 {
+ return MetadataMatcherValidationError{
+ field: "Path",
+ reason: "value must contain at least 1 item(s)",
+ }
+ }
+
+ for idx, item := range m.GetPath() {
+ _, _ = idx, item
+
+ if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return MetadataMatcherValidationError{
+ field: fmt.Sprintf("Path[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if m.GetValue() == nil {
+ return MetadataMatcherValidationError{
+ field: "Value",
+ reason: "value is required",
+ }
+ }
+
+ if v, ok := interface{}(m.GetValue()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return MetadataMatcherValidationError{
+ field: "Value",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ return nil
+}
+
+// MetadataMatcherValidationError is the validation error returned by
+// MetadataMatcher.Validate if the designated constraints aren't met.
+type MetadataMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e MetadataMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e MetadataMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e MetadataMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e MetadataMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e MetadataMatcherValidationError) ErrorName() string { return "MetadataMatcherValidationError" }
+
+// Error satisfies the builtin error interface
+func (e MetadataMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMetadataMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = MetadataMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = MetadataMatcherValidationError{}
+
+// Validate checks the field values on MetadataMatcher_PathSegment with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *MetadataMatcher_PathSegment) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ switch m.Segment.(type) {
+
+ case *MetadataMatcher_PathSegment_Key:
+
+ if utf8.RuneCountInString(m.GetKey()) < 1 {
+ return MetadataMatcher_PathSegmentValidationError{
+ field: "Key",
+ reason: "value length must be at least 1 runes",
+ }
+ }
+
+ default:
+ return MetadataMatcher_PathSegmentValidationError{
+ field: "Segment",
+ reason: "value is required",
+ }
+
+ }
+
+ return nil
+}
+
+// MetadataMatcher_PathSegmentValidationError is the validation error returned
+// by MetadataMatcher_PathSegment.Validate if the designated constraints
+// aren't met.
+type MetadataMatcher_PathSegmentValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e MetadataMatcher_PathSegmentValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e MetadataMatcher_PathSegmentValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e MetadataMatcher_PathSegmentValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e MetadataMatcher_PathSegmentValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e MetadataMatcher_PathSegmentValidationError) ErrorName() string {
+ return "MetadataMatcher_PathSegmentValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e MetadataMatcher_PathSegmentValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMetadataMatcher_PathSegment.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = MetadataMatcher_PathSegmentValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = MetadataMatcher_PathSegmentValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node.pb.go
new file mode 100644
index 000000000..f3be740f8
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node.pb.go
@@ -0,0 +1,189 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/type/matcher/v3/node.proto
+
+package envoy_type_matcher_v3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// Specifies the way to match a Node.
+// The match follows AND semantics.
+type NodeMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Specifies match criteria on the node id.
+ NodeId *StringMatcher `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+ // Specifies match criteria on the node metadata.
+ NodeMetadatas []*StructMatcher `protobuf:"bytes,2,rep,name=node_metadatas,json=nodeMetadatas,proto3" json:"node_metadatas,omitempty"`
+}
+
+func (x *NodeMatcher) Reset() {
+ *x = NodeMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_type_matcher_v3_node_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *NodeMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NodeMatcher) ProtoMessage() {}
+
+func (x *NodeMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_type_matcher_v3_node_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use NodeMatcher.ProtoReflect.Descriptor instead.
+func (*NodeMatcher) Descriptor() ([]byte, []int) {
+ return file_envoy_type_matcher_v3_node_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *NodeMatcher) GetNodeId() *StringMatcher {
+ if x != nil {
+ return x.NodeId
+ }
+ return nil
+}
+
+func (x *NodeMatcher) GetNodeMetadatas() []*StructMatcher {
+ if x != nil {
+ return x.NodeMetadatas
+ }
+ return nil
+}
+
+var File_envoy_type_matcher_v3_node_proto protoreflect.FileDescriptor
+
+var file_envoy_type_matcher_v3_node_proto_rawDesc = []byte{
+ 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33,
+ 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x22, 0xc0, 0x01, 0x0a, 0x0b, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x61, 0x74, 0x63,
+ 0x68, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70,
+ 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72,
+ 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65,
+ 0x49, 0x64, 0x12, 0x4b, 0x0a, 0x0e, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64,
+ 0x61, 0x74, 0x61, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e,
+ 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x52, 0x0d, 0x6e, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x73, 0x3a,
+ 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79,
+ 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x4d,
+ 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x3a, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79,
+ 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x09, 0x4e,
+ 0x6f, 0x64, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02,
+ 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_type_matcher_v3_node_proto_rawDescOnce sync.Once
+ file_envoy_type_matcher_v3_node_proto_rawDescData = file_envoy_type_matcher_v3_node_proto_rawDesc
+)
+
+func file_envoy_type_matcher_v3_node_proto_rawDescGZIP() []byte {
+ file_envoy_type_matcher_v3_node_proto_rawDescOnce.Do(func() {
+ file_envoy_type_matcher_v3_node_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_v3_node_proto_rawDescData)
+ })
+ return file_envoy_type_matcher_v3_node_proto_rawDescData
+}
+
+var file_envoy_type_matcher_v3_node_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_envoy_type_matcher_v3_node_proto_goTypes = []interface{}{
+ (*NodeMatcher)(nil), // 0: envoy.type.matcher.v3.NodeMatcher
+ (*StringMatcher)(nil), // 1: envoy.type.matcher.v3.StringMatcher
+ (*StructMatcher)(nil), // 2: envoy.type.matcher.v3.StructMatcher
+}
+var file_envoy_type_matcher_v3_node_proto_depIdxs = []int32{
+ 1, // 0: envoy.type.matcher.v3.NodeMatcher.node_id:type_name -> envoy.type.matcher.v3.StringMatcher
+ 2, // 1: envoy.type.matcher.v3.NodeMatcher.node_metadatas:type_name -> envoy.type.matcher.v3.StructMatcher
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_envoy_type_matcher_v3_node_proto_init() }
+func file_envoy_type_matcher_v3_node_proto_init() {
+ if File_envoy_type_matcher_v3_node_proto != nil {
+ return
+ }
+ file_envoy_type_matcher_v3_string_proto_init()
+ file_envoy_type_matcher_v3_struct_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_type_matcher_v3_node_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NodeMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_type_matcher_v3_node_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_type_matcher_v3_node_proto_goTypes,
+ DependencyIndexes: file_envoy_type_matcher_v3_node_proto_depIdxs,
+ MessageInfos: file_envoy_type_matcher_v3_node_proto_msgTypes,
+ }.Build()
+ File_envoy_type_matcher_v3_node_proto = out.File
+ file_envoy_type_matcher_v3_node_proto_rawDesc = nil
+ file_envoy_type_matcher_v3_node_proto_goTypes = nil
+ file_envoy_type_matcher_v3_node_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node.pb.validate.go
new file mode 100644
index 000000000..b086cd798
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node.pb.validate.go
@@ -0,0 +1,124 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/type/matcher/v3/node.proto
+
+package envoy_type_matcher_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// Validate checks the field values on NodeMatcher with the rules defined in
+// the proto definition for this message. If any rules are violated, an error
+// is returned.
+func (m *NodeMatcher) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if v, ok := interface{}(m.GetNodeId()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return NodeMatcherValidationError{
+ field: "NodeId",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ for idx, item := range m.GetNodeMetadatas() {
+ _, _ = idx, item
+
+ if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return NodeMatcherValidationError{
+ field: fmt.Sprintf("NodeMetadatas[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// NodeMatcherValidationError is the validation error returned by
+// NodeMatcher.Validate if the designated constraints aren't met.
+type NodeMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e NodeMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e NodeMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e NodeMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e NodeMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e NodeMatcherValidationError) ErrorName() string { return "NodeMatcherValidationError" }
+
+// Error satisfies the builtin error interface
+func (e NodeMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sNodeMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = NodeMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = NodeMatcherValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.go
new file mode 100644
index 000000000..44d03b10a
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.go
@@ -0,0 +1,213 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/type/matcher/v3/number.proto
+
+package envoy_type_matcher_v3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// Specifies the way to match a double value.
+type DoubleMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to MatchPattern:
+ // *DoubleMatcher_Range
+ // *DoubleMatcher_Exact
+ MatchPattern isDoubleMatcher_MatchPattern `protobuf_oneof:"match_pattern"`
+}
+
+func (x *DoubleMatcher) Reset() {
+ *x = DoubleMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_type_matcher_v3_number_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DoubleMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DoubleMatcher) ProtoMessage() {}
+
+func (x *DoubleMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_type_matcher_v3_number_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DoubleMatcher.ProtoReflect.Descriptor instead.
+func (*DoubleMatcher) Descriptor() ([]byte, []int) {
+ return file_envoy_type_matcher_v3_number_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *DoubleMatcher) GetMatchPattern() isDoubleMatcher_MatchPattern {
+ if m != nil {
+ return m.MatchPattern
+ }
+ return nil
+}
+
+func (x *DoubleMatcher) GetRange() *v3.DoubleRange {
+ if x, ok := x.GetMatchPattern().(*DoubleMatcher_Range); ok {
+ return x.Range
+ }
+ return nil
+}
+
+func (x *DoubleMatcher) GetExact() float64 {
+ if x, ok := x.GetMatchPattern().(*DoubleMatcher_Exact); ok {
+ return x.Exact
+ }
+ return 0
+}
+
+type isDoubleMatcher_MatchPattern interface {
+ isDoubleMatcher_MatchPattern()
+}
+
+type DoubleMatcher_Range struct {
+ // If specified, the input double value must be in the range specified here.
+ // Note: The range is using half-open interval semantics [start, end).
+ Range *v3.DoubleRange `protobuf:"bytes,1,opt,name=range,proto3,oneof"`
+}
+
+type DoubleMatcher_Exact struct {
+ // If specified, the input double value must be equal to the value specified here.
+ Exact float64 `protobuf:"fixed64,2,opt,name=exact,proto3,oneof"`
+}
+
+func (*DoubleMatcher_Range) isDoubleMatcher_MatchPattern() {}
+
+func (*DoubleMatcher_Exact) isDoubleMatcher_MatchPattern() {}
+
+var File_envoy_type_matcher_v3_number_proto protoreflect.FileDescriptor
+
+var file_envoy_type_matcher_v3_number_proto_rawDesc = []byte{
+ 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65,
+ 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x61, 0x6e, 0x67, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69,
+ 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61,
+ 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x22, 0x9a, 0x01, 0x0a, 0x0d, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x4d, 0x61, 0x74, 0x63,
+ 0x68, 0x65, 0x72, 0x12, 0x32, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e,
+ 0x76, 0x33, 0x2e, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00,
+ 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x05, 0x65, 0x78, 0x61, 0x63, 0x74,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x05, 0x65, 0x78, 0x61, 0x63, 0x74, 0x3a,
+ 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79,
+ 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x44, 0x6f, 0x75, 0x62, 0x6c,
+ 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x14, 0x0a, 0x0d, 0x6d, 0x61, 0x74, 0x63,
+ 0x68, 0x5f, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x3c,
+ 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x50, 0x72, 0x6f,
+ 0x74, 0x6f, 0x50, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_type_matcher_v3_number_proto_rawDescOnce sync.Once
+ file_envoy_type_matcher_v3_number_proto_rawDescData = file_envoy_type_matcher_v3_number_proto_rawDesc
+)
+
+func file_envoy_type_matcher_v3_number_proto_rawDescGZIP() []byte {
+ file_envoy_type_matcher_v3_number_proto_rawDescOnce.Do(func() {
+ file_envoy_type_matcher_v3_number_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_v3_number_proto_rawDescData)
+ })
+ return file_envoy_type_matcher_v3_number_proto_rawDescData
+}
+
+var file_envoy_type_matcher_v3_number_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_envoy_type_matcher_v3_number_proto_goTypes = []interface{}{
+ (*DoubleMatcher)(nil), // 0: envoy.type.matcher.v3.DoubleMatcher
+ (*v3.DoubleRange)(nil), // 1: envoy.type.v3.DoubleRange
+}
+var file_envoy_type_matcher_v3_number_proto_depIdxs = []int32{
+ 1, // 0: envoy.type.matcher.v3.DoubleMatcher.range:type_name -> envoy.type.v3.DoubleRange
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_envoy_type_matcher_v3_number_proto_init() }
+func file_envoy_type_matcher_v3_number_proto_init() {
+ if File_envoy_type_matcher_v3_number_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_type_matcher_v3_number_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DoubleMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_envoy_type_matcher_v3_number_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*DoubleMatcher_Range)(nil),
+ (*DoubleMatcher_Exact)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_type_matcher_v3_number_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_type_matcher_v3_number_proto_goTypes,
+ DependencyIndexes: file_envoy_type_matcher_v3_number_proto_depIdxs,
+ MessageInfos: file_envoy_type_matcher_v3_number_proto_msgTypes,
+ }.Build()
+ File_envoy_type_matcher_v3_number_proto = out.File
+ file_envoy_type_matcher_v3_number_proto_rawDesc = nil
+ file_envoy_type_matcher_v3_number_proto_goTypes = nil
+ file_envoy_type_matcher_v3_number_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.validate.go
new file mode 100644
index 000000000..2b29ac9ac
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.validate.go
@@ -0,0 +1,124 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/type/matcher/v3/number.proto
+
+package envoy_type_matcher_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// Validate checks the field values on DoubleMatcher with the rules defined in
+// the proto definition for this message. If any rules are violated, an error
+// is returned.
+func (m *DoubleMatcher) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ switch m.MatchPattern.(type) {
+
+ case *DoubleMatcher_Range:
+
+ if v, ok := interface{}(m.GetRange()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return DoubleMatcherValidationError{
+ field: "Range",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *DoubleMatcher_Exact:
+ // no validation rules for Exact
+
+ default:
+ return DoubleMatcherValidationError{
+ field: "MatchPattern",
+ reason: "value is required",
+ }
+
+ }
+
+ return nil
+}
+
+// DoubleMatcherValidationError is the validation error returned by
+// DoubleMatcher.Validate if the designated constraints aren't met.
+type DoubleMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e DoubleMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e DoubleMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e DoubleMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e DoubleMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e DoubleMatcherValidationError) ErrorName() string { return "DoubleMatcherValidationError" }
+
+// Error satisfies the builtin error interface
+func (e DoubleMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sDoubleMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = DoubleMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = DoubleMatcherValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.go
new file mode 100644
index 000000000..048de5ddb
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.go
@@ -0,0 +1,197 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/type/matcher/v3/path.proto
+
+package envoy_type_matcher_v3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// Specifies the way to match a path on HTTP request.
+type PathMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Rule:
+ // *PathMatcher_Path
+ Rule isPathMatcher_Rule `protobuf_oneof:"rule"`
+}
+
+func (x *PathMatcher) Reset() {
+ *x = PathMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_type_matcher_v3_path_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PathMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PathMatcher) ProtoMessage() {}
+
+func (x *PathMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_type_matcher_v3_path_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PathMatcher.ProtoReflect.Descriptor instead.
+func (*PathMatcher) Descriptor() ([]byte, []int) {
+ return file_envoy_type_matcher_v3_path_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *PathMatcher) GetRule() isPathMatcher_Rule {
+ if m != nil {
+ return m.Rule
+ }
+ return nil
+}
+
+func (x *PathMatcher) GetPath() *StringMatcher {
+ if x, ok := x.GetRule().(*PathMatcher_Path); ok {
+ return x.Path
+ }
+ return nil
+}
+
+type isPathMatcher_Rule interface {
+ isPathMatcher_Rule()
+}
+
+type PathMatcher_Path struct {
+ // The `path` must match the URL path portion of the :path header. The query and fragment
+ // string (if present) are removed in the URL path portion.
+ // For example, the path */data* will match the *:path* header */data#fragment?param=value*.
+ Path *StringMatcher `protobuf:"bytes,1,opt,name=path,proto3,oneof"`
+}
+
+func (*PathMatcher_Path) isPathMatcher_Rule() {}
+
+var File_envoy_type_matcher_v3_path_proto protoreflect.FileDescriptor
+
+var file_envoy_type_matcher_v3_path_proto_rawDesc = []byte{
+ 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x61, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33,
+ 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75,
+ 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f,
+ 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64,
+ 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61,
+ 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x01, 0x0a, 0x0b, 0x50, 0x61, 0x74,
+ 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74,
+ 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53,
+ 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42,
+ 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x3a, 0x25,
+ 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70,
+ 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61,
+ 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x0b, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x12, 0x03, 0xf8,
+ 0x42, 0x01, 0x42, 0x3a, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72,
+ 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x09, 0x50, 0x61, 0x74, 0x68, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_type_matcher_v3_path_proto_rawDescOnce sync.Once
+ file_envoy_type_matcher_v3_path_proto_rawDescData = file_envoy_type_matcher_v3_path_proto_rawDesc
+)
+
+func file_envoy_type_matcher_v3_path_proto_rawDescGZIP() []byte {
+ file_envoy_type_matcher_v3_path_proto_rawDescOnce.Do(func() {
+ file_envoy_type_matcher_v3_path_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_v3_path_proto_rawDescData)
+ })
+ return file_envoy_type_matcher_v3_path_proto_rawDescData
+}
+
+var file_envoy_type_matcher_v3_path_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_envoy_type_matcher_v3_path_proto_goTypes = []interface{}{
+ (*PathMatcher)(nil), // 0: envoy.type.matcher.v3.PathMatcher
+ (*StringMatcher)(nil), // 1: envoy.type.matcher.v3.StringMatcher
+}
+var file_envoy_type_matcher_v3_path_proto_depIdxs = []int32{
+ 1, // 0: envoy.type.matcher.v3.PathMatcher.path:type_name -> envoy.type.matcher.v3.StringMatcher
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_envoy_type_matcher_v3_path_proto_init() }
+func file_envoy_type_matcher_v3_path_proto_init() {
+ if File_envoy_type_matcher_v3_path_proto != nil {
+ return
+ }
+ file_envoy_type_matcher_v3_string_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_type_matcher_v3_path_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PathMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_envoy_type_matcher_v3_path_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*PathMatcher_Path)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_type_matcher_v3_path_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_type_matcher_v3_path_proto_goTypes,
+ DependencyIndexes: file_envoy_type_matcher_v3_path_proto_depIdxs,
+ MessageInfos: file_envoy_type_matcher_v3_path_proto_msgTypes,
+ }.Build()
+ File_envoy_type_matcher_v3_path_proto = out.File
+ file_envoy_type_matcher_v3_path_proto_rawDesc = nil
+ file_envoy_type_matcher_v3_path_proto_goTypes = nil
+ file_envoy_type_matcher_v3_path_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.validate.go
new file mode 100644
index 000000000..ad770b38d
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.validate.go
@@ -0,0 +1,128 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/type/matcher/v3/path.proto
+
+package envoy_type_matcher_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// Validate checks the field values on PathMatcher with the rules defined in
+// the proto definition for this message. If any rules are violated, an error
+// is returned.
+func (m *PathMatcher) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ switch m.Rule.(type) {
+
+ case *PathMatcher_Path:
+
+ if m.GetPath() == nil {
+ return PathMatcherValidationError{
+ field: "Path",
+ reason: "value is required",
+ }
+ }
+
+ if v, ok := interface{}(m.GetPath()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return PathMatcherValidationError{
+ field: "Path",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ return PathMatcherValidationError{
+ field: "Rule",
+ reason: "value is required",
+ }
+
+ }
+
+ return nil
+}
+
+// PathMatcherValidationError is the validation error returned by
+// PathMatcher.Validate if the designated constraints aren't met.
+type PathMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e PathMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e PathMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e PathMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e PathMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e PathMatcherValidationError) ErrorName() string { return "PathMatcherValidationError" }
+
+// Error satisfies the builtin error interface
+func (e PathMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sPathMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = PathMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = PathMatcherValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.go
new file mode 100644
index 000000000..8c6185894
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.go
@@ -0,0 +1,405 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/type/matcher/v3/regex.proto
+
+package envoy_type_matcher_v3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/go-control-plane/envoy/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ wrappers "github.com/golang/protobuf/ptypes/wrappers"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// A regex matcher designed for safety when used with untrusted input.
+type RegexMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to EngineType:
+ // *RegexMatcher_GoogleRe2
+ EngineType isRegexMatcher_EngineType `protobuf_oneof:"engine_type"`
+ // The regex match string. The string must be supported by the configured engine.
+ Regex string `protobuf:"bytes,2,opt,name=regex,proto3" json:"regex,omitempty"`
+}
+
+func (x *RegexMatcher) Reset() {
+ *x = RegexMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_type_matcher_v3_regex_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RegexMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RegexMatcher) ProtoMessage() {}
+
+func (x *RegexMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_type_matcher_v3_regex_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RegexMatcher.ProtoReflect.Descriptor instead.
+func (*RegexMatcher) Descriptor() ([]byte, []int) {
+ return file_envoy_type_matcher_v3_regex_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *RegexMatcher) GetEngineType() isRegexMatcher_EngineType {
+ if m != nil {
+ return m.EngineType
+ }
+ return nil
+}
+
+func (x *RegexMatcher) GetGoogleRe2() *RegexMatcher_GoogleRE2 {
+ if x, ok := x.GetEngineType().(*RegexMatcher_GoogleRe2); ok {
+ return x.GoogleRe2
+ }
+ return nil
+}
+
+func (x *RegexMatcher) GetRegex() string {
+ if x != nil {
+ return x.Regex
+ }
+ return ""
+}
+
+type isRegexMatcher_EngineType interface {
+ isRegexMatcher_EngineType()
+}
+
+type RegexMatcher_GoogleRe2 struct {
+ // Google's RE2 regex engine.
+ GoogleRe2 *RegexMatcher_GoogleRE2 `protobuf:"bytes,1,opt,name=google_re2,json=googleRe2,proto3,oneof"`
+}
+
+func (*RegexMatcher_GoogleRe2) isRegexMatcher_EngineType() {}
+
+// Describes how to match a string and then produce a new string using a regular
+// expression and a substitution string.
+type RegexMatchAndSubstitute struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The regular expression used to find portions of a string (hereafter called
+ // the "subject string") that should be replaced. When a new string is
+ // produced during the substitution operation, the new string is initially
+ // the same as the subject string, but then all matches in the subject string
+ // are replaced by the substitution string. If replacing all matches isn't
+ // desired, regular expression anchors can be used to ensure a single match,
+ // so as to replace just one occurrence of a pattern. Capture groups can be
+ // used in the pattern to extract portions of the subject string, and then
+ // referenced in the substitution string.
+ Pattern *RegexMatcher `protobuf:"bytes,1,opt,name=pattern,proto3" json:"pattern,omitempty"`
+ // The string that should be substituted into matching portions of the
+ // subject string during a substitution operation to produce a new string.
+ // Capture groups in the pattern can be referenced in the substitution
+ // string. Note, however, that the syntax for referring to capture groups is
+ // defined by the chosen regular expression engine. Google's `RE2
+ // `_ regular expression engine uses a
+ // backslash followed by the capture group number to denote a numbered
+ // capture group. E.g., ``\1`` refers to capture group 1, and ``\2`` refers
+ // to capture group 2.
+ Substitution string `protobuf:"bytes,2,opt,name=substitution,proto3" json:"substitution,omitempty"`
+}
+
+func (x *RegexMatchAndSubstitute) Reset() {
+ *x = RegexMatchAndSubstitute{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_type_matcher_v3_regex_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RegexMatchAndSubstitute) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RegexMatchAndSubstitute) ProtoMessage() {}
+
+func (x *RegexMatchAndSubstitute) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_type_matcher_v3_regex_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RegexMatchAndSubstitute.ProtoReflect.Descriptor instead.
+func (*RegexMatchAndSubstitute) Descriptor() ([]byte, []int) {
+ return file_envoy_type_matcher_v3_regex_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *RegexMatchAndSubstitute) GetPattern() *RegexMatcher {
+ if x != nil {
+ return x.Pattern
+ }
+ return nil
+}
+
+func (x *RegexMatchAndSubstitute) GetSubstitution() string {
+ if x != nil {
+ return x.Substitution
+ }
+ return ""
+}
+
+// Google's `RE2 `_ regex engine. The regex string must adhere to
+// the documented `syntax `_. The engine is designed
+// to complete execution in linear time as well as limit the amount of memory used.
+//
+// Envoy supports program size checking via runtime. The runtime keys `re2.max_program_size.error_level`
+// and `re2.max_program_size.warn_level` can be set to integers as the maximum program size or
+// complexity that a compiled regex can have before an exception is thrown or a warning is
+// logged, respectively. `re2.max_program_size.error_level` defaults to 100, and
+// `re2.max_program_size.warn_level` has no default if unset (will not check/log a warning).
+//
+// Envoy emits two stats for tracking the program size of regexes: the histogram `re2.program_size`,
+// which records the program size, and the counter `re2.exceeded_warn_level`, which is incremented
+// each time the program size exceeds the warn level threshold.
+type RegexMatcher_GoogleRE2 struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // This field controls the RE2 "program size" which is a rough estimate of how complex a
+ // compiled regex is to evaluate. A regex that has a program size greater than the configured
+ // value will fail to compile. In this case, the configured max program size can be increased
+ // or the regex can be simplified. If not specified, the default is 100.
+ //
+ // This field is deprecated; regexp validation should be performed on the management server
+ // instead of being done by each individual client.
+ //
+ // Deprecated: Do not use.
+ MaxProgramSize *wrappers.UInt32Value `protobuf:"bytes,1,opt,name=max_program_size,json=maxProgramSize,proto3" json:"max_program_size,omitempty"`
+}
+
+func (x *RegexMatcher_GoogleRE2) Reset() {
+ *x = RegexMatcher_GoogleRE2{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_type_matcher_v3_regex_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RegexMatcher_GoogleRE2) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RegexMatcher_GoogleRE2) ProtoMessage() {}
+
+func (x *RegexMatcher_GoogleRE2) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_type_matcher_v3_regex_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RegexMatcher_GoogleRE2.ProtoReflect.Descriptor instead.
+func (*RegexMatcher_GoogleRE2) Descriptor() ([]byte, []int) {
+ return file_envoy_type_matcher_v3_regex_proto_rawDescGZIP(), []int{0, 0}
+}
+
+// Deprecated: Do not use.
+func (x *RegexMatcher_GoogleRE2) GetMaxProgramSize() *wrappers.UInt32Value {
+ if x != nil {
+ return x.MaxProgramSize
+ }
+ return nil
+}
+
+var File_envoy_type_matcher_v3_regex_proto protoreflect.FileDescriptor
+
+var file_envoy_type_matcher_v3_regex_proto_rawDesc = []byte{
+ 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70,
+ 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65,
+ 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21,
+ 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69,
+ 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd8, 0x02, 0x0a, 0x0c, 0x52,
+ 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x58, 0x0a, 0x0a, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x72, 0x65, 0x32, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x52, 0x45, 0x32, 0x42, 0x08,
+ 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x09, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x52, 0x65, 0x32, 0x12, 0x1d, 0x0a, 0x05, 0x72, 0x65, 0x67, 0x65, 0x78, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x05, 0x72,
+ 0x65, 0x67, 0x65, 0x78, 0x1a, 0x92, 0x01, 0x0a, 0x09, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x52,
+ 0x45, 0x32, 0x12, 0x53, 0x0a, 0x10, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x61,
+ 0x6d, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55,
+ 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7,
+ 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x67,
+ 0x72, 0x61, 0x6d, 0x53, 0x69, 0x7a, 0x65, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e,
+ 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x52, 0x45, 0x32, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21,
+ 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x42, 0x12, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65,
+ 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xb9, 0x01, 0x0a, 0x17, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d,
+ 0x61, 0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74,
+ 0x65, 0x12, 0x47, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78,
+ 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10,
+ 0x01, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x22, 0x0a, 0x0c, 0x73, 0x75,
+ 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x31,
+ 0x9a, 0xc5, 0x88, 0x1e, 0x2c, 0x0a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70,
+ 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d,
+ 0x61, 0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74,
+ 0x65, 0x42, 0x3b, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f,
+ 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x52, 0x65, 0x67, 0x65, 0x78, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_type_matcher_v3_regex_proto_rawDescOnce sync.Once
+ file_envoy_type_matcher_v3_regex_proto_rawDescData = file_envoy_type_matcher_v3_regex_proto_rawDesc
+)
+
+func file_envoy_type_matcher_v3_regex_proto_rawDescGZIP() []byte {
+ file_envoy_type_matcher_v3_regex_proto_rawDescOnce.Do(func() {
+ file_envoy_type_matcher_v3_regex_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_v3_regex_proto_rawDescData)
+ })
+ return file_envoy_type_matcher_v3_regex_proto_rawDescData
+}
+
+var file_envoy_type_matcher_v3_regex_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_envoy_type_matcher_v3_regex_proto_goTypes = []interface{}{
+ (*RegexMatcher)(nil), // 0: envoy.type.matcher.v3.RegexMatcher
+ (*RegexMatchAndSubstitute)(nil), // 1: envoy.type.matcher.v3.RegexMatchAndSubstitute
+ (*RegexMatcher_GoogleRE2)(nil), // 2: envoy.type.matcher.v3.RegexMatcher.GoogleRE2
+ (*wrappers.UInt32Value)(nil), // 3: google.protobuf.UInt32Value
+}
+var file_envoy_type_matcher_v3_regex_proto_depIdxs = []int32{
+ 2, // 0: envoy.type.matcher.v3.RegexMatcher.google_re2:type_name -> envoy.type.matcher.v3.RegexMatcher.GoogleRE2
+ 0, // 1: envoy.type.matcher.v3.RegexMatchAndSubstitute.pattern:type_name -> envoy.type.matcher.v3.RegexMatcher
+ 3, // 2: envoy.type.matcher.v3.RegexMatcher.GoogleRE2.max_program_size:type_name -> google.protobuf.UInt32Value
+ 3, // [3:3] is the sub-list for method output_type
+ 3, // [3:3] is the sub-list for method input_type
+ 3, // [3:3] is the sub-list for extension type_name
+ 3, // [3:3] is the sub-list for extension extendee
+ 0, // [0:3] is the sub-list for field type_name
+}
+
+func init() { file_envoy_type_matcher_v3_regex_proto_init() }
+func file_envoy_type_matcher_v3_regex_proto_init() {
+ if File_envoy_type_matcher_v3_regex_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_type_matcher_v3_regex_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RegexMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_type_matcher_v3_regex_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RegexMatchAndSubstitute); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_type_matcher_v3_regex_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RegexMatcher_GoogleRE2); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_envoy_type_matcher_v3_regex_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*RegexMatcher_GoogleRe2)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_type_matcher_v3_regex_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 3,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_type_matcher_v3_regex_proto_goTypes,
+ DependencyIndexes: file_envoy_type_matcher_v3_regex_proto_depIdxs,
+ MessageInfos: file_envoy_type_matcher_v3_regex_proto_msgTypes,
+ }.Build()
+ File_envoy_type_matcher_v3_regex_proto = out.File
+ file_envoy_type_matcher_v3_regex_proto_rawDesc = nil
+ file_envoy_type_matcher_v3_regex_proto_goTypes = nil
+ file_envoy_type_matcher_v3_regex_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.validate.go
new file mode 100644
index 000000000..467052c98
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.validate.go
@@ -0,0 +1,298 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/type/matcher/v3/regex.proto
+
+package envoy_type_matcher_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// Validate checks the field values on RegexMatcher with the rules defined in
+// the proto definition for this message. If any rules are violated, an error
+// is returned.
+func (m *RegexMatcher) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if utf8.RuneCountInString(m.GetRegex()) < 1 {
+ return RegexMatcherValidationError{
+ field: "Regex",
+ reason: "value length must be at least 1 runes",
+ }
+ }
+
+ switch m.EngineType.(type) {
+
+ case *RegexMatcher_GoogleRe2:
+
+ if m.GetGoogleRe2() == nil {
+ return RegexMatcherValidationError{
+ field: "GoogleRe2",
+ reason: "value is required",
+ }
+ }
+
+ if v, ok := interface{}(m.GetGoogleRe2()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RegexMatcherValidationError{
+ field: "GoogleRe2",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ return RegexMatcherValidationError{
+ field: "EngineType",
+ reason: "value is required",
+ }
+
+ }
+
+ return nil
+}
+
+// RegexMatcherValidationError is the validation error returned by
+// RegexMatcher.Validate if the designated constraints aren't met.
+type RegexMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RegexMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RegexMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RegexMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RegexMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RegexMatcherValidationError) ErrorName() string { return "RegexMatcherValidationError" }
+
+// Error satisfies the builtin error interface
+func (e RegexMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRegexMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RegexMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RegexMatcherValidationError{}
+
+// Validate checks the field values on RegexMatchAndSubstitute with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *RegexMatchAndSubstitute) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if m.GetPattern() == nil {
+ return RegexMatchAndSubstituteValidationError{
+ field: "Pattern",
+ reason: "value is required",
+ }
+ }
+
+ if v, ok := interface{}(m.GetPattern()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RegexMatchAndSubstituteValidationError{
+ field: "Pattern",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for Substitution
+
+ return nil
+}
+
+// RegexMatchAndSubstituteValidationError is the validation error returned by
+// RegexMatchAndSubstitute.Validate if the designated constraints aren't met.
+type RegexMatchAndSubstituteValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RegexMatchAndSubstituteValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RegexMatchAndSubstituteValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RegexMatchAndSubstituteValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RegexMatchAndSubstituteValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RegexMatchAndSubstituteValidationError) ErrorName() string {
+ return "RegexMatchAndSubstituteValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e RegexMatchAndSubstituteValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRegexMatchAndSubstitute.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RegexMatchAndSubstituteValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RegexMatchAndSubstituteValidationError{}
+
+// Validate checks the field values on RegexMatcher_GoogleRE2 with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *RegexMatcher_GoogleRE2) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if v, ok := interface{}(m.GetMaxProgramSize()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RegexMatcher_GoogleRE2ValidationError{
+ field: "MaxProgramSize",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ return nil
+}
+
+// RegexMatcher_GoogleRE2ValidationError is the validation error returned by
+// RegexMatcher_GoogleRE2.Validate if the designated constraints aren't met.
+type RegexMatcher_GoogleRE2ValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RegexMatcher_GoogleRE2ValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RegexMatcher_GoogleRE2ValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RegexMatcher_GoogleRE2ValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RegexMatcher_GoogleRE2ValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RegexMatcher_GoogleRE2ValidationError) ErrorName() string {
+ return "RegexMatcher_GoogleRE2ValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e RegexMatcher_GoogleRE2ValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRegexMatcher_GoogleRE2.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RegexMatcher_GoogleRE2ValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RegexMatcher_GoogleRE2ValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.go
new file mode 100644
index 000000000..31bc576ce
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.go
@@ -0,0 +1,399 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/type/matcher/v3/string.proto
+
+package envoy_type_matcher_v3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/go-control-plane/envoy/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// Specifies the way to match a string.
+// [#next-free-field: 8]
+type StringMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to MatchPattern:
+ // *StringMatcher_Exact
+ // *StringMatcher_Prefix
+ // *StringMatcher_Suffix
+ // *StringMatcher_SafeRegex
+ // *StringMatcher_Contains
+ // *StringMatcher_HiddenEnvoyDeprecatedRegex
+ MatchPattern isStringMatcher_MatchPattern `protobuf_oneof:"match_pattern"`
+ // If true, indicates the exact/prefix/suffix matching should be case insensitive. This has no
+ // effect for the safe_regex match.
+ // For example, the matcher *data* will match both input string *Data* and *data* if set to true.
+ IgnoreCase bool `protobuf:"varint,6,opt,name=ignore_case,json=ignoreCase,proto3" json:"ignore_case,omitempty"`
+}
+
+func (x *StringMatcher) Reset() {
+ *x = StringMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_type_matcher_v3_string_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StringMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StringMatcher) ProtoMessage() {}
+
+func (x *StringMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_type_matcher_v3_string_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StringMatcher.ProtoReflect.Descriptor instead.
+func (*StringMatcher) Descriptor() ([]byte, []int) {
+ return file_envoy_type_matcher_v3_string_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *StringMatcher) GetMatchPattern() isStringMatcher_MatchPattern {
+ if m != nil {
+ return m.MatchPattern
+ }
+ return nil
+}
+
+func (x *StringMatcher) GetExact() string {
+ if x, ok := x.GetMatchPattern().(*StringMatcher_Exact); ok {
+ return x.Exact
+ }
+ return ""
+}
+
+func (x *StringMatcher) GetPrefix() string {
+ if x, ok := x.GetMatchPattern().(*StringMatcher_Prefix); ok {
+ return x.Prefix
+ }
+ return ""
+}
+
+func (x *StringMatcher) GetSuffix() string {
+ if x, ok := x.GetMatchPattern().(*StringMatcher_Suffix); ok {
+ return x.Suffix
+ }
+ return ""
+}
+
+func (x *StringMatcher) GetSafeRegex() *RegexMatcher {
+ if x, ok := x.GetMatchPattern().(*StringMatcher_SafeRegex); ok {
+ return x.SafeRegex
+ }
+ return nil
+}
+
+func (x *StringMatcher) GetContains() string {
+ if x, ok := x.GetMatchPattern().(*StringMatcher_Contains); ok {
+ return x.Contains
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (x *StringMatcher) GetHiddenEnvoyDeprecatedRegex() string {
+ if x, ok := x.GetMatchPattern().(*StringMatcher_HiddenEnvoyDeprecatedRegex); ok {
+ return x.HiddenEnvoyDeprecatedRegex
+ }
+ return ""
+}
+
+func (x *StringMatcher) GetIgnoreCase() bool {
+ if x != nil {
+ return x.IgnoreCase
+ }
+ return false
+}
+
+type isStringMatcher_MatchPattern interface {
+ isStringMatcher_MatchPattern()
+}
+
+type StringMatcher_Exact struct {
+ // The input string must match exactly the string specified here.
+ //
+ // Examples:
+ //
+ // * *abc* only matches the value *abc*.
+ Exact string `protobuf:"bytes,1,opt,name=exact,proto3,oneof"`
+}
+
+type StringMatcher_Prefix struct {
+ // The input string must have the prefix specified here.
+ // Note: empty prefix is not allowed, please use regex instead.
+ //
+ // Examples:
+ //
+ // * *abc* matches the value *abc.xyz*
+ Prefix string `protobuf:"bytes,2,opt,name=prefix,proto3,oneof"`
+}
+
+type StringMatcher_Suffix struct {
+ // The input string must have the suffix specified here.
+ // Note: empty prefix is not allowed, please use regex instead.
+ //
+ // Examples:
+ //
+ // * *abc* matches the value *xyz.abc*
+ Suffix string `protobuf:"bytes,3,opt,name=suffix,proto3,oneof"`
+}
+
+type StringMatcher_SafeRegex struct {
+ // The input string must match the regular expression specified here.
+ SafeRegex *RegexMatcher `protobuf:"bytes,5,opt,name=safe_regex,json=safeRegex,proto3,oneof"`
+}
+
+type StringMatcher_Contains struct {
+ // The input string must have the substring specified here.
+ // Note: empty contains match is not allowed, please use regex instead.
+ //
+ // Examples:
+ //
+ // * *abc* matches the value *xyz.abc.def*
+ Contains string `protobuf:"bytes,7,opt,name=contains,proto3,oneof"`
+}
+
+type StringMatcher_HiddenEnvoyDeprecatedRegex struct {
+ // Deprecated: Do not use.
+ HiddenEnvoyDeprecatedRegex string `protobuf:"bytes,4,opt,name=hidden_envoy_deprecated_regex,json=hiddenEnvoyDeprecatedRegex,proto3,oneof"`
+}
+
+func (*StringMatcher_Exact) isStringMatcher_MatchPattern() {}
+
+func (*StringMatcher_Prefix) isStringMatcher_MatchPattern() {}
+
+func (*StringMatcher_Suffix) isStringMatcher_MatchPattern() {}
+
+func (*StringMatcher_SafeRegex) isStringMatcher_MatchPattern() {}
+
+func (*StringMatcher_Contains) isStringMatcher_MatchPattern() {}
+
+func (*StringMatcher_HiddenEnvoyDeprecatedRegex) isStringMatcher_MatchPattern() {}
+
// Specifies a list of ways to match a string.
type ListStringMatcher struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Patterns holds the individual string matchers. Per the generated
	// validation rules (see ListStringMatcher.Validate), the list must
	// contain at least one entry.
	Patterns []*StringMatcher `protobuf:"bytes,1,rep,name=patterns,proto3" json:"patterns,omitempty"`
}
+
// Reset returns x to the empty state. Under protoimpl's unsafe mode it also
// re-stores the message-info pointer that the zeroing wiped out, so the
// reflection machinery stays usable after the reset.
func (x *ListStringMatcher) Reset() {
	*x = ListStringMatcher{}
	if protoimpl.UnsafeEnabled {
		mi := &file_envoy_type_matcher_v3_string_proto_msgTypes[1]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message using the protobuf text format.
func (x *ListStringMatcher) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *ListStringMatcher as a protobuf message.
func (*ListStringMatcher) ProtoMessage() {}

// ProtoReflect returns the reflection view of the message. In unsafe mode
// the message info is cached on the message state the first time it is
// needed; otherwise the call falls through to mi.MessageOf.
func (x *ListStringMatcher) ProtoReflect() protoreflect.Message {
	mi := &file_envoy_type_matcher_v3_string_proto_msgTypes[1]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Descriptor returns the gzipped file descriptor and this message's index
// path within it (message 1 of string.proto).
//
// Deprecated: Use ListStringMatcher.ProtoReflect.Descriptor instead.
func (*ListStringMatcher) Descriptor() ([]byte, []int) {
	return file_envoy_type_matcher_v3_string_proto_rawDescGZIP(), []int{1}
}
+
+func (x *ListStringMatcher) GetPatterns() []*StringMatcher {
+ if x != nil {
+ return x.Patterns
+ }
+ return nil
+}
+
// File_envoy_type_matcher_v3_string_proto is the handle for the compiled
// envoy/type/matcher/v3/string.proto descriptor; it is populated by
// file_envoy_type_matcher_v3_string_proto_init.
var File_envoy_type_matcher_v3_string_proto protoreflect.FileDescriptor

// file_envoy_type_matcher_v3_string_proto_rawDesc is the wire-format
// FileDescriptorProto for string.proto as emitted by protoc-gen-go.
// The bytes are generator output and must not be edited by hand.
var file_envoy_type_matcher_v3_string_proto_rawDesc = []byte{
	0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74,
	0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70,
	0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65,
	0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x21, 0x65, 0x6e, 0x76,
	0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f,
	0x76, 0x33, 0x2f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23,
	0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
	0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72,
	0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f,
	0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
	0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e,
	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f,
	0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa4,
	0x03, 0x0a, 0x0d, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
	0x12, 0x16, 0x0a, 0x05, 0x65, 0x78, 0x61, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48,
	0x00, 0x52, 0x05, 0x65, 0x78, 0x61, 0x63, 0x74, 0x12, 0x21, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66,
	0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10,
	0x01, 0x48, 0x00, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x06, 0x73,
	0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04,
	0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x4e,
	0x0a, 0x0a, 0x73, 0x61, 0x66, 0x65, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x18, 0x05, 0x20, 0x01,
	0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e,
	0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78,
	0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10,
	0x01, 0x48, 0x00, 0x52, 0x09, 0x73, 0x61, 0x66, 0x65, 0x52, 0x65, 0x67, 0x65, 0x78, 0x12, 0x25,
	0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09,
	0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x08, 0x63, 0x6f, 0x6e,
	0x74, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x5e, 0x0a, 0x1d, 0x68, 0x69, 0x64, 0x64, 0x65, 0x6e, 0x5f,
	0x65, 0x6e, 0x76, 0x6f, 0x79, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
	0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x19, 0x18, 0x01,
	0xfa, 0x42, 0x05, 0x72, 0x03, 0x28, 0x80, 0x08, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e,
	0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, 0x01, 0x48, 0x00, 0x52, 0x1a, 0x68, 0x69, 0x64, 0x64, 0x65,
	0x6e, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
	0x52, 0x65, 0x67, 0x65, 0x78, 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f,
	0x63, 0x61, 0x73, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x67, 0x6e, 0x6f,
	0x72, 0x65, 0x43, 0x61, 0x73, 0x65, 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65,
	0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65,
	0x72, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42,
	0x14, 0x0a, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e,
	0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x8c, 0x01, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74,
	0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x4a, 0x0a, 0x08, 0x70,
	0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e,
	0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68,
	0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63,
	0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x70,
	0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, 0x24,
	0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68,
	0x65, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74,
	0x63, 0x68, 0x65, 0x72, 0x42, 0x3c, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
	0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65,
	0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x53, 0x74, 0x72,
	0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02,
	0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (
	file_envoy_type_matcher_v3_string_proto_rawDescOnce sync.Once
	// rawDescData starts as the raw descriptor and is replaced in place by
	// its gzipped form on the first call to rawDescGZIP below.
	file_envoy_type_matcher_v3_string_proto_rawDescData = file_envoy_type_matcher_v3_string_proto_rawDesc
)
+
// file_envoy_type_matcher_v3_string_proto_rawDescGZIP returns the gzipped
// raw file descriptor, compressing it exactly once (guarded by rawDescOnce)
// and caching the compressed bytes back into rawDescData.
func file_envoy_type_matcher_v3_string_proto_rawDescGZIP() []byte {
	file_envoy_type_matcher_v3_string_proto_rawDescOnce.Do(func() {
		file_envoy_type_matcher_v3_string_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_v3_string_proto_rawDescData)
	})
	return file_envoy_type_matcher_v3_string_proto_rawDescData
}
+
// file_envoy_type_matcher_v3_string_proto_msgTypes holds runtime type info
// for the two messages in this file (StringMatcher, ListStringMatcher).
var file_envoy_type_matcher_v3_string_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
// file_envoy_type_matcher_v3_string_proto_goTypes maps descriptor indexes to
// the generated Go types.
var file_envoy_type_matcher_v3_string_proto_goTypes = []interface{}{
	(*StringMatcher)(nil),     // 0: envoy.type.matcher.v3.StringMatcher
	(*ListStringMatcher)(nil), // 1: envoy.type.matcher.v3.ListStringMatcher
	(*RegexMatcher)(nil),      // 2: envoy.type.matcher.v3.RegexMatcher
}
// file_envoy_type_matcher_v3_string_proto_depIdxs records, as indexes into
// goTypes, the type each message field refers to; the trailing bracketed
// entries delimit the method/extension/field sub-lists consumed by the
// type builder.
var file_envoy_type_matcher_v3_string_proto_depIdxs = []int32{
	2, // 0: envoy.type.matcher.v3.StringMatcher.safe_regex:type_name -> envoy.type.matcher.v3.RegexMatcher
	0, // 1: envoy.type.matcher.v3.ListStringMatcher.patterns:type_name -> envoy.type.matcher.v3.StringMatcher
	2, // [2:2] is the sub-list for method output_type
	2, // [2:2] is the sub-list for method input_type
	2, // [2:2] is the sub-list for extension type_name
	2, // [2:2] is the sub-list for extension extendee
	0, // [0:2] is the sub-list for field type_name
}
+
func init() { file_envoy_type_matcher_v3_string_proto_init() }
// file_envoy_type_matcher_v3_string_proto_init builds and registers this
// file's type information. It is idempotent: once File_... is non-nil,
// further calls return immediately.
func file_envoy_type_matcher_v3_string_proto_init() {
	if File_envoy_type_matcher_v3_string_proto != nil {
		return
	}
	// string.proto imports regex.proto, so its types must be built first.
	file_envoy_type_matcher_v3_regex_proto_init()
	if !protoimpl.UnsafeEnabled {
		// Without unsafe access, exporter closures give the protobuf runtime
		// reflective access to the unexported state/sizeCache/unknownFields.
		file_envoy_type_matcher_v3_string_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*StringMatcher); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_envoy_type_matcher_v3_string_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ListStringMatcher); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	// Register the wrapper types that may populate the match_pattern oneof.
	file_envoy_type_matcher_v3_string_proto_msgTypes[0].OneofWrappers = []interface{}{
		(*StringMatcher_Exact)(nil),
		(*StringMatcher_Prefix)(nil),
		(*StringMatcher_Suffix)(nil),
		(*StringMatcher_SafeRegex)(nil),
		(*StringMatcher_Contains)(nil),
		(*StringMatcher_HiddenEnvoyDeprecatedRegex)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_envoy_type_matcher_v3_string_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   2,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_envoy_type_matcher_v3_string_proto_goTypes,
		DependencyIndexes: file_envoy_type_matcher_v3_string_proto_depIdxs,
		MessageInfos:      file_envoy_type_matcher_v3_string_proto_msgTypes,
	}.Build()
	File_envoy_type_matcher_v3_string_proto = out.File
	// The builder has consumed these tables; drop the references.
	file_envoy_type_matcher_v3_string_proto_rawDesc = nil
	file_envoy_type_matcher_v3_string_proto_goTypes = nil
	file_envoy_type_matcher_v3_string_proto_depIdxs = nil
}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.validate.go
new file mode 100644
index 000000000..1f3c183ed
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.validate.go
@@ -0,0 +1,258 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/type/matcher/v3/string.proto
+
+package envoy_type_matcher_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
// ensure the imports are used
//
// protoc-gen-validate always emits the same import set regardless of which
// rules a file actually uses; these throwaway references keep the unused
// ones from tripping Go's unused-import compile error.
var (
	_ = bytes.MinRead
	_ = errors.New("")
	_ = fmt.Print
	_ = utf8.UTFMax
	_ = (*regexp.Regexp)(nil)
	_ = (*strings.Reader)(nil)
	_ = net.IPv4len
	_ = time.Duration(0)
	_ = (*url.URL)(nil)
	_ = (*mail.Address)(nil)
	_ = ptypes.DynamicAny{}
)
+
// Validate checks the field values on StringMatcher with the rules defined in
// the proto definition for this message. If any rules are violated, an error
// is returned.
//
// Checking short-circuits: the first violated rule is returned and later
// rules are not evaluated. A nil receiver is treated as valid.
func (m *StringMatcher) Validate() error {
	if m == nil {
		return nil
	}

	// no validation rules for IgnoreCase

	switch m.MatchPattern.(type) {

	case *StringMatcher_Exact:
		// no validation rules for Exact

	case *StringMatcher_Prefix:

		// prefix: minimum length 1, measured in runes (not bytes).
		if utf8.RuneCountInString(m.GetPrefix()) < 1 {
			return StringMatcherValidationError{
				field:  "Prefix",
				reason: "value length must be at least 1 runes",
			}
		}

	case *StringMatcher_Suffix:

		// suffix: minimum length 1, measured in runes (not bytes).
		if utf8.RuneCountInString(m.GetSuffix()) < 1 {
			return StringMatcherValidationError{
				field:  "Suffix",
				reason: "value length must be at least 1 runes",
			}
		}

	case *StringMatcher_SafeRegex:

		// safe_regex: the embedded message is required...
		if m.GetSafeRegex() == nil {
			return StringMatcherValidationError{
				field:  "SafeRegex",
				reason: "value is required",
			}
		}

		// ...and its own constraints must also hold; failures are wrapped
		// with this field's name and the inner error as cause.
		if v, ok := interface{}(m.GetSafeRegex()).(interface{ Validate() error }); ok {
			if err := v.Validate(); err != nil {
				return StringMatcherValidationError{
					field:  "SafeRegex",
					reason: "embedded message failed validation",
					cause:  err,
				}
			}
		}

	case *StringMatcher_Contains:

		// contains: minimum length 1, measured in runes (not bytes).
		if utf8.RuneCountInString(m.GetContains()) < 1 {
			return StringMatcherValidationError{
				field:  "Contains",
				reason: "value length must be at least 1 runes",
			}
		}

	case *StringMatcher_HiddenEnvoyDeprecatedRegex:

		// deprecated regex: at most 1024 bytes (byte length, not runes).
		if len(m.GetHiddenEnvoyDeprecatedRegex()) > 1024 {
			return StringMatcherValidationError{
				field:  "HiddenEnvoyDeprecatedRegex",
				reason: "value length must be at most 1024 bytes",
			}
		}

	default:
		// The match_pattern oneof is marked required: some branch must be set.
		return StringMatcherValidationError{
			field:  "MatchPattern",
			reason: "value is required",
		}

	}

	return nil
}
+
// StringMatcherValidationError describes a single constraint violation
// reported by StringMatcher.Validate: which field broke which rule, an
// optional underlying cause, and whether the offending value was a map key.
type StringMatcherValidationError struct {
	field  string
	reason string
	cause  error
	key    bool
}

// Field reports the name of the field that failed validation.
func (e StringMatcherValidationError) Field() string {
	return e.field
}

// Reason reports the human-readable rule that was violated.
func (e StringMatcherValidationError) Reason() string {
	return e.reason
}

// Cause reports the nested error that triggered this one, if any.
func (e StringMatcherValidationError) Cause() error {
	return e.cause
}

// Key reports whether the invalid value was used as a map key.
func (e StringMatcherValidationError) Key() bool {
	return e.key
}

// ErrorName identifies the concrete error type by name.
func (e StringMatcherValidationError) ErrorName() string {
	return "StringMatcherValidationError"
}

// Error satisfies the builtin error interface.
func (e StringMatcherValidationError) Error() string {
	var cause string
	if e.cause != nil {
		cause = fmt.Sprintf(" | caused by: %v", e.cause)
	}
	var key string
	if e.key {
		key = "key for "
	}
	return "invalid " + key + "StringMatcher." + e.field + ": " + e.reason + cause
}

var _ error = StringMatcherValidationError{}

var _ interface {
	Field() string
	Reason() string
	Key() bool
	Cause() error
	ErrorName() string
} = StringMatcherValidationError{}
+
// Validate checks the field values on ListStringMatcher with the rules defined
// in the proto definition for this message. If any rules are violated, an
// error is returned.
//
// A nil receiver is treated as valid; otherwise the first violation found is
// returned.
func (m *ListStringMatcher) Validate() error {
	if m == nil {
		return nil
	}

	// patterns: repeated field with min_items 1.
	if len(m.GetPatterns()) < 1 {
		return ListStringMatcherValidationError{
			field:  "Patterns",
			reason: "value must contain at least 1 item(s)",
		}
	}

	for idx, item := range m.GetPatterns() {
		_, _ = idx, item

		// Every element must itself pass StringMatcher validation; the
		// failing index is embedded in the returned error's field name.
		if v, ok := interface{}(item).(interface{ Validate() error }); ok {
			if err := v.Validate(); err != nil {
				return ListStringMatcherValidationError{
					field:  fmt.Sprintf("Patterns[%v]", idx),
					reason: "embedded message failed validation",
					cause:  err,
				}
			}
		}

	}

	return nil
}
+
// ListStringMatcherValidationError describes a single constraint violation
// reported by ListStringMatcher.Validate: which field broke which rule, an
// optional underlying cause, and whether the offending value was a map key.
type ListStringMatcherValidationError struct {
	field  string
	reason string
	cause  error
	key    bool
}

// Field reports the name of the field that failed validation.
func (e ListStringMatcherValidationError) Field() string {
	return e.field
}

// Reason reports the human-readable rule that was violated.
func (e ListStringMatcherValidationError) Reason() string {
	return e.reason
}

// Cause reports the nested error that triggered this one, if any.
func (e ListStringMatcherValidationError) Cause() error {
	return e.cause
}

// Key reports whether the invalid value was used as a map key.
func (e ListStringMatcherValidationError) Key() bool {
	return e.key
}

// ErrorName identifies the concrete error type by name.
func (e ListStringMatcherValidationError) ErrorName() string {
	return "ListStringMatcherValidationError"
}

// Error satisfies the builtin error interface.
func (e ListStringMatcherValidationError) Error() string {
	var cause string
	if e.cause != nil {
		cause = fmt.Sprintf(" | caused by: %v", e.cause)
	}
	var key string
	if e.key {
		key = "key for "
	}
	return "invalid " + key + "ListStringMatcher." + e.field + ": " + e.reason + cause
}

var _ error = ListStringMatcherValidationError{}

var _ interface {
	Field() string
	Reason() string
	Key() bool
	Cause() error
	ErrorName() string
} = ListStringMatcherValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.go
new file mode 100644
index 000000000..38fe9eb6a
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.go
@@ -0,0 +1,328 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/type/matcher/v3/struct.proto
+
+package envoy_type_matcher_v3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
// Compile-time guards: fail the build if the protobuf runtime is older than
// this generated code expects, or the generated code older than the runtime
// supports.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
+
+// StructMatcher provides a general interface to check if a given value is matched in
+// google.protobuf.Struct. It uses `path` to retrieve the value
+// from the struct and then check if it's matched to the specified value.
+//
+// For example, for the following Struct:
+//
+// .. code-block:: yaml
+//
+// fields:
+// a:
+// struct_value:
+// fields:
+// b:
+// struct_value:
+// fields:
+// c:
+// string_value: pro
+// t:
+// list_value:
+// values:
+// - string_value: m
+// - string_value: n
+//
+// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value "pro"
+// from the Metadata which is matched to the specified prefix match.
+//
+// .. code-block:: yaml
+//
+// path:
+// - key: a
+// - key: b
+// - key: c
+// value:
+// string_match:
+// prefix: pr
+//
+// The following StructMatcher is matched as the code will match one of the string values in the
+// list at the path [a, t].
+//
+// .. code-block:: yaml
+//
+// path:
+// - key: a
+// - key: t
+// value:
+// list_match:
+// one_of:
+// string_match:
+// exact: m
+//
+// An example use of StructMatcher is to match metadata in envoy.v*.core.Node.
type StructMatcher struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// The path to retrieve the Value from the Struct.
	// Per the generated validation rules (StructMatcher.Validate), the path
	// must contain at least one segment.
	Path []*StructMatcher_PathSegment `protobuf:"bytes,2,rep,name=path,proto3" json:"path,omitempty"`
	// The StructMatcher is matched if the value retrieved by path is matched to this value.
	// Required: validation rejects a nil Value.
	Value *ValueMatcher `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
}
+
// Reset returns x to the empty state. Under protoimpl's unsafe mode it also
// re-stores the message-info pointer that the zeroing wiped out, so the
// reflection machinery stays usable after the reset.
func (x *StructMatcher) Reset() {
	*x = StructMatcher{}
	if protoimpl.UnsafeEnabled {
		mi := &file_envoy_type_matcher_v3_struct_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message using the protobuf text format.
func (x *StructMatcher) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *StructMatcher as a protobuf message.
func (*StructMatcher) ProtoMessage() {}

// ProtoReflect returns the reflection view of the message. In unsafe mode
// the message info is cached on the message state the first time it is
// needed; otherwise the call falls through to mi.MessageOf.
func (x *StructMatcher) ProtoReflect() protoreflect.Message {
	mi := &file_envoy_type_matcher_v3_struct_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Descriptor returns the gzipped file descriptor and this message's index
// path within it (message 0 of struct.proto).
//
// Deprecated: Use StructMatcher.ProtoReflect.Descriptor instead.
func (*StructMatcher) Descriptor() ([]byte, []int) {
	return file_envoy_type_matcher_v3_struct_proto_rawDescGZIP(), []int{0}
}
+
+func (x *StructMatcher) GetPath() []*StructMatcher_PathSegment {
+ if x != nil {
+ return x.Path
+ }
+ return nil
+}
+
+func (x *StructMatcher) GetValue() *ValueMatcher {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
// Specifies the segment in a path to retrieve value from Struct.
type StructMatcher_PathSegment struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Segment is the "segment" oneof; exactly one branch may be set.
	// Types that are assignable to Segment:
	// *StructMatcher_PathSegment_Key
	Segment isStructMatcher_PathSegment_Segment `protobuf_oneof:"segment"`
}
+
// Reset returns x to the empty state. Under protoimpl's unsafe mode it also
// re-stores the message-info pointer that the zeroing wiped out, so the
// reflection machinery stays usable after the reset.
func (x *StructMatcher_PathSegment) Reset() {
	*x = StructMatcher_PathSegment{}
	if protoimpl.UnsafeEnabled {
		mi := &file_envoy_type_matcher_v3_struct_proto_msgTypes[1]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message using the protobuf text format.
func (x *StructMatcher_PathSegment) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *StructMatcher_PathSegment as a protobuf message.
func (*StructMatcher_PathSegment) ProtoMessage() {}

// ProtoReflect returns the reflection view of the message. In unsafe mode
// the message info is cached on the message state the first time it is
// needed; otherwise the call falls through to mi.MessageOf.
func (x *StructMatcher_PathSegment) ProtoReflect() protoreflect.Message {
	mi := &file_envoy_type_matcher_v3_struct_proto_msgTypes[1]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Descriptor returns the gzipped file descriptor and this nested message's
// index path within it (message 0, nested message 0).
//
// Deprecated: Use StructMatcher_PathSegment.ProtoReflect.Descriptor instead.
func (*StructMatcher_PathSegment) Descriptor() ([]byte, []int) {
	return file_envoy_type_matcher_v3_struct_proto_rawDescGZIP(), []int{0, 0}
}
+
+func (m *StructMatcher_PathSegment) GetSegment() isStructMatcher_PathSegment_Segment {
+ if m != nil {
+ return m.Segment
+ }
+ return nil
+}
+
+func (x *StructMatcher_PathSegment) GetKey() string {
+ if x, ok := x.GetSegment().(*StructMatcher_PathSegment_Key); ok {
+ return x.Key
+ }
+ return ""
+}
+
// isStructMatcher_PathSegment_Segment is the marker interface implemented by
// every wrapper type that may populate the PathSegment "segment" oneof.
type isStructMatcher_PathSegment_Segment interface {
	isStructMatcher_PathSegment_Segment()
}

// StructMatcher_PathSegment_Key wraps the "key" branch of the segment oneof
// (proto field 1).
type StructMatcher_PathSegment_Key struct {
	// If specified, use the key to retrieve the value in a Struct.
	Key string `protobuf:"bytes,1,opt,name=key,proto3,oneof"`
}

// isStructMatcher_PathSegment_Segment makes the wrapper assignable to Segment.
func (*StructMatcher_PathSegment_Key) isStructMatcher_PathSegment_Segment() {}
+
// File_envoy_type_matcher_v3_struct_proto is the handle for the compiled
// envoy/type/matcher/v3/struct.proto descriptor; it is populated by
// file_envoy_type_matcher_v3_struct_proto_init.
var File_envoy_type_matcher_v3_struct_proto protoreflect.FileDescriptor

// file_envoy_type_matcher_v3_struct_proto_rawDesc is the wire-format
// FileDescriptorProto for struct.proto as emitted by protoc-gen-go.
// The bytes are generator output and must not be edited by hand.
var file_envoy_type_matcher_v3_struct_proto_rawDesc = []byte{
	0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74,
	0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70,
	0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65,
	0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x21, 0x65, 0x6e, 0x76,
	0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f,
	0x76, 0x33, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d,
	0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
	0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75,
	0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f,
	0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
	0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64,
	0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbe, 0x02, 0x0a, 0x0d, 0x53, 0x74,
	0x72, 0x75, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x04, 0x70,
	0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
	0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76,
	0x33, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e,
	0x50, 0x61, 0x74, 0x68, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05,
	0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x43, 0x0a, 0x05, 0x76,
	0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76,
	0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e,
	0x76, 0x33, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42,
	0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
	0x1a, 0x6f, 0x0a, 0x0b, 0x50, 0x61, 0x74, 0x68, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x12,
	0x1b, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42,
	0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x3a, 0x33, 0x9a, 0xc5,
	0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e,
	0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x4d, 0x61,
	0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e,
	0x74, 0x42, 0x0e, 0x0a, 0x07, 0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x03, 0xf8, 0x42,
	0x01, 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
	0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x72,
	0x75, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x3c, 0x0a, 0x23, 0x69, 0x6f,
	0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
	0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76,
	0x33, 0x42, 0x0b, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
	0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (
	file_envoy_type_matcher_v3_struct_proto_rawDescOnce sync.Once
	// rawDescData starts as the raw descriptor and is replaced in place by
	// its gzipped form on the first call to rawDescGZIP below.
	file_envoy_type_matcher_v3_struct_proto_rawDescData = file_envoy_type_matcher_v3_struct_proto_rawDesc
)
+
// file_envoy_type_matcher_v3_struct_proto_rawDescGZIP returns the gzipped
// raw file descriptor, compressing it exactly once (guarded by rawDescOnce)
// and caching the compressed bytes back into rawDescData.
func file_envoy_type_matcher_v3_struct_proto_rawDescGZIP() []byte {
	file_envoy_type_matcher_v3_struct_proto_rawDescOnce.Do(func() {
		file_envoy_type_matcher_v3_struct_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_v3_struct_proto_rawDescData)
	})
	return file_envoy_type_matcher_v3_struct_proto_rawDescData
}
+
// file_envoy_type_matcher_v3_struct_proto_msgTypes holds runtime type info
// for the two messages in this file (StructMatcher and its nested
// PathSegment).
var file_envoy_type_matcher_v3_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
// file_envoy_type_matcher_v3_struct_proto_goTypes maps descriptor indexes to
// the generated Go types.
var file_envoy_type_matcher_v3_struct_proto_goTypes = []interface{}{
	(*StructMatcher)(nil),             // 0: envoy.type.matcher.v3.StructMatcher
	(*StructMatcher_PathSegment)(nil), // 1: envoy.type.matcher.v3.StructMatcher.PathSegment
	(*ValueMatcher)(nil),              // 2: envoy.type.matcher.v3.ValueMatcher
}
// file_envoy_type_matcher_v3_struct_proto_depIdxs records, as indexes into
// goTypes, the type each message field refers to; the trailing bracketed
// entries delimit the method/extension/field sub-lists consumed by the
// type builder.
var file_envoy_type_matcher_v3_struct_proto_depIdxs = []int32{
	1, // 0: envoy.type.matcher.v3.StructMatcher.path:type_name -> envoy.type.matcher.v3.StructMatcher.PathSegment
	2, // 1: envoy.type.matcher.v3.StructMatcher.value:type_name -> envoy.type.matcher.v3.ValueMatcher
	2, // [2:2] is the sub-list for method output_type
	2, // [2:2] is the sub-list for method input_type
	2, // [2:2] is the sub-list for extension type_name
	2, // [2:2] is the sub-list for extension extendee
	0, // [0:2] is the sub-list for field type_name
}
+
func init() { file_envoy_type_matcher_v3_struct_proto_init() }
// file_envoy_type_matcher_v3_struct_proto_init builds and registers this
// file's type information. It is idempotent: once File_... is non-nil,
// further calls return immediately.
func file_envoy_type_matcher_v3_struct_proto_init() {
	if File_envoy_type_matcher_v3_struct_proto != nil {
		return
	}
	// struct.proto imports value.proto, so its types must be built first.
	file_envoy_type_matcher_v3_value_proto_init()
	if !protoimpl.UnsafeEnabled {
		// Without unsafe access, exporter closures give the protobuf runtime
		// reflective access to the unexported state/sizeCache/unknownFields.
		file_envoy_type_matcher_v3_struct_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*StructMatcher); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_envoy_type_matcher_v3_struct_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*StructMatcher_PathSegment); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	// Register the wrapper type that may populate the PathSegment oneof.
	file_envoy_type_matcher_v3_struct_proto_msgTypes[1].OneofWrappers = []interface{}{
		(*StructMatcher_PathSegment_Key)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_envoy_type_matcher_v3_struct_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   2,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_envoy_type_matcher_v3_struct_proto_goTypes,
		DependencyIndexes: file_envoy_type_matcher_v3_struct_proto_depIdxs,
		MessageInfos:      file_envoy_type_matcher_v3_struct_proto_msgTypes,
	}.Build()
	File_envoy_type_matcher_v3_struct_proto = out.File
	// The builder has consumed these tables; drop the references.
	file_envoy_type_matcher_v3_struct_proto_rawDesc = nil
	file_envoy_type_matcher_v3_struct_proto_goTypes = nil
	file_envoy_type_matcher_v3_struct_proto_depIdxs = nil
}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.validate.go
new file mode 100644
index 000000000..54050eb1d
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.validate.go
@@ -0,0 +1,224 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/type/matcher/v3/struct.proto
+
+package envoy_type_matcher_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// Validate checks the field values on StructMatcher with the rules defined in
+// the proto definition for this message. If any rules are violated, an error
+// is returned.
+func (m *StructMatcher) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if len(m.GetPath()) < 1 {
+ return StructMatcherValidationError{
+ field: "Path",
+ reason: "value must contain at least 1 item(s)",
+ }
+ }
+
+ for idx, item := range m.GetPath() {
+ _, _ = idx, item
+
+ if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return StructMatcherValidationError{
+ field: fmt.Sprintf("Path[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if m.GetValue() == nil {
+ return StructMatcherValidationError{
+ field: "Value",
+ reason: "value is required",
+ }
+ }
+
+ if v, ok := interface{}(m.GetValue()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return StructMatcherValidationError{
+ field: "Value",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ return nil
+}
+
+// StructMatcherValidationError is the validation error returned by
+// StructMatcher.Validate if the designated constraints aren't met.
+type StructMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e StructMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e StructMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e StructMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e StructMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e StructMatcherValidationError) ErrorName() string { return "StructMatcherValidationError" }
+
+// Error satisfies the builtin error interface
+func (e StructMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sStructMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = StructMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = StructMatcherValidationError{}
+
+// Validate checks the field values on StructMatcher_PathSegment with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *StructMatcher_PathSegment) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ switch m.Segment.(type) {
+
+ case *StructMatcher_PathSegment_Key:
+
+ if utf8.RuneCountInString(m.GetKey()) < 1 {
+ return StructMatcher_PathSegmentValidationError{
+ field: "Key",
+ reason: "value length must be at least 1 runes",
+ }
+ }
+
+ default:
+ return StructMatcher_PathSegmentValidationError{
+ field: "Segment",
+ reason: "value is required",
+ }
+
+ }
+
+ return nil
+}
+
+// StructMatcher_PathSegmentValidationError is the validation error returned by
+// StructMatcher_PathSegment.Validate if the designated constraints aren't met.
+type StructMatcher_PathSegmentValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e StructMatcher_PathSegmentValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e StructMatcher_PathSegmentValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e StructMatcher_PathSegmentValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e StructMatcher_PathSegmentValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e StructMatcher_PathSegmentValidationError) ErrorName() string {
+ return "StructMatcher_PathSegmentValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e StructMatcher_PathSegmentValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sStructMatcher_PathSegment.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = StructMatcher_PathSegmentValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = StructMatcher_PathSegmentValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.go
new file mode 100644
index 000000000..d55d82058
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.go
@@ -0,0 +1,461 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/type/matcher/v3/value.proto
+
+package envoy_type_matcher_v3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// Specifies the way to match a ProtobufWkt::Value. Primitive values and ListValue are supported.
+// StructValue is not supported and is always not matched.
+// [#next-free-field: 7]
+type ValueMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Specifies how to match a value.
+ //
+ // Types that are assignable to MatchPattern:
+ // *ValueMatcher_NullMatch_
+ // *ValueMatcher_DoubleMatch
+ // *ValueMatcher_StringMatch
+ // *ValueMatcher_BoolMatch
+ // *ValueMatcher_PresentMatch
+ // *ValueMatcher_ListMatch
+ MatchPattern isValueMatcher_MatchPattern `protobuf_oneof:"match_pattern"`
+}
+
+func (x *ValueMatcher) Reset() {
+ *x = ValueMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_type_matcher_v3_value_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ValueMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ValueMatcher) ProtoMessage() {}
+
+func (x *ValueMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_type_matcher_v3_value_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ValueMatcher.ProtoReflect.Descriptor instead.
+func (*ValueMatcher) Descriptor() ([]byte, []int) {
+ return file_envoy_type_matcher_v3_value_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *ValueMatcher) GetMatchPattern() isValueMatcher_MatchPattern {
+ if m != nil {
+ return m.MatchPattern
+ }
+ return nil
+}
+
+func (x *ValueMatcher) GetNullMatch() *ValueMatcher_NullMatch {
+ if x, ok := x.GetMatchPattern().(*ValueMatcher_NullMatch_); ok {
+ return x.NullMatch
+ }
+ return nil
+}
+
+func (x *ValueMatcher) GetDoubleMatch() *DoubleMatcher {
+ if x, ok := x.GetMatchPattern().(*ValueMatcher_DoubleMatch); ok {
+ return x.DoubleMatch
+ }
+ return nil
+}
+
+func (x *ValueMatcher) GetStringMatch() *StringMatcher {
+ if x, ok := x.GetMatchPattern().(*ValueMatcher_StringMatch); ok {
+ return x.StringMatch
+ }
+ return nil
+}
+
+func (x *ValueMatcher) GetBoolMatch() bool {
+ if x, ok := x.GetMatchPattern().(*ValueMatcher_BoolMatch); ok {
+ return x.BoolMatch
+ }
+ return false
+}
+
+func (x *ValueMatcher) GetPresentMatch() bool {
+ if x, ok := x.GetMatchPattern().(*ValueMatcher_PresentMatch); ok {
+ return x.PresentMatch
+ }
+ return false
+}
+
+func (x *ValueMatcher) GetListMatch() *ListMatcher {
+ if x, ok := x.GetMatchPattern().(*ValueMatcher_ListMatch); ok {
+ return x.ListMatch
+ }
+ return nil
+}
+
+type isValueMatcher_MatchPattern interface {
+ isValueMatcher_MatchPattern()
+}
+
+type ValueMatcher_NullMatch_ struct {
+ // If specified, a match occurs if and only if the target value is a NullValue.
+ NullMatch *ValueMatcher_NullMatch `protobuf:"bytes,1,opt,name=null_match,json=nullMatch,proto3,oneof"`
+}
+
+type ValueMatcher_DoubleMatch struct {
+ // If specified, a match occurs if and only if the target value is a double value and is
+ // matched to this field.
+ DoubleMatch *DoubleMatcher `protobuf:"bytes,2,opt,name=double_match,json=doubleMatch,proto3,oneof"`
+}
+
+type ValueMatcher_StringMatch struct {
+ // If specified, a match occurs if and only if the target value is a string value and is
+ // matched to this field.
+ StringMatch *StringMatcher `protobuf:"bytes,3,opt,name=string_match,json=stringMatch,proto3,oneof"`
+}
+
+type ValueMatcher_BoolMatch struct {
+ // If specified, a match occurs if and only if the target value is a bool value and is equal
+ // to this field.
+ BoolMatch bool `protobuf:"varint,4,opt,name=bool_match,json=boolMatch,proto3,oneof"`
+}
+
+type ValueMatcher_PresentMatch struct {
+ // If specified, value match will be performed based on whether the path is referring to a
+ // valid primitive value in the metadata. If the path is referring to a non-primitive value,
+ // the result is always not matched.
+ PresentMatch bool `protobuf:"varint,5,opt,name=present_match,json=presentMatch,proto3,oneof"`
+}
+
+type ValueMatcher_ListMatch struct {
+ // If specified, a match occurs if and only if the target value is a list value and
+ // is matched to this field.
+ ListMatch *ListMatcher `protobuf:"bytes,6,opt,name=list_match,json=listMatch,proto3,oneof"`
+}
+
+func (*ValueMatcher_NullMatch_) isValueMatcher_MatchPattern() {}
+
+func (*ValueMatcher_DoubleMatch) isValueMatcher_MatchPattern() {}
+
+func (*ValueMatcher_StringMatch) isValueMatcher_MatchPattern() {}
+
+func (*ValueMatcher_BoolMatch) isValueMatcher_MatchPattern() {}
+
+func (*ValueMatcher_PresentMatch) isValueMatcher_MatchPattern() {}
+
+func (*ValueMatcher_ListMatch) isValueMatcher_MatchPattern() {}
+
+// Specifies the way to match a list value.
+type ListMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to MatchPattern:
+ // *ListMatcher_OneOf
+ MatchPattern isListMatcher_MatchPattern `protobuf_oneof:"match_pattern"`
+}
+
+func (x *ListMatcher) Reset() {
+ *x = ListMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_type_matcher_v3_value_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListMatcher) ProtoMessage() {}
+
+func (x *ListMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_type_matcher_v3_value_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListMatcher.ProtoReflect.Descriptor instead.
+func (*ListMatcher) Descriptor() ([]byte, []int) {
+ return file_envoy_type_matcher_v3_value_proto_rawDescGZIP(), []int{1}
+}
+
+func (m *ListMatcher) GetMatchPattern() isListMatcher_MatchPattern {
+ if m != nil {
+ return m.MatchPattern
+ }
+ return nil
+}
+
+func (x *ListMatcher) GetOneOf() *ValueMatcher {
+ if x, ok := x.GetMatchPattern().(*ListMatcher_OneOf); ok {
+ return x.OneOf
+ }
+ return nil
+}
+
+type isListMatcher_MatchPattern interface {
+ isListMatcher_MatchPattern()
+}
+
+type ListMatcher_OneOf struct {
+ // If specified, at least one of the values in the list must match the value specified.
+ OneOf *ValueMatcher `protobuf:"bytes,1,opt,name=one_of,json=oneOf,proto3,oneof"`
+}
+
+func (*ListMatcher_OneOf) isListMatcher_MatchPattern() {}
+
+// NullMatch is an empty message to specify a null value.
+type ValueMatcher_NullMatch struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *ValueMatcher_NullMatch) Reset() {
+ *x = ValueMatcher_NullMatch{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_type_matcher_v3_value_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ValueMatcher_NullMatch) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ValueMatcher_NullMatch) ProtoMessage() {}
+
+func (x *ValueMatcher_NullMatch) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_type_matcher_v3_value_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ValueMatcher_NullMatch.ProtoReflect.Descriptor instead.
+func (*ValueMatcher_NullMatch) Descriptor() ([]byte, []int) {
+ return file_envoy_type_matcher_v3_value_proto_rawDescGZIP(), []int{0, 0}
+}
+
+var File_envoy_type_matcher_v3_value_proto protoreflect.FileDescriptor
+
+var file_envoy_type_matcher_v3_value_proto_rawDesc = []byte{
+ 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76,
+ 0x33, 0x2f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76,
+ 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xfe, 0x03,
+ 0x0a, 0x0c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x4e,
+ 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x4d, 0x61, 0x74, 0x63,
+ 0x68, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x49,
+ 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70,
+ 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x6f, 0x75,
+ 0x62, 0x6c, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f,
+ 0x75, 0x62, 0x6c, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x49, 0x0a, 0x0c, 0x73, 0x74, 0x72,
+ 0x69, 0x6e, 0x67, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61,
+ 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d,
+ 0x61, 0x74, 0x63, 0x68, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c,
+ 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x25, 0x0a, 0x0d, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74,
+ 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0c,
+ 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x43, 0x0a, 0x0a,
+ 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x74, 0x63,
+ 0x68, 0x1a, 0x3d, 0x0a, 0x09, 0x4e, 0x75, 0x6c, 0x6c, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x3a, 0x30,
+ 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70,
+ 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d,
+ 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x4d, 0x61, 0x74, 0x63, 0x68,
+ 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74,
+ 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x14, 0x0a, 0x0d, 0x6d, 0x61, 0x74, 0x63,
+ 0x68, 0x5f, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x88,
+ 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x3c,
+ 0x0a, 0x06, 0x6f, 0x6e, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63,
+ 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63,
+ 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x05, 0x6f, 0x6e, 0x65, 0x4f, 0x66, 0x3a, 0x25, 0x9a, 0xc5,
+ 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x74, 0x63,
+ 0x68, 0x65, 0x72, 0x42, 0x14, 0x0a, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x70, 0x61, 0x74,
+ 0x74, 0x65, 0x72, 0x6e, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x3b, 0x0a, 0x23, 0x69, 0x6f, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33,
+ 0x42, 0x0a, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0xba, 0x80,
+ 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_type_matcher_v3_value_proto_rawDescOnce sync.Once
+ file_envoy_type_matcher_v3_value_proto_rawDescData = file_envoy_type_matcher_v3_value_proto_rawDesc
+)
+
+func file_envoy_type_matcher_v3_value_proto_rawDescGZIP() []byte {
+ file_envoy_type_matcher_v3_value_proto_rawDescOnce.Do(func() {
+ file_envoy_type_matcher_v3_value_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_v3_value_proto_rawDescData)
+ })
+ return file_envoy_type_matcher_v3_value_proto_rawDescData
+}
+
+var file_envoy_type_matcher_v3_value_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_envoy_type_matcher_v3_value_proto_goTypes = []interface{}{
+ (*ValueMatcher)(nil), // 0: envoy.type.matcher.v3.ValueMatcher
+ (*ListMatcher)(nil), // 1: envoy.type.matcher.v3.ListMatcher
+ (*ValueMatcher_NullMatch)(nil), // 2: envoy.type.matcher.v3.ValueMatcher.NullMatch
+ (*DoubleMatcher)(nil), // 3: envoy.type.matcher.v3.DoubleMatcher
+ (*StringMatcher)(nil), // 4: envoy.type.matcher.v3.StringMatcher
+}
+var file_envoy_type_matcher_v3_value_proto_depIdxs = []int32{
+ 2, // 0: envoy.type.matcher.v3.ValueMatcher.null_match:type_name -> envoy.type.matcher.v3.ValueMatcher.NullMatch
+ 3, // 1: envoy.type.matcher.v3.ValueMatcher.double_match:type_name -> envoy.type.matcher.v3.DoubleMatcher
+ 4, // 2: envoy.type.matcher.v3.ValueMatcher.string_match:type_name -> envoy.type.matcher.v3.StringMatcher
+ 1, // 3: envoy.type.matcher.v3.ValueMatcher.list_match:type_name -> envoy.type.matcher.v3.ListMatcher
+ 0, // 4: envoy.type.matcher.v3.ListMatcher.one_of:type_name -> envoy.type.matcher.v3.ValueMatcher
+ 5, // [5:5] is the sub-list for method output_type
+ 5, // [5:5] is the sub-list for method input_type
+ 5, // [5:5] is the sub-list for extension type_name
+ 5, // [5:5] is the sub-list for extension extendee
+ 0, // [0:5] is the sub-list for field type_name
+}
+
+func init() { file_envoy_type_matcher_v3_value_proto_init() }
+func file_envoy_type_matcher_v3_value_proto_init() {
+ if File_envoy_type_matcher_v3_value_proto != nil {
+ return
+ }
+ file_envoy_type_matcher_v3_number_proto_init()
+ file_envoy_type_matcher_v3_string_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_type_matcher_v3_value_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ValueMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_type_matcher_v3_value_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_type_matcher_v3_value_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ValueMatcher_NullMatch); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_envoy_type_matcher_v3_value_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*ValueMatcher_NullMatch_)(nil),
+ (*ValueMatcher_DoubleMatch)(nil),
+ (*ValueMatcher_StringMatch)(nil),
+ (*ValueMatcher_BoolMatch)(nil),
+ (*ValueMatcher_PresentMatch)(nil),
+ (*ValueMatcher_ListMatch)(nil),
+ }
+ file_envoy_type_matcher_v3_value_proto_msgTypes[1].OneofWrappers = []interface{}{
+ (*ListMatcher_OneOf)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_type_matcher_v3_value_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 3,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_type_matcher_v3_value_proto_goTypes,
+ DependencyIndexes: file_envoy_type_matcher_v3_value_proto_depIdxs,
+ MessageInfos: file_envoy_type_matcher_v3_value_proto_msgTypes,
+ }.Build()
+ File_envoy_type_matcher_v3_value_proto = out.File
+ file_envoy_type_matcher_v3_value_proto_rawDesc = nil
+ file_envoy_type_matcher_v3_value_proto_goTypes = nil
+ file_envoy_type_matcher_v3_value_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.validate.go
new file mode 100644
index 000000000..62542ad9d
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.validate.go
@@ -0,0 +1,317 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/type/matcher/v3/value.proto
+
+package envoy_type_matcher_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// Validate checks the field values on ValueMatcher with the rules defined in
+// the proto definition for this message. If any rules are violated, an error
+// is returned.
+func (m *ValueMatcher) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ switch m.MatchPattern.(type) {
+
+ case *ValueMatcher_NullMatch_:
+
+ if v, ok := interface{}(m.GetNullMatch()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ValueMatcherValidationError{
+ field: "NullMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *ValueMatcher_DoubleMatch:
+
+ if v, ok := interface{}(m.GetDoubleMatch()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ValueMatcherValidationError{
+ field: "DoubleMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *ValueMatcher_StringMatch:
+
+ if v, ok := interface{}(m.GetStringMatch()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ValueMatcherValidationError{
+ field: "StringMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *ValueMatcher_BoolMatch:
+ // no validation rules for BoolMatch
+
+ case *ValueMatcher_PresentMatch:
+ // no validation rules for PresentMatch
+
+ case *ValueMatcher_ListMatch:
+
+ if v, ok := interface{}(m.GetListMatch()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ValueMatcherValidationError{
+ field: "ListMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ return ValueMatcherValidationError{
+ field: "MatchPattern",
+ reason: "value is required",
+ }
+
+ }
+
+ return nil
+}
+
+// ValueMatcherValidationError is the validation error returned by
+// ValueMatcher.Validate if the designated constraints aren't met.
+type ValueMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ValueMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ValueMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ValueMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ValueMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ValueMatcherValidationError) ErrorName() string { return "ValueMatcherValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ValueMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sValueMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ValueMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ValueMatcherValidationError{}
+
+// Validate checks the field values on ListMatcher with the rules defined in
+// the proto definition for this message. If any rules are violated, an error
+// is returned.
+func (m *ListMatcher) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ switch m.MatchPattern.(type) {
+
+ case *ListMatcher_OneOf:
+
+ if v, ok := interface{}(m.GetOneOf()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ListMatcherValidationError{
+ field: "OneOf",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ return ListMatcherValidationError{
+ field: "MatchPattern",
+ reason: "value is required",
+ }
+
+ }
+
+ return nil
+}
+
+// ListMatcherValidationError is the validation error returned by
+// ListMatcher.Validate if the designated constraints aren't met.
+type ListMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ListMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ListMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ListMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ListMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ListMatcherValidationError) ErrorName() string { return "ListMatcherValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ListMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sListMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ListMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ListMatcherValidationError{}
+
+// Validate checks the field values on ValueMatcher_NullMatch with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *ValueMatcher_NullMatch) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ return nil
+}
+
+// ValueMatcher_NullMatchValidationError is the validation error returned by
+// ValueMatcher_NullMatch.Validate if the designated constraints aren't met.
+type ValueMatcher_NullMatchValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ValueMatcher_NullMatchValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ValueMatcher_NullMatchValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ValueMatcher_NullMatchValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ValueMatcher_NullMatchValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ValueMatcher_NullMatchValidationError) ErrorName() string {
+ return "ValueMatcher_NullMatchValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ValueMatcher_NullMatchValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sValueMatcher_NullMatch.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ValueMatcher_NullMatchValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ValueMatcher_NullMatchValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.go
new file mode 100644
index 000000000..22c98de3d
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.go
@@ -0,0 +1,245 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/type/v3/hash_policy.proto
+
+package envoy_type_v3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// Specifies the hash policy
+type HashPolicy struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to PolicySpecifier:
+ // *HashPolicy_SourceIp_
+ PolicySpecifier isHashPolicy_PolicySpecifier `protobuf_oneof:"policy_specifier"`
+}
+
+func (x *HashPolicy) Reset() {
+ *x = HashPolicy{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_type_v3_hash_policy_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HashPolicy) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HashPolicy) ProtoMessage() {}
+
+func (x *HashPolicy) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_type_v3_hash_policy_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HashPolicy.ProtoReflect.Descriptor instead.
+func (*HashPolicy) Descriptor() ([]byte, []int) {
+ return file_envoy_type_v3_hash_policy_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *HashPolicy) GetPolicySpecifier() isHashPolicy_PolicySpecifier {
+ if m != nil {
+ return m.PolicySpecifier
+ }
+ return nil
+}
+
+func (x *HashPolicy) GetSourceIp() *HashPolicy_SourceIp {
+ if x, ok := x.GetPolicySpecifier().(*HashPolicy_SourceIp_); ok {
+ return x.SourceIp
+ }
+ return nil
+}
+
+type isHashPolicy_PolicySpecifier interface {
+ isHashPolicy_PolicySpecifier()
+}
+
+type HashPolicy_SourceIp_ struct {
+ SourceIp *HashPolicy_SourceIp `protobuf:"bytes,1,opt,name=source_ip,json=sourceIp,proto3,oneof"`
+}
+
+func (*HashPolicy_SourceIp_) isHashPolicy_PolicySpecifier() {}
+
+// The source IP will be used to compute the hash used by hash-based load balancing
+// algorithms.
+type HashPolicy_SourceIp struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *HashPolicy_SourceIp) Reset() {
+ *x = HashPolicy_SourceIp{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_type_v3_hash_policy_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HashPolicy_SourceIp) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HashPolicy_SourceIp) ProtoMessage() {}
+
+func (x *HashPolicy_SourceIp) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_type_v3_hash_policy_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HashPolicy_SourceIp.ProtoReflect.Descriptor instead.
+func (*HashPolicy_SourceIp) Descriptor() ([]byte, []int) {
+ return file_envoy_type_v3_hash_policy_proto_rawDescGZIP(), []int{0, 0}
+}
+
+var File_envoy_type_v3_hash_policy_proto protoreflect.FileDescriptor
+
+var file_envoy_type_v3_hash_policy_proto_rawDesc = []byte{
+ 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f,
+ 0x68, 0x61, 0x73, 0x68, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x12, 0x0d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33,
+ 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb9, 0x01, 0x0a, 0x0a,
+ 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x41, 0x0a, 0x09, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x61,
+ 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49,
+ 0x70, 0x48, 0x00, 0x52, 0x08, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x70, 0x1a, 0x31, 0x0a,
+ 0x08, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x70, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20,
+ 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x48, 0x61, 0x73,
+ 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x70,
+ 0x3a, 0x1c, 0x9a, 0xc5, 0x88, 0x1e, 0x17, 0x0a, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74,
+ 0x79, 0x70, 0x65, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x17,
+ 0x0a, 0x10, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69,
+ 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x38, 0x0a, 0x1b, 0x69, 0x6f, 0x2e, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74,
+ 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69,
+ 0x63, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10,
+ 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_type_v3_hash_policy_proto_rawDescOnce sync.Once
+ file_envoy_type_v3_hash_policy_proto_rawDescData = file_envoy_type_v3_hash_policy_proto_rawDesc
+)
+
+func file_envoy_type_v3_hash_policy_proto_rawDescGZIP() []byte {
+ file_envoy_type_v3_hash_policy_proto_rawDescOnce.Do(func() {
+ file_envoy_type_v3_hash_policy_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_v3_hash_policy_proto_rawDescData)
+ })
+ return file_envoy_type_v3_hash_policy_proto_rawDescData
+}
+
+var file_envoy_type_v3_hash_policy_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_envoy_type_v3_hash_policy_proto_goTypes = []interface{}{
+ (*HashPolicy)(nil), // 0: envoy.type.v3.HashPolicy
+ (*HashPolicy_SourceIp)(nil), // 1: envoy.type.v3.HashPolicy.SourceIp
+}
+var file_envoy_type_v3_hash_policy_proto_depIdxs = []int32{
+ 1, // 0: envoy.type.v3.HashPolicy.source_ip:type_name -> envoy.type.v3.HashPolicy.SourceIp
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_envoy_type_v3_hash_policy_proto_init() }
+func file_envoy_type_v3_hash_policy_proto_init() {
+ if File_envoy_type_v3_hash_policy_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_type_v3_hash_policy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HashPolicy); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_type_v3_hash_policy_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HashPolicy_SourceIp); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_envoy_type_v3_hash_policy_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*HashPolicy_SourceIp_)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_type_v3_hash_policy_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_type_v3_hash_policy_proto_goTypes,
+ DependencyIndexes: file_envoy_type_v3_hash_policy_proto_depIdxs,
+ MessageInfos: file_envoy_type_v3_hash_policy_proto_msgTypes,
+ }.Build()
+ File_envoy_type_v3_hash_policy_proto = out.File
+ file_envoy_type_v3_hash_policy_proto_rawDesc = nil
+ file_envoy_type_v3_hash_policy_proto_goTypes = nil
+ file_envoy_type_v3_hash_policy_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.validate.go
new file mode 100644
index 000000000..bdbdaa231
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.validate.go
@@ -0,0 +1,187 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/type/v3/hash_policy.proto
+
+package envoy_type_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// Validate checks the field values on HashPolicy with the rules defined in the
+// proto definition for this message. If any rules are violated, an error is returned.
+func (m *HashPolicy) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ switch m.PolicySpecifier.(type) {
+
+ case *HashPolicy_SourceIp_:
+
+ if v, ok := interface{}(m.GetSourceIp()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HashPolicyValidationError{
+ field: "SourceIp",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ return HashPolicyValidationError{
+ field: "PolicySpecifier",
+ reason: "value is required",
+ }
+
+ }
+
+ return nil
+}
+
+// HashPolicyValidationError is the validation error returned by
+// HashPolicy.Validate if the designated constraints aren't met.
+type HashPolicyValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HashPolicyValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HashPolicyValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HashPolicyValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HashPolicyValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HashPolicyValidationError) ErrorName() string { return "HashPolicyValidationError" }
+
+// Error satisfies the builtin error interface
+func (e HashPolicyValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHashPolicy.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HashPolicyValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HashPolicyValidationError{}
+
+// Validate checks the field values on HashPolicy_SourceIp with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, an error is returned.
+func (m *HashPolicy_SourceIp) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ return nil
+}
+
+// HashPolicy_SourceIpValidationError is the validation error returned by
+// HashPolicy_SourceIp.Validate if the designated constraints aren't met.
+type HashPolicy_SourceIpValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HashPolicy_SourceIpValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HashPolicy_SourceIpValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HashPolicy_SourceIpValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HashPolicy_SourceIpValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HashPolicy_SourceIpValidationError) ErrorName() string {
+ return "HashPolicy_SourceIpValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e HashPolicy_SourceIpValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHashPolicy_SourceIp.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HashPolicy_SourceIpValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HashPolicy_SourceIpValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http.pb.go
new file mode 100644
index 000000000..faa0dfb3b
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http.pb.go
@@ -0,0 +1,145 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/type/v3/http.proto
+
+package envoy_type_v3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+type CodecClientType int32
+
+const (
+ CodecClientType_HTTP1 CodecClientType = 0
+ CodecClientType_HTTP2 CodecClientType = 1
+ // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with
+ // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient
+ // to distinguish HTTP1 and HTTP2 traffic.
+ CodecClientType_HTTP3 CodecClientType = 2
+)
+
+// Enum value maps for CodecClientType.
+var (
+ CodecClientType_name = map[int32]string{
+ 0: "HTTP1",
+ 1: "HTTP2",
+ 2: "HTTP3",
+ }
+ CodecClientType_value = map[string]int32{
+ "HTTP1": 0,
+ "HTTP2": 1,
+ "HTTP3": 2,
+ }
+)
+
+func (x CodecClientType) Enum() *CodecClientType {
+ p := new(CodecClientType)
+ *p = x
+ return p
+}
+
+func (x CodecClientType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (CodecClientType) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_type_v3_http_proto_enumTypes[0].Descriptor()
+}
+
+func (CodecClientType) Type() protoreflect.EnumType {
+ return &file_envoy_type_v3_http_proto_enumTypes[0]
+}
+
+func (x CodecClientType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use CodecClientType.Descriptor instead.
+func (CodecClientType) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_type_v3_http_proto_rawDescGZIP(), []int{0}
+}
+
+var File_envoy_type_v3_http_proto protoreflect.FileDescriptor
+
+var file_envoy_type_v3_http_proto_rawDesc = []byte{
+ 0x0a, 0x18, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f,
+ 0x68, 0x74, 0x74, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f,
+ 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2a, 0x32, 0x0a, 0x0f, 0x43, 0x6f, 0x64, 0x65,
+ 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x48,
+ 0x54, 0x54, 0x50, 0x31, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x32, 0x10,
+ 0x01, 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x33, 0x10, 0x02, 0x42, 0x32, 0x0a, 0x1b,
+ 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x09, 0x48, 0x74, 0x74,
+ 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_type_v3_http_proto_rawDescOnce sync.Once
+ file_envoy_type_v3_http_proto_rawDescData = file_envoy_type_v3_http_proto_rawDesc
+)
+
+func file_envoy_type_v3_http_proto_rawDescGZIP() []byte {
+ file_envoy_type_v3_http_proto_rawDescOnce.Do(func() {
+ file_envoy_type_v3_http_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_v3_http_proto_rawDescData)
+ })
+ return file_envoy_type_v3_http_proto_rawDescData
+}
+
+var file_envoy_type_v3_http_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_envoy_type_v3_http_proto_goTypes = []interface{}{
+ (CodecClientType)(0), // 0: envoy.type.v3.CodecClientType
+}
+var file_envoy_type_v3_http_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_envoy_type_v3_http_proto_init() }
+func file_envoy_type_v3_http_proto_init() {
+ if File_envoy_type_v3_http_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_type_v3_http_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 0,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_type_v3_http_proto_goTypes,
+ DependencyIndexes: file_envoy_type_v3_http_proto_depIdxs,
+ EnumInfos: file_envoy_type_v3_http_proto_enumTypes,
+ }.Build()
+ File_envoy_type_v3_http_proto = out.File
+ file_envoy_type_v3_http_proto_rawDesc = nil
+ file_envoy_type_v3_http_proto_goTypes = nil
+ file_envoy_type_v3_http_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http.pb.validate.go
new file mode 100644
index 000000000..f099568bd
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http.pb.validate.go
@@ -0,0 +1,34 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/type/v3/http.proto
+
+package envoy_type_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.go
new file mode 100644
index 000000000..8a5e88b79
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.go
@@ -0,0 +1,459 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/type/v3/http_status.proto
+
+package envoy_type_v3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// HTTP response codes supported in Envoy.
+// For more details: https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml
+type StatusCode int32
+
+const (
+ // Empty - This code not part of the HTTP status code specification, but it is needed for proto
+ // `enum` type.
+ StatusCode_Empty StatusCode = 0
+ StatusCode_Continue StatusCode = 100
+ StatusCode_OK StatusCode = 200
+ StatusCode_Created StatusCode = 201
+ StatusCode_Accepted StatusCode = 202
+ StatusCode_NonAuthoritativeInformation StatusCode = 203
+ StatusCode_NoContent StatusCode = 204
+ StatusCode_ResetContent StatusCode = 205
+ StatusCode_PartialContent StatusCode = 206
+ StatusCode_MultiStatus StatusCode = 207
+ StatusCode_AlreadyReported StatusCode = 208
+ StatusCode_IMUsed StatusCode = 226
+ StatusCode_MultipleChoices StatusCode = 300
+ StatusCode_MovedPermanently StatusCode = 301
+ StatusCode_Found StatusCode = 302
+ StatusCode_SeeOther StatusCode = 303
+ StatusCode_NotModified StatusCode = 304
+ StatusCode_UseProxy StatusCode = 305
+ StatusCode_TemporaryRedirect StatusCode = 307
+ StatusCode_PermanentRedirect StatusCode = 308
+ StatusCode_BadRequest StatusCode = 400
+ StatusCode_Unauthorized StatusCode = 401
+ StatusCode_PaymentRequired StatusCode = 402
+ StatusCode_Forbidden StatusCode = 403
+ StatusCode_NotFound StatusCode = 404
+ StatusCode_MethodNotAllowed StatusCode = 405
+ StatusCode_NotAcceptable StatusCode = 406
+ StatusCode_ProxyAuthenticationRequired StatusCode = 407
+ StatusCode_RequestTimeout StatusCode = 408
+ StatusCode_Conflict StatusCode = 409
+ StatusCode_Gone StatusCode = 410
+ StatusCode_LengthRequired StatusCode = 411
+ StatusCode_PreconditionFailed StatusCode = 412
+ StatusCode_PayloadTooLarge StatusCode = 413
+ StatusCode_URITooLong StatusCode = 414
+ StatusCode_UnsupportedMediaType StatusCode = 415
+ StatusCode_RangeNotSatisfiable StatusCode = 416
+ StatusCode_ExpectationFailed StatusCode = 417
+ StatusCode_MisdirectedRequest StatusCode = 421
+ StatusCode_UnprocessableEntity StatusCode = 422
+ StatusCode_Locked StatusCode = 423
+ StatusCode_FailedDependency StatusCode = 424
+ StatusCode_UpgradeRequired StatusCode = 426
+ StatusCode_PreconditionRequired StatusCode = 428
+ StatusCode_TooManyRequests StatusCode = 429
+ StatusCode_RequestHeaderFieldsTooLarge StatusCode = 431
+ StatusCode_InternalServerError StatusCode = 500
+ StatusCode_NotImplemented StatusCode = 501
+ StatusCode_BadGateway StatusCode = 502
+ StatusCode_ServiceUnavailable StatusCode = 503
+ StatusCode_GatewayTimeout StatusCode = 504
+ StatusCode_HTTPVersionNotSupported StatusCode = 505
+ StatusCode_VariantAlsoNegotiates StatusCode = 506
+ StatusCode_InsufficientStorage StatusCode = 507
+ StatusCode_LoopDetected StatusCode = 508
+ StatusCode_NotExtended StatusCode = 510
+ StatusCode_NetworkAuthenticationRequired StatusCode = 511
+)
+
+// Enum value maps for StatusCode.
+var (
+ StatusCode_name = map[int32]string{
+ 0: "Empty",
+ 100: "Continue",
+ 200: "OK",
+ 201: "Created",
+ 202: "Accepted",
+ 203: "NonAuthoritativeInformation",
+ 204: "NoContent",
+ 205: "ResetContent",
+ 206: "PartialContent",
+ 207: "MultiStatus",
+ 208: "AlreadyReported",
+ 226: "IMUsed",
+ 300: "MultipleChoices",
+ 301: "MovedPermanently",
+ 302: "Found",
+ 303: "SeeOther",
+ 304: "NotModified",
+ 305: "UseProxy",
+ 307: "TemporaryRedirect",
+ 308: "PermanentRedirect",
+ 400: "BadRequest",
+ 401: "Unauthorized",
+ 402: "PaymentRequired",
+ 403: "Forbidden",
+ 404: "NotFound",
+ 405: "MethodNotAllowed",
+ 406: "NotAcceptable",
+ 407: "ProxyAuthenticationRequired",
+ 408: "RequestTimeout",
+ 409: "Conflict",
+ 410: "Gone",
+ 411: "LengthRequired",
+ 412: "PreconditionFailed",
+ 413: "PayloadTooLarge",
+ 414: "URITooLong",
+ 415: "UnsupportedMediaType",
+ 416: "RangeNotSatisfiable",
+ 417: "ExpectationFailed",
+ 421: "MisdirectedRequest",
+ 422: "UnprocessableEntity",
+ 423: "Locked",
+ 424: "FailedDependency",
+ 426: "UpgradeRequired",
+ 428: "PreconditionRequired",
+ 429: "TooManyRequests",
+ 431: "RequestHeaderFieldsTooLarge",
+ 500: "InternalServerError",
+ 501: "NotImplemented",
+ 502: "BadGateway",
+ 503: "ServiceUnavailable",
+ 504: "GatewayTimeout",
+ 505: "HTTPVersionNotSupported",
+ 506: "VariantAlsoNegotiates",
+ 507: "InsufficientStorage",
+ 508: "LoopDetected",
+ 510: "NotExtended",
+ 511: "NetworkAuthenticationRequired",
+ }
+ StatusCode_value = map[string]int32{
+ "Empty": 0,
+ "Continue": 100,
+ "OK": 200,
+ "Created": 201,
+ "Accepted": 202,
+ "NonAuthoritativeInformation": 203,
+ "NoContent": 204,
+ "ResetContent": 205,
+ "PartialContent": 206,
+ "MultiStatus": 207,
+ "AlreadyReported": 208,
+ "IMUsed": 226,
+ "MultipleChoices": 300,
+ "MovedPermanently": 301,
+ "Found": 302,
+ "SeeOther": 303,
+ "NotModified": 304,
+ "UseProxy": 305,
+ "TemporaryRedirect": 307,
+ "PermanentRedirect": 308,
+ "BadRequest": 400,
+ "Unauthorized": 401,
+ "PaymentRequired": 402,
+ "Forbidden": 403,
+ "NotFound": 404,
+ "MethodNotAllowed": 405,
+ "NotAcceptable": 406,
+ "ProxyAuthenticationRequired": 407,
+ "RequestTimeout": 408,
+ "Conflict": 409,
+ "Gone": 410,
+ "LengthRequired": 411,
+ "PreconditionFailed": 412,
+ "PayloadTooLarge": 413,
+ "URITooLong": 414,
+ "UnsupportedMediaType": 415,
+ "RangeNotSatisfiable": 416,
+ "ExpectationFailed": 417,
+ "MisdirectedRequest": 421,
+ "UnprocessableEntity": 422,
+ "Locked": 423,
+ "FailedDependency": 424,
+ "UpgradeRequired": 426,
+ "PreconditionRequired": 428,
+ "TooManyRequests": 429,
+ "RequestHeaderFieldsTooLarge": 431,
+ "InternalServerError": 500,
+ "NotImplemented": 501,
+ "BadGateway": 502,
+ "ServiceUnavailable": 503,
+ "GatewayTimeout": 504,
+ "HTTPVersionNotSupported": 505,
+ "VariantAlsoNegotiates": 506,
+ "InsufficientStorage": 507,
+ "LoopDetected": 508,
+ "NotExtended": 510,
+ "NetworkAuthenticationRequired": 511,
+ }
+)
+
+func (x StatusCode) Enum() *StatusCode {
+ p := new(StatusCode)
+ *p = x
+ return p
+}
+
+func (x StatusCode) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (StatusCode) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_type_v3_http_status_proto_enumTypes[0].Descriptor()
+}
+
+func (StatusCode) Type() protoreflect.EnumType {
+ return &file_envoy_type_v3_http_status_proto_enumTypes[0]
+}
+
+func (x StatusCode) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use StatusCode.Descriptor instead.
+func (StatusCode) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_type_v3_http_status_proto_rawDescGZIP(), []int{0}
+}
+
+// HTTP status.
+type HttpStatus struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Supplies HTTP response code.
+ Code StatusCode `protobuf:"varint,1,opt,name=code,proto3,enum=envoy.type.v3.StatusCode" json:"code,omitempty"`
+}
+
+func (x *HttpStatus) Reset() {
+ *x = HttpStatus{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_type_v3_http_status_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HttpStatus) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HttpStatus) ProtoMessage() {}
+
+func (x *HttpStatus) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_type_v3_http_status_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HttpStatus.ProtoReflect.Descriptor instead.
+func (*HttpStatus) Descriptor() ([]byte, []int) {
+ return file_envoy_type_v3_http_status_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *HttpStatus) GetCode() StatusCode {
+ if x != nil {
+ return x.Code
+ }
+ return StatusCode_Empty
+}
+
+var File_envoy_type_v3_http_status_proto protoreflect.FileDescriptor
+
+var file_envoy_type_v3_http_status_proto_rawDesc = []byte{
+ 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f,
+ 0x68, 0x74, 0x74, 0x70, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x12, 0x0d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33,
+ 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x65, 0x0a, 0x0a, 0x48,
+ 0x74, 0x74, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x39, 0x0a, 0x04, 0x63, 0x6f, 0x64,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f,
+ 0x64, 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x82, 0x01, 0x04, 0x10, 0x01, 0x20, 0x00, 0x52, 0x04,
+ 0x63, 0x6f, 0x64, 0x65, 0x3a, 0x1c, 0x9a, 0xc5, 0x88, 0x1e, 0x17, 0x0a, 0x15, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x53, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x2a, 0xb5, 0x09, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64,
+ 0x65, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08,
+ 0x43, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x10, 0x64, 0x12, 0x07, 0x0a, 0x02, 0x4f, 0x4b,
+ 0x10, 0xc8, 0x01, 0x12, 0x0c, 0x0a, 0x07, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x10, 0xc9,
+ 0x01, 0x12, 0x0d, 0x0a, 0x08, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x10, 0xca, 0x01,
+ 0x12, 0x20, 0x0a, 0x1b, 0x4e, 0x6f, 0x6e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61,
+ 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x10,
+ 0xcb, 0x01, 0x12, 0x0e, 0x0a, 0x09, 0x4e, 0x6f, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x10,
+ 0xcc, 0x01, 0x12, 0x11, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65,
+ 0x6e, 0x74, 0x10, 0xcd, 0x01, 0x12, 0x13, 0x0a, 0x0e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c,
+ 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x10, 0xce, 0x01, 0x12, 0x10, 0x0a, 0x0b, 0x4d, 0x75,
+ 0x6c, 0x74, 0x69, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x10, 0xcf, 0x01, 0x12, 0x14, 0x0a, 0x0f,
+ 0x41, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x10,
+ 0xd0, 0x01, 0x12, 0x0b, 0x0a, 0x06, 0x49, 0x4d, 0x55, 0x73, 0x65, 0x64, 0x10, 0xe2, 0x01, 0x12,
+ 0x14, 0x0a, 0x0f, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x43, 0x68, 0x6f, 0x69, 0x63,
+ 0x65, 0x73, 0x10, 0xac, 0x02, 0x12, 0x15, 0x0a, 0x10, 0x4d, 0x6f, 0x76, 0x65, 0x64, 0x50, 0x65,
+ 0x72, 0x6d, 0x61, 0x6e, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x10, 0xad, 0x02, 0x12, 0x0a, 0x0a, 0x05,
+ 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x10, 0xae, 0x02, 0x12, 0x0d, 0x0a, 0x08, 0x53, 0x65, 0x65, 0x4f,
+ 0x74, 0x68, 0x65, 0x72, 0x10, 0xaf, 0x02, 0x12, 0x10, 0x0a, 0x0b, 0x4e, 0x6f, 0x74, 0x4d, 0x6f,
+ 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x10, 0xb0, 0x02, 0x12, 0x0d, 0x0a, 0x08, 0x55, 0x73, 0x65,
+ 0x50, 0x72, 0x6f, 0x78, 0x79, 0x10, 0xb1, 0x02, 0x12, 0x16, 0x0a, 0x11, 0x54, 0x65, 0x6d, 0x70,
+ 0x6f, 0x72, 0x61, 0x72, 0x79, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x10, 0xb3, 0x02,
+ 0x12, 0x16, 0x0a, 0x11, 0x50, 0x65, 0x72, 0x6d, 0x61, 0x6e, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x64,
+ 0x69, 0x72, 0x65, 0x63, 0x74, 0x10, 0xb4, 0x02, 0x12, 0x0f, 0x0a, 0x0a, 0x42, 0x61, 0x64, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x10, 0x90, 0x03, 0x12, 0x11, 0x0a, 0x0c, 0x55, 0x6e, 0x61,
+ 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x10, 0x91, 0x03, 0x12, 0x14, 0x0a, 0x0f,
+ 0x50, 0x61, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x10,
+ 0x92, 0x03, 0x12, 0x0e, 0x0a, 0x09, 0x46, 0x6f, 0x72, 0x62, 0x69, 0x64, 0x64, 0x65, 0x6e, 0x10,
+ 0x93, 0x03, 0x12, 0x0d, 0x0a, 0x08, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x10, 0x94,
+ 0x03, 0x12, 0x15, 0x0a, 0x10, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x6f, 0x74, 0x41, 0x6c,
+ 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x10, 0x95, 0x03, 0x12, 0x12, 0x0a, 0x0d, 0x4e, 0x6f, 0x74, 0x41,
+ 0x63, 0x63, 0x65, 0x70, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x10, 0x96, 0x03, 0x12, 0x20, 0x0a, 0x1b,
+ 0x50, 0x72, 0x6f, 0x78, 0x79, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x10, 0x97, 0x03, 0x12, 0x13,
+ 0x0a, 0x0e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74,
+ 0x10, 0x98, 0x03, 0x12, 0x0d, 0x0a, 0x08, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x10,
+ 0x99, 0x03, 0x12, 0x09, 0x0a, 0x04, 0x47, 0x6f, 0x6e, 0x65, 0x10, 0x9a, 0x03, 0x12, 0x13, 0x0a,
+ 0x0e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x10,
+ 0x9b, 0x03, 0x12, 0x17, 0x0a, 0x12, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x10, 0x9c, 0x03, 0x12, 0x14, 0x0a, 0x0f, 0x50,
+ 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x6f, 0x6f, 0x4c, 0x61, 0x72, 0x67, 0x65, 0x10, 0x9d,
+ 0x03, 0x12, 0x0f, 0x0a, 0x0a, 0x55, 0x52, 0x49, 0x54, 0x6f, 0x6f, 0x4c, 0x6f, 0x6e, 0x67, 0x10,
+ 0x9e, 0x03, 0x12, 0x19, 0x0a, 0x14, 0x55, 0x6e, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65,
+ 0x64, 0x4d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x10, 0x9f, 0x03, 0x12, 0x18, 0x0a,
+ 0x13, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4e, 0x6f, 0x74, 0x53, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69,
+ 0x61, 0x62, 0x6c, 0x65, 0x10, 0xa0, 0x03, 0x12, 0x16, 0x0a, 0x11, 0x45, 0x78, 0x70, 0x65, 0x63,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x10, 0xa1, 0x03, 0x12,
+ 0x17, 0x0a, 0x12, 0x4d, 0x69, 0x73, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x10, 0xa5, 0x03, 0x12, 0x18, 0x0a, 0x13, 0x55, 0x6e, 0x70, 0x72,
+ 0x6f, 0x63, 0x65, 0x73, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x10,
+ 0xa6, 0x03, 0x12, 0x0b, 0x0a, 0x06, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x10, 0xa7, 0x03, 0x12,
+ 0x15, 0x0a, 0x10, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65,
+ 0x6e, 0x63, 0x79, 0x10, 0xa8, 0x03, 0x12, 0x14, 0x0a, 0x0f, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64,
+ 0x65, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x10, 0xaa, 0x03, 0x12, 0x19, 0x0a, 0x14,
+ 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75,
+ 0x69, 0x72, 0x65, 0x64, 0x10, 0xac, 0x03, 0x12, 0x14, 0x0a, 0x0f, 0x54, 0x6f, 0x6f, 0x4d, 0x61,
+ 0x6e, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x10, 0xad, 0x03, 0x12, 0x20, 0x0a,
+ 0x1b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x46, 0x69,
+ 0x65, 0x6c, 0x64, 0x73, 0x54, 0x6f, 0x6f, 0x4c, 0x61, 0x72, 0x67, 0x65, 0x10, 0xaf, 0x03, 0x12,
+ 0x18, 0x0a, 0x13, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x65,
+ 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x10, 0xf4, 0x03, 0x12, 0x13, 0x0a, 0x0e, 0x4e, 0x6f, 0x74,
+ 0x49, 0x6d, 0x70, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x10, 0xf5, 0x03, 0x12, 0x0f,
+ 0x0a, 0x0a, 0x42, 0x61, 0x64, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x10, 0xf6, 0x03, 0x12,
+ 0x17, 0x0a, 0x12, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69,
+ 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x10, 0xf7, 0x03, 0x12, 0x13, 0x0a, 0x0e, 0x47, 0x61, 0x74, 0x65,
+ 0x77, 0x61, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x10, 0xf8, 0x03, 0x12, 0x1c, 0x0a,
+ 0x17, 0x48, 0x54, 0x54, 0x50, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x53,
+ 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x10, 0xf9, 0x03, 0x12, 0x1a, 0x0a, 0x15, 0x56,
+ 0x61, 0x72, 0x69, 0x61, 0x6e, 0x74, 0x41, 0x6c, 0x73, 0x6f, 0x4e, 0x65, 0x67, 0x6f, 0x74, 0x69,
+ 0x61, 0x74, 0x65, 0x73, 0x10, 0xfa, 0x03, 0x12, 0x18, 0x0a, 0x13, 0x49, 0x6e, 0x73, 0x75, 0x66,
+ 0x66, 0x69, 0x63, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x10, 0xfb,
+ 0x03, 0x12, 0x11, 0x0a, 0x0c, 0x4c, 0x6f, 0x6f, 0x70, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x65,
+ 0x64, 0x10, 0xfc, 0x03, 0x12, 0x10, 0x0a, 0x0b, 0x4e, 0x6f, 0x74, 0x45, 0x78, 0x74, 0x65, 0x6e,
+ 0x64, 0x65, 0x64, 0x10, 0xfe, 0x03, 0x12, 0x22, 0x0a, 0x1d, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72,
+ 0x6b, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x10, 0xff, 0x03, 0x42, 0x38, 0x0a, 0x1b, 0x69, 0x6f,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x48, 0x74, 0x74, 0x70, 0x53,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0xba, 0x80, 0xc8, 0xd1,
+ 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_type_v3_http_status_proto_rawDescOnce sync.Once
+ file_envoy_type_v3_http_status_proto_rawDescData = file_envoy_type_v3_http_status_proto_rawDesc
+)
+
+func file_envoy_type_v3_http_status_proto_rawDescGZIP() []byte {
+ file_envoy_type_v3_http_status_proto_rawDescOnce.Do(func() {
+ file_envoy_type_v3_http_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_v3_http_status_proto_rawDescData)
+ })
+ return file_envoy_type_v3_http_status_proto_rawDescData
+}
+
+var file_envoy_type_v3_http_status_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_envoy_type_v3_http_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_envoy_type_v3_http_status_proto_goTypes = []interface{}{
+ (StatusCode)(0), // 0: envoy.type.v3.StatusCode
+ (*HttpStatus)(nil), // 1: envoy.type.v3.HttpStatus
+}
+var file_envoy_type_v3_http_status_proto_depIdxs = []int32{
+ 0, // 0: envoy.type.v3.HttpStatus.code:type_name -> envoy.type.v3.StatusCode
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_envoy_type_v3_http_status_proto_init() }
+func file_envoy_type_v3_http_status_proto_init() {
+ if File_envoy_type_v3_http_status_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_type_v3_http_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HttpStatus); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_type_v3_http_status_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_type_v3_http_status_proto_goTypes,
+ DependencyIndexes: file_envoy_type_v3_http_status_proto_depIdxs,
+ EnumInfos: file_envoy_type_v3_http_status_proto_enumTypes,
+ MessageInfos: file_envoy_type_v3_http_status_proto_msgTypes,
+ }.Build()
+ File_envoy_type_v3_http_status_proto = out.File
+ file_envoy_type_v3_http_status_proto_rawDesc = nil
+ file_envoy_type_v3_http_status_proto_goTypes = nil
+ file_envoy_type_v3_http_status_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.validate.go
new file mode 100644
index 000000000..530eb9aa3
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.validate.go
@@ -0,0 +1,116 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/type/v3/http_status.proto
+
+package envoy_type_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// Validate checks the field values on HttpStatus with the rules defined in the
+// proto definition for this message. If any rules are violated, an error is returned.
+func (m *HttpStatus) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if _, ok := _HttpStatus_Code_NotInLookup[m.GetCode()]; ok {
+ return HttpStatusValidationError{
+ field: "Code",
+ reason: "value must not be in list [0]",
+ }
+ }
+
+ if _, ok := StatusCode_name[int32(m.GetCode())]; !ok {
+ return HttpStatusValidationError{
+ field: "Code",
+ reason: "value must be one of the defined enum values",
+ }
+ }
+
+ return nil
+}
+
+// HttpStatusValidationError is the validation error returned by
+// HttpStatus.Validate if the designated constraints aren't met.
+type HttpStatusValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HttpStatusValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HttpStatusValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HttpStatusValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HttpStatusValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HttpStatusValidationError) ErrorName() string { return "HttpStatusValidationError" }
+
+// Error satisfies the builtin error interface
+func (e HttpStatusValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHttpStatus.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HttpStatusValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HttpStatusValidationError{}
+
+var _HttpStatus_Code_NotInLookup = map[StatusCode]struct{}{
+ 0: {},
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent.pb.go
new file mode 100644
index 000000000..bfe6ec110
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent.pb.go
@@ -0,0 +1,318 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/type/v3/percent.proto
+
+package envoy_type_v3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// Fraction percentages support several fixed denominator values.
+type FractionalPercent_DenominatorType int32
+
+const (
+ // 100.
+ //
+ // **Example**: 1/100 = 1%.
+ FractionalPercent_HUNDRED FractionalPercent_DenominatorType = 0
+ // 10,000.
+ //
+ // **Example**: 1/10000 = 0.01%.
+ FractionalPercent_TEN_THOUSAND FractionalPercent_DenominatorType = 1
+ // 1,000,000.
+ //
+ // **Example**: 1/1000000 = 0.0001%.
+ FractionalPercent_MILLION FractionalPercent_DenominatorType = 2
+)
+
+// Enum value maps for FractionalPercent_DenominatorType.
+var (
+ FractionalPercent_DenominatorType_name = map[int32]string{
+ 0: "HUNDRED",
+ 1: "TEN_THOUSAND",
+ 2: "MILLION",
+ }
+ FractionalPercent_DenominatorType_value = map[string]int32{
+ "HUNDRED": 0,
+ "TEN_THOUSAND": 1,
+ "MILLION": 2,
+ }
+)
+
+func (x FractionalPercent_DenominatorType) Enum() *FractionalPercent_DenominatorType {
+ p := new(FractionalPercent_DenominatorType)
+ *p = x
+ return p
+}
+
+func (x FractionalPercent_DenominatorType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FractionalPercent_DenominatorType) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_type_v3_percent_proto_enumTypes[0].Descriptor()
+}
+
+func (FractionalPercent_DenominatorType) Type() protoreflect.EnumType {
+ return &file_envoy_type_v3_percent_proto_enumTypes[0]
+}
+
+func (x FractionalPercent_DenominatorType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use FractionalPercent_DenominatorType.Descriptor instead.
+func (FractionalPercent_DenominatorType) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_type_v3_percent_proto_rawDescGZIP(), []int{1, 0}
+}
+
+// Identifies a percentage, in the range [0.0, 100.0].
+type Percent struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *Percent) Reset() {
+ *x = Percent{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_type_v3_percent_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Percent) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Percent) ProtoMessage() {}
+
+func (x *Percent) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_type_v3_percent_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Percent.ProtoReflect.Descriptor instead.
+func (*Percent) Descriptor() ([]byte, []int) {
+ return file_envoy_type_v3_percent_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Percent) GetValue() float64 {
+ if x != nil {
+ return x.Value
+ }
+ return 0
+}
+
+// A fractional percentage is used in cases in which for performance reasons performing floating
+// point to integer conversions during randomness calculations is undesirable. The message includes
+// both a numerator and denominator that together determine the final fractional value.
+//
+// * **Example**: 1/100 = 1%.
+// * **Example**: 3/10000 = 0.03%.
+type FractionalPercent struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Specifies the numerator. Defaults to 0.
+ Numerator uint32 `protobuf:"varint,1,opt,name=numerator,proto3" json:"numerator,omitempty"`
+ // Specifies the denominator. If the denominator specified is less than the numerator, the final
+ // fractional percentage is capped at 1 (100%).
+ Denominator FractionalPercent_DenominatorType `protobuf:"varint,2,opt,name=denominator,proto3,enum=envoy.type.v3.FractionalPercent_DenominatorType" json:"denominator,omitempty"`
+}
+
+func (x *FractionalPercent) Reset() {
+ *x = FractionalPercent{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_type_v3_percent_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FractionalPercent) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FractionalPercent) ProtoMessage() {}
+
+func (x *FractionalPercent) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_type_v3_percent_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FractionalPercent.ProtoReflect.Descriptor instead.
+func (*FractionalPercent) Descriptor() ([]byte, []int) {
+ return file_envoy_type_v3_percent_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *FractionalPercent) GetNumerator() uint32 {
+ if x != nil {
+ return x.Numerator
+ }
+ return 0
+}
+
+func (x *FractionalPercent) GetDenominator() FractionalPercent_DenominatorType {
+ if x != nil {
+ return x.Denominator
+ }
+ return FractionalPercent_HUNDRED
+}
+
+var File_envoy_type_v3_percent_proto protoreflect.FileDescriptor
+
+var file_envoy_type_v3_percent_proto_rawDesc = []byte{
+ 0x0a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f,
+ 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x75, 0x64,
+ 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70,
+ 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17,
+ 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x53, 0x0a, 0x07, 0x50, 0x65, 0x72, 0x63, 0x65,
+ 0x6e, 0x74, 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x01, 0x42, 0x17, 0xfa, 0x42, 0x14, 0x12, 0x12, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x59,
+ 0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x3a, 0x19, 0x9a, 0xc5, 0x88, 0x1e, 0x14, 0x0a, 0x12, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x74, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x22, 0xf3, 0x01, 0x0a,
+ 0x11, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65,
+ 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72,
+ 0x12, 0x5c, 0x0a, 0x0b, 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79,
+ 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c,
+ 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61,
+ 0x74, 0x6f, 0x72, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10,
+ 0x01, 0x52, 0x0b, 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x22, 0x3d,
+ 0x0a, 0x0f, 0x44, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x54, 0x79, 0x70,
+ 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x48, 0x55, 0x4e, 0x44, 0x52, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10,
+ 0x0a, 0x0c, 0x54, 0x45, 0x4e, 0x5f, 0x54, 0x48, 0x4f, 0x55, 0x53, 0x41, 0x4e, 0x44, 0x10, 0x01,
+ 0x12, 0x0b, 0x0a, 0x07, 0x4d, 0x49, 0x4c, 0x4c, 0x49, 0x4f, 0x4e, 0x10, 0x02, 0x3a, 0x23, 0x9a,
+ 0xc5, 0x88, 0x1e, 0x1e, 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65,
+ 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65,
+ 0x6e, 0x74, 0x42, 0x35, 0x0a, 0x1b, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72,
+ 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76,
+ 0x33, 0x42, 0x0c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
+ 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+}
+
+var (
+ file_envoy_type_v3_percent_proto_rawDescOnce sync.Once
+ file_envoy_type_v3_percent_proto_rawDescData = file_envoy_type_v3_percent_proto_rawDesc
+)
+
+func file_envoy_type_v3_percent_proto_rawDescGZIP() []byte {
+ file_envoy_type_v3_percent_proto_rawDescOnce.Do(func() {
+ file_envoy_type_v3_percent_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_v3_percent_proto_rawDescData)
+ })
+ return file_envoy_type_v3_percent_proto_rawDescData
+}
+
+var file_envoy_type_v3_percent_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_envoy_type_v3_percent_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_envoy_type_v3_percent_proto_goTypes = []interface{}{
+ (FractionalPercent_DenominatorType)(0), // 0: envoy.type.v3.FractionalPercent.DenominatorType
+ (*Percent)(nil), // 1: envoy.type.v3.Percent
+ (*FractionalPercent)(nil), // 2: envoy.type.v3.FractionalPercent
+}
+var file_envoy_type_v3_percent_proto_depIdxs = []int32{
+ 0, // 0: envoy.type.v3.FractionalPercent.denominator:type_name -> envoy.type.v3.FractionalPercent.DenominatorType
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_envoy_type_v3_percent_proto_init() }
+func file_envoy_type_v3_percent_proto_init() {
+ if File_envoy_type_v3_percent_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_type_v3_percent_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Percent); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_type_v3_percent_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FractionalPercent); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_type_v3_percent_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_type_v3_percent_proto_goTypes,
+ DependencyIndexes: file_envoy_type_v3_percent_proto_depIdxs,
+ EnumInfos: file_envoy_type_v3_percent_proto_enumTypes,
+ MessageInfos: file_envoy_type_v3_percent_proto_msgTypes,
+ }.Build()
+ File_envoy_type_v3_percent_proto = out.File
+ file_envoy_type_v3_percent_proto_rawDesc = nil
+ file_envoy_type_v3_percent_proto_goTypes = nil
+ file_envoy_type_v3_percent_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent.pb.validate.go
new file mode 100644
index 000000000..f3d798171
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent.pb.validate.go
@@ -0,0 +1,181 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/type/v3/percent.proto
+
+package envoy_type_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// Validate checks the field values on Percent with the rules defined in the
+// proto definition for this message. If any rules are violated, an error is returned.
+func (m *Percent) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if val := m.GetValue(); val < 0 || val > 100 {
+ return PercentValidationError{
+ field: "Value",
+ reason: "value must be inside range [0, 100]",
+ }
+ }
+
+ return nil
+}
+
+// PercentValidationError is the validation error returned by Percent.Validate
+// if the designated constraints aren't met.
+type PercentValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e PercentValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e PercentValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e PercentValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e PercentValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e PercentValidationError) ErrorName() string { return "PercentValidationError" }
+
+// Error satisfies the builtin error interface
+func (e PercentValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sPercent.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = PercentValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = PercentValidationError{}
+
+// Validate checks the field values on FractionalPercent with the rules defined
+// in the proto definition for this message. If any rules are violated, an
+// error is returned.
+func (m *FractionalPercent) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ // no validation rules for Numerator
+
+ if _, ok := FractionalPercent_DenominatorType_name[int32(m.GetDenominator())]; !ok {
+ return FractionalPercentValidationError{
+ field: "Denominator",
+ reason: "value must be one of the defined enum values",
+ }
+ }
+
+ return nil
+}
+
+// FractionalPercentValidationError is the validation error returned by
+// FractionalPercent.Validate if the designated constraints aren't met.
+type FractionalPercentValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e FractionalPercentValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e FractionalPercentValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e FractionalPercentValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e FractionalPercentValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e FractionalPercentValidationError) ErrorName() string {
+ return "FractionalPercentValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e FractionalPercentValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sFractionalPercent.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = FractionalPercentValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = FractionalPercentValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range.pb.go
new file mode 100644
index 000000000..437ce79c5
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range.pb.go
@@ -0,0 +1,325 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/type/v3/range.proto
+
+package envoy_type_v3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// Specifies the int64 start and end of the range using half-open interval semantics [start,
+// end).
+type Int64Range struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // start of the range (inclusive)
+ Start int64 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"`
+ // end of the range (exclusive)
+ End int64 `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"`
+}
+
+func (x *Int64Range) Reset() {
+ *x = Int64Range{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_type_v3_range_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Int64Range) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Int64Range) ProtoMessage() {}
+
+func (x *Int64Range) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_type_v3_range_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Int64Range.ProtoReflect.Descriptor instead.
+func (*Int64Range) Descriptor() ([]byte, []int) {
+ return file_envoy_type_v3_range_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Int64Range) GetStart() int64 {
+ if x != nil {
+ return x.Start
+ }
+ return 0
+}
+
+func (x *Int64Range) GetEnd() int64 {
+ if x != nil {
+ return x.End
+ }
+ return 0
+}
+
+// Specifies the int32 start and end of the range using half-open interval semantics [start,
+// end).
+type Int32Range struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // start of the range (inclusive)
+ Start int32 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"`
+ // end of the range (exclusive)
+ End int32 `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"`
+}
+
+func (x *Int32Range) Reset() {
+ *x = Int32Range{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_type_v3_range_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Int32Range) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Int32Range) ProtoMessage() {}
+
+func (x *Int32Range) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_type_v3_range_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Int32Range.ProtoReflect.Descriptor instead.
+func (*Int32Range) Descriptor() ([]byte, []int) {
+ return file_envoy_type_v3_range_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Int32Range) GetStart() int32 {
+ if x != nil {
+ return x.Start
+ }
+ return 0
+}
+
+func (x *Int32Range) GetEnd() int32 {
+ if x != nil {
+ return x.End
+ }
+ return 0
+}
+
+// Specifies the double start and end of the range using half-open interval semantics [start,
+// end).
+type DoubleRange struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // start of the range (inclusive)
+ Start float64 `protobuf:"fixed64,1,opt,name=start,proto3" json:"start,omitempty"`
+ // end of the range (exclusive)
+ End float64 `protobuf:"fixed64,2,opt,name=end,proto3" json:"end,omitempty"`
+}
+
+func (x *DoubleRange) Reset() {
+ *x = DoubleRange{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_type_v3_range_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DoubleRange) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DoubleRange) ProtoMessage() {}
+
+func (x *DoubleRange) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_type_v3_range_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DoubleRange.ProtoReflect.Descriptor instead.
+func (*DoubleRange) Descriptor() ([]byte, []int) {
+ return file_envoy_type_v3_range_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *DoubleRange) GetStart() float64 {
+ if x != nil {
+ return x.Start
+ }
+ return 0
+}
+
+func (x *DoubleRange) GetEnd() float64 {
+ if x != nil {
+ return x.End
+ }
+ return 0
+}
+
+var File_envoy_type_v3_range_proto protoreflect.FileDescriptor
+
+var file_envoy_type_v3_range_proto_rawDesc = []byte{
+ 0x0a, 0x19, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f,
+ 0x72, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61,
+ 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61,
+ 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f,
+ 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x52, 0x0a, 0x0a,
+ 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74,
+ 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74,
+ 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x65,
+ 0x6e, 0x64, 0x3a, 0x1c, 0x9a, 0xc5, 0x88, 0x1e, 0x17, 0x0a, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, 0x65,
+ 0x22, 0x52, 0x0a, 0x0a, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14,
+ 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73,
+ 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x3a, 0x1c, 0x9a, 0xc5, 0x88, 0x1e, 0x17, 0x0a, 0x15, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x52,
+ 0x61, 0x6e, 0x67, 0x65, 0x22, 0x54, 0x0a, 0x0b, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x61,
+ 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x01, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x3a, 0x1d, 0x9a, 0xc5, 0x88,
+ 0x1e, 0x18, 0x0a, 0x16, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44,
+ 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x33, 0x0a, 0x1b, 0x69, 0x6f,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x52, 0x61, 0x6e, 0x67, 0x65,
+ 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62,
+ 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_type_v3_range_proto_rawDescOnce sync.Once
+ file_envoy_type_v3_range_proto_rawDescData = file_envoy_type_v3_range_proto_rawDesc
+)
+
+func file_envoy_type_v3_range_proto_rawDescGZIP() []byte {
+ file_envoy_type_v3_range_proto_rawDescOnce.Do(func() {
+ file_envoy_type_v3_range_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_v3_range_proto_rawDescData)
+ })
+ return file_envoy_type_v3_range_proto_rawDescData
+}
+
+var file_envoy_type_v3_range_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_envoy_type_v3_range_proto_goTypes = []interface{}{
+ (*Int64Range)(nil), // 0: envoy.type.v3.Int64Range
+ (*Int32Range)(nil), // 1: envoy.type.v3.Int32Range
+ (*DoubleRange)(nil), // 2: envoy.type.v3.DoubleRange
+}
+var file_envoy_type_v3_range_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_envoy_type_v3_range_proto_init() }
+func file_envoy_type_v3_range_proto_init() {
+ if File_envoy_type_v3_range_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_type_v3_range_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Int64Range); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_type_v3_range_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Int32Range); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_type_v3_range_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DoubleRange); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_type_v3_range_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 3,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_type_v3_range_proto_goTypes,
+ DependencyIndexes: file_envoy_type_v3_range_proto_depIdxs,
+ MessageInfos: file_envoy_type_v3_range_proto_msgTypes,
+ }.Build()
+ File_envoy_type_v3_range_proto = out.File
+ file_envoy_type_v3_range_proto_rawDesc = nil
+ file_envoy_type_v3_range_proto_goTypes = nil
+ file_envoy_type_v3_range_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range.pb.validate.go
new file mode 100644
index 000000000..de64c2704
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range.pb.validate.go
@@ -0,0 +1,239 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/type/v3/range.proto
+
+package envoy_type_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// Validate checks the field values on Int64Range with the rules defined in the
+// proto definition for this message. If any rules are violated, an error is returned.
+func (m *Int64Range) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ // no validation rules for Start
+
+ // no validation rules for End
+
+ return nil
+}
+
+// Int64RangeValidationError is the validation error returned by
+// Int64Range.Validate if the designated constraints aren't met.
+type Int64RangeValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Int64RangeValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Int64RangeValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Int64RangeValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Int64RangeValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Int64RangeValidationError) ErrorName() string { return "Int64RangeValidationError" }
+
+// Error satisfies the builtin error interface
+func (e Int64RangeValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sInt64Range.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Int64RangeValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Int64RangeValidationError{}
+
+// Validate checks the field values on Int32Range with the rules defined in the
+// proto definition for this message. If any rules are violated, an error is returned.
+func (m *Int32Range) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ // no validation rules for Start
+
+ // no validation rules for End
+
+ return nil
+}
+
+// Int32RangeValidationError is the validation error returned by
+// Int32Range.Validate if the designated constraints aren't met.
+type Int32RangeValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Int32RangeValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Int32RangeValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Int32RangeValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Int32RangeValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Int32RangeValidationError) ErrorName() string { return "Int32RangeValidationError" }
+
+// Error satisfies the builtin error interface
+func (e Int32RangeValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sInt32Range.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Int32RangeValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Int32RangeValidationError{}
+
+// Validate checks the field values on DoubleRange with the rules defined in
+// the proto definition for this message. If any rules are violated, an error
+// is returned.
+func (m *DoubleRange) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ // no validation rules for Start
+
+ // no validation rules for End
+
+ return nil
+}
+
+// DoubleRangeValidationError is the validation error returned by
+// DoubleRange.Validate if the designated constraints aren't met.
+type DoubleRangeValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e DoubleRangeValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e DoubleRangeValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e DoubleRangeValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e DoubleRangeValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e DoubleRangeValidationError) ErrorName() string { return "DoubleRangeValidationError" }
+
+// Error satisfies the builtin error interface
+func (e DoubleRangeValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sDoubleRange.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = DoubleRangeValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = DoubleRangeValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_unit.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_unit.pb.go
new file mode 100644
index 000000000..c67ba7ede
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_unit.pb.go
@@ -0,0 +1,156 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/type/v3/ratelimit_unit.proto
+
+package envoy_type_v3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// Identifies the unit of of time for rate limit.
+type RateLimitUnit int32
+
+const (
+ // The time unit is not known.
+ RateLimitUnit_UNKNOWN RateLimitUnit = 0
+ // The time unit representing a second.
+ RateLimitUnit_SECOND RateLimitUnit = 1
+ // The time unit representing a minute.
+ RateLimitUnit_MINUTE RateLimitUnit = 2
+ // The time unit representing an hour.
+ RateLimitUnit_HOUR RateLimitUnit = 3
+ // The time unit representing a day.
+ RateLimitUnit_DAY RateLimitUnit = 4
+)
+
+// Enum value maps for RateLimitUnit.
+var (
+ RateLimitUnit_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "SECOND",
+ 2: "MINUTE",
+ 3: "HOUR",
+ 4: "DAY",
+ }
+ RateLimitUnit_value = map[string]int32{
+ "UNKNOWN": 0,
+ "SECOND": 1,
+ "MINUTE": 2,
+ "HOUR": 3,
+ "DAY": 4,
+ }
+)
+
+func (x RateLimitUnit) Enum() *RateLimitUnit {
+ p := new(RateLimitUnit)
+ *p = x
+ return p
+}
+
+func (x RateLimitUnit) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (RateLimitUnit) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_type_v3_ratelimit_unit_proto_enumTypes[0].Descriptor()
+}
+
+func (RateLimitUnit) Type() protoreflect.EnumType {
+ return &file_envoy_type_v3_ratelimit_unit_proto_enumTypes[0]
+}
+
+func (x RateLimitUnit) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use RateLimitUnit.Descriptor instead.
+func (RateLimitUnit) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_type_v3_ratelimit_unit_proto_rawDescGZIP(), []int{0}
+}
+
+var File_envoy_type_v3_ratelimit_unit_proto protoreflect.FileDescriptor
+
+var file_envoy_type_v3_ratelimit_unit_proto_rawDesc = []byte{
+ 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f,
+ 0x72, 0x61, 0x74, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x75, 0x6e, 0x69, 0x74, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65,
+ 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x2a, 0x47, 0x0a, 0x0d, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x55,
+ 0x6e, 0x69, 0x74, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00,
+ 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x45, 0x43, 0x4f, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06,
+ 0x4d, 0x49, 0x4e, 0x55, 0x54, 0x45, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x4f, 0x55, 0x52,
+ 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x41, 0x59, 0x10, 0x04, 0x42, 0x3b, 0x0a, 0x1b, 0x69,
+ 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x12, 0x52, 0x61, 0x74, 0x65,
+ 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x55, 0x6e, 0x69, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
+ 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_type_v3_ratelimit_unit_proto_rawDescOnce sync.Once
+ file_envoy_type_v3_ratelimit_unit_proto_rawDescData = file_envoy_type_v3_ratelimit_unit_proto_rawDesc
+)
+
+func file_envoy_type_v3_ratelimit_unit_proto_rawDescGZIP() []byte {
+ file_envoy_type_v3_ratelimit_unit_proto_rawDescOnce.Do(func() {
+ file_envoy_type_v3_ratelimit_unit_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_v3_ratelimit_unit_proto_rawDescData)
+ })
+ return file_envoy_type_v3_ratelimit_unit_proto_rawDescData
+}
+
+var file_envoy_type_v3_ratelimit_unit_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_envoy_type_v3_ratelimit_unit_proto_goTypes = []interface{}{
+ (RateLimitUnit)(0), // 0: envoy.type.v3.RateLimitUnit
+}
+var file_envoy_type_v3_ratelimit_unit_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_envoy_type_v3_ratelimit_unit_proto_init() }
+func file_envoy_type_v3_ratelimit_unit_proto_init() {
+ if File_envoy_type_v3_ratelimit_unit_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_type_v3_ratelimit_unit_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 0,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_type_v3_ratelimit_unit_proto_goTypes,
+ DependencyIndexes: file_envoy_type_v3_ratelimit_unit_proto_depIdxs,
+ EnumInfos: file_envoy_type_v3_ratelimit_unit_proto_enumTypes,
+ }.Build()
+ File_envoy_type_v3_ratelimit_unit_proto = out.File
+ file_envoy_type_v3_ratelimit_unit_proto_rawDesc = nil
+ file_envoy_type_v3_ratelimit_unit_proto_goTypes = nil
+ file_envoy_type_v3_ratelimit_unit_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_unit.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_unit.pb.validate.go
new file mode 100644
index 000000000..00bc27fad
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_unit.pb.validate.go
@@ -0,0 +1,34 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/type/v3/ratelimit_unit.proto
+
+package envoy_type_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version.pb.go
new file mode 100644
index 000000000..2f43dfde0
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version.pb.go
@@ -0,0 +1,183 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/type/v3/semantic_version.proto
+
+package envoy_type_v3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// Envoy uses SemVer (https://semver.org/). Major/minor versions indicate
+// expected behaviors and APIs, the patch version field is used only
+// for security fixes and can be generally ignored.
+type SemanticVersion struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ MajorNumber uint32 `protobuf:"varint,1,opt,name=major_number,json=majorNumber,proto3" json:"major_number,omitempty"`
+ MinorNumber uint32 `protobuf:"varint,2,opt,name=minor_number,json=minorNumber,proto3" json:"minor_number,omitempty"`
+ Patch uint32 `protobuf:"varint,3,opt,name=patch,proto3" json:"patch,omitempty"`
+}
+
+func (x *SemanticVersion) Reset() {
+ *x = SemanticVersion{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_type_v3_semantic_version_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SemanticVersion) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SemanticVersion) ProtoMessage() {}
+
+func (x *SemanticVersion) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_type_v3_semantic_version_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SemanticVersion.ProtoReflect.Descriptor instead.
+func (*SemanticVersion) Descriptor() ([]byte, []int) {
+ return file_envoy_type_v3_semantic_version_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *SemanticVersion) GetMajorNumber() uint32 {
+ if x != nil {
+ return x.MajorNumber
+ }
+ return 0
+}
+
+func (x *SemanticVersion) GetMinorNumber() uint32 {
+ if x != nil {
+ return x.MinorNumber
+ }
+ return 0
+}
+
+func (x *SemanticVersion) GetPatch() uint32 {
+ if x != nil {
+ return x.Patch
+ }
+ return 0
+}
+
+var File_envoy_type_v3_semantic_version_proto protoreflect.FileDescriptor
+
+var file_envoy_type_v3_semantic_version_proto_rawDesc = []byte{
+ 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f,
+ 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79,
+ 0x70, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e,
+ 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x90, 0x01, 0x0a, 0x0f, 0x53, 0x65, 0x6d, 0x61,
+ 0x6e, 0x74, 0x69, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x6d,
+ 0x61, 0x6a, 0x6f, 0x72, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0d, 0x52, 0x0b, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x21,
+ 0x0a, 0x0c, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x4e, 0x75, 0x6d, 0x62, 0x65,
+ 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d,
+ 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x3a, 0x21, 0x9a, 0xc5, 0x88, 0x1e, 0x1c, 0x0a, 0x1a,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e,
+ 0x74, 0x69, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x3d, 0x0a, 0x1b, 0x69, 0x6f,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x14, 0x53, 0x65, 0x6d, 0x61, 0x6e,
+ 0x74, 0x69, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
+ 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+}
+
+var (
+ file_envoy_type_v3_semantic_version_proto_rawDescOnce sync.Once
+ file_envoy_type_v3_semantic_version_proto_rawDescData = file_envoy_type_v3_semantic_version_proto_rawDesc
+)
+
+func file_envoy_type_v3_semantic_version_proto_rawDescGZIP() []byte {
+ file_envoy_type_v3_semantic_version_proto_rawDescOnce.Do(func() {
+ file_envoy_type_v3_semantic_version_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_v3_semantic_version_proto_rawDescData)
+ })
+ return file_envoy_type_v3_semantic_version_proto_rawDescData
+}
+
+var file_envoy_type_v3_semantic_version_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_envoy_type_v3_semantic_version_proto_goTypes = []interface{}{
+ (*SemanticVersion)(nil), // 0: envoy.type.v3.SemanticVersion
+}
+var file_envoy_type_v3_semantic_version_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_envoy_type_v3_semantic_version_proto_init() }
+func file_envoy_type_v3_semantic_version_proto_init() {
+ if File_envoy_type_v3_semantic_version_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_type_v3_semantic_version_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SemanticVersion); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_type_v3_semantic_version_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_type_v3_semantic_version_proto_goTypes,
+ DependencyIndexes: file_envoy_type_v3_semantic_version_proto_depIdxs,
+ MessageInfos: file_envoy_type_v3_semantic_version_proto_msgTypes,
+ }.Build()
+ File_envoy_type_v3_semantic_version_proto = out.File
+ file_envoy_type_v3_semantic_version_proto_rawDesc = nil
+ file_envoy_type_v3_semantic_version_proto_goTypes = nil
+ file_envoy_type_v3_semantic_version_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version.pb.validate.go
new file mode 100644
index 000000000..0a9381744
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version.pb.validate.go
@@ -0,0 +1,105 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/type/v3/semantic_version.proto
+
+package envoy_type_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// Validate checks the field values on SemanticVersion with the rules defined
+// in the proto definition for this message. If any rules are violated, an
+// error is returned.
+func (m *SemanticVersion) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ // no validation rules for MajorNumber
+
+ // no validation rules for MinorNumber
+
+ // no validation rules for Patch
+
+ return nil
+}
+
+// SemanticVersionValidationError is the validation error returned by
+// SemanticVersion.Validate if the designated constraints aren't met.
+type SemanticVersionValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e SemanticVersionValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e SemanticVersionValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e SemanticVersionValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e SemanticVersionValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e SemanticVersionValidationError) ErrorName() string { return "SemanticVersionValidationError" }
+
+// Error satisfies the builtin error interface
+func (e SemanticVersionValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sSemanticVersion.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = SemanticVersionValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = SemanticVersionValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket.pb.go
new file mode 100644
index 000000000..fefd3f215
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket.pb.go
@@ -0,0 +1,206 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.16.0
+// source: envoy/type/v3/token_bucket.proto
+
+package envoy_type_v3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ duration "github.com/golang/protobuf/ptypes/duration"
+ wrappers "github.com/golang/protobuf/ptypes/wrappers"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// Configures a token bucket, typically used for rate limiting.
+type TokenBucket struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The maximum tokens that the bucket can hold. This is also the number of tokens that the bucket
+ // initially contains.
+ MaxTokens uint32 `protobuf:"varint,1,opt,name=max_tokens,json=maxTokens,proto3" json:"max_tokens,omitempty"`
+ // The number of tokens added to the bucket during each fill interval. If not specified, defaults
+ // to a single token.
+ TokensPerFill *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=tokens_per_fill,json=tokensPerFill,proto3" json:"tokens_per_fill,omitempty"`
+ // The fill interval that tokens are added to the bucket. During each fill interval
+ // `tokens_per_fill` are added to the bucket. The bucket will never contain more than
+ // `max_tokens` tokens.
+ FillInterval *duration.Duration `protobuf:"bytes,3,opt,name=fill_interval,json=fillInterval,proto3" json:"fill_interval,omitempty"`
+}
+
+func (x *TokenBucket) Reset() {
+ *x = TokenBucket{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_type_v3_token_bucket_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TokenBucket) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TokenBucket) ProtoMessage() {}
+
+func (x *TokenBucket) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_type_v3_token_bucket_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TokenBucket.ProtoReflect.Descriptor instead.
+func (*TokenBucket) Descriptor() ([]byte, []int) {
+ return file_envoy_type_v3_token_bucket_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *TokenBucket) GetMaxTokens() uint32 {
+ if x != nil {
+ return x.MaxTokens
+ }
+ return 0
+}
+
+func (x *TokenBucket) GetTokensPerFill() *wrappers.UInt32Value {
+ if x != nil {
+ return x.TokensPerFill
+ }
+ return nil
+}
+
+func (x *TokenBucket) GetFillInterval() *duration.Duration {
+ if x != nil {
+ return x.FillInterval
+ }
+ return nil
+}
+
+var File_envoy_type_v3_token_bucket_proto protoreflect.FileDescriptor
+
+var file_envoy_type_v3_token_bucket_proto_rawDesc = []byte{
+ 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f,
+ 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x12, 0x0d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76,
+ 0x33, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61,
+ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xef, 0x01, 0x0a,
+ 0x0b, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x0a, 0x0a,
+ 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d,
+ 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x20, 0x00, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x54, 0x6f,
+ 0x6b, 0x65, 0x6e, 0x73, 0x12, 0x4d, 0x0a, 0x0f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x5f, 0x70,
+ 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04,
+ 0x2a, 0x02, 0x20, 0x00, 0x52, 0x0d, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x50, 0x65, 0x72, 0x46,
+ 0x69, 0x6c, 0x6c, 0x12, 0x4a, 0x0a, 0x0d, 0x66, 0x69, 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x74, 0x65,
+ 0x72, 0x76, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0xaa, 0x01, 0x04, 0x08, 0x01, 0x2a,
+ 0x00, 0x52, 0x0c, 0x66, 0x69, 0x6c, 0x6c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x3a,
+ 0x1d, 0x9a, 0xc5, 0x88, 0x1e, 0x18, 0x0a, 0x16, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79,
+ 0x70, 0x65, 0x2e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x42, 0x39,
+ 0x0a, 0x1b, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x10, 0x54,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
+ 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+}
+
+var (
+ file_envoy_type_v3_token_bucket_proto_rawDescOnce sync.Once
+ file_envoy_type_v3_token_bucket_proto_rawDescData = file_envoy_type_v3_token_bucket_proto_rawDesc
+)
+
+func file_envoy_type_v3_token_bucket_proto_rawDescGZIP() []byte {
+ file_envoy_type_v3_token_bucket_proto_rawDescOnce.Do(func() {
+ file_envoy_type_v3_token_bucket_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_v3_token_bucket_proto_rawDescData)
+ })
+ return file_envoy_type_v3_token_bucket_proto_rawDescData
+}
+
+var file_envoy_type_v3_token_bucket_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_envoy_type_v3_token_bucket_proto_goTypes = []interface{}{
+ (*TokenBucket)(nil), // 0: envoy.type.v3.TokenBucket
+ (*wrappers.UInt32Value)(nil), // 1: google.protobuf.UInt32Value
+ (*duration.Duration)(nil), // 2: google.protobuf.Duration
+}
+var file_envoy_type_v3_token_bucket_proto_depIdxs = []int32{
+ 1, // 0: envoy.type.v3.TokenBucket.tokens_per_fill:type_name -> google.protobuf.UInt32Value
+ 2, // 1: envoy.type.v3.TokenBucket.fill_interval:type_name -> google.protobuf.Duration
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_envoy_type_v3_token_bucket_proto_init() }
+func file_envoy_type_v3_token_bucket_proto_init() {
+ if File_envoy_type_v3_token_bucket_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_type_v3_token_bucket_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TokenBucket); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_type_v3_token_bucket_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_type_v3_token_bucket_proto_goTypes,
+ DependencyIndexes: file_envoy_type_v3_token_bucket_proto_depIdxs,
+ MessageInfos: file_envoy_type_v3_token_bucket_proto_msgTypes,
+ }.Build()
+ File_envoy_type_v3_token_bucket_proto = out.File
+ file_envoy_type_v3_token_bucket_proto_rawDesc = nil
+ file_envoy_type_v3_token_bucket_proto_goTypes = nil
+ file_envoy_type_v3_token_bucket_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket.pb.validate.go
new file mode 100644
index 000000000..1f14f6804
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket.pb.validate.go
@@ -0,0 +1,145 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/type/v3/token_bucket.proto
+
+package envoy_type_v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/ptypes"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = ptypes.DynamicAny{}
+)
+
+// Validate checks the field values on TokenBucket with the rules defined in
+// the proto definition for this message. If any rules are violated, an error
+// is returned.
+func (m *TokenBucket) Validate() error {
+ if m == nil {
+ return nil
+ }
+
+ if m.GetMaxTokens() <= 0 {
+ return TokenBucketValidationError{
+ field: "MaxTokens",
+ reason: "value must be greater than 0",
+ }
+ }
+
+ if wrapper := m.GetTokensPerFill(); wrapper != nil {
+
+ if wrapper.GetValue() <= 0 {
+ return TokenBucketValidationError{
+ field: "TokensPerFill",
+ reason: "value must be greater than 0",
+ }
+ }
+
+ }
+
+ if m.GetFillInterval() == nil {
+ return TokenBucketValidationError{
+ field: "FillInterval",
+ reason: "value is required",
+ }
+ }
+
+ if d := m.GetFillInterval(); d != nil {
+ dur, err := ptypes.Duration(d)
+ if err != nil {
+ return TokenBucketValidationError{
+ field: "FillInterval",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ }
+
+ gt := time.Duration(0*time.Second + 0*time.Nanosecond)
+
+ if dur <= gt {
+ return TokenBucketValidationError{
+ field: "FillInterval",
+ reason: "value must be greater than 0s",
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// TokenBucketValidationError is the validation error returned by
+// TokenBucket.Validate if the designated constraints aren't met.
+type TokenBucketValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e TokenBucketValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e TokenBucketValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e TokenBucketValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e TokenBucketValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e TokenBucketValidationError) ErrorName() string { return "TokenBucketValidationError" }
+
+// Error satisfies the builtin error interface
+func (e TokenBucketValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sTokenBucket.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = TokenBucketValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = TokenBucketValidationError{}
diff --git a/vendor/github.com/envoyproxy/protoc-gen-validate/LICENSE b/vendor/github.com/envoyproxy/protoc-gen-validate/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/envoyproxy/protoc-gen-validate/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/envoyproxy/protoc-gen-validate/NOTICE b/vendor/github.com/envoyproxy/protoc-gen-validate/NOTICE
new file mode 100644
index 000000000..60884a059
--- /dev/null
+++ b/vendor/github.com/envoyproxy/protoc-gen-validate/NOTICE
@@ -0,0 +1,4 @@
+protoc-gen-validate
+Copyright 2019 Envoy Project Authors
+
+Licensed under Apache License 2.0. See LICENSE for terms.
diff --git a/vendor/github.com/envoyproxy/protoc-gen-validate/validate/BUILD b/vendor/github.com/envoyproxy/protoc-gen-validate/validate/BUILD
new file mode 100644
index 000000000..4e3837e20
--- /dev/null
+++ b/vendor/github.com/envoyproxy/protoc-gen-validate/validate/BUILD
@@ -0,0 +1,47 @@
+load("@com_google_protobuf//:protobuf.bzl", "py_proto_library")
+load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
+load("@rules_cc//cc:defs.bzl", "cc_library", "cc_proto_library")
+load("@rules_java//java:defs.bzl", "java_proto_library")
+load("@rules_proto//proto:defs.bzl", "proto_library")
+
+package(
+ default_visibility =
+ ["//visibility:public"],
+)
+
+proto_library(
+ name = "validate_proto",
+ srcs = ["validate.proto"],
+ deps = [
+ "@com_google_protobuf//:descriptor_proto",
+ "@com_google_protobuf//:duration_proto",
+ "@com_google_protobuf//:timestamp_proto",
+ ],
+)
+
+cc_proto_library(
+ name = "validate_cc",
+ deps = [":validate_proto"],
+)
+
+py_proto_library(
+ name = "validate_py",
+ srcs = ["validate.proto"],
+ deps = ["@com_google_protobuf//:protobuf_python"],
+)
+
+go_proto_library(
+ name = "go_default_library",
+ importpath = "github.com/envoyproxy/protoc-gen-validate/validate",
+ proto = ":validate_proto",
+)
+
+cc_library(
+ name = "cc_validate",
+ hdrs = ["validate.h"],
+)
+
+java_proto_library(
+ name = "validate_java",
+ deps = [":validate_proto"],
+)
diff --git a/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.h b/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.h
new file mode 100644
index 000000000..b8594978d
--- /dev/null
+++ b/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.h
@@ -0,0 +1,156 @@
+#ifndef _VALIDATE_H
+#define _VALIDATE_H
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#if !defined(_WIN32)
+#include
+#else
+#include
+#include
+
+// uses macros to #define a ton of symbols,
+// many of which interfere with our code here and down
+// the line in various extensions.
+#undef DELETE
+#undef ERROR
+#undef GetMessage
+#undef interface
+#undef TRUE
+
+#endif
+
+#include "google/protobuf/stubs/strutil.h" // for UTF8Len
+
+namespace pgv {
+using std::string;
+
+class UnimplementedException : public std::runtime_error {
+public:
+ UnimplementedException() : std::runtime_error("not yet implemented") {}
+ UnimplementedException(const std::string& message) : std::runtime_error(message) {}
+ // Thrown by C++ validation code that is not yet implemented.
+};
+
+using ValidationMsg = std::string;
+
+class BaseValidator {
+protected:
+ static std::unordered_map& validators() {
+ static auto* validator_map = new std::unordered_map();
+ return *validator_map;
+ }
+};
+
+template
+class Validator : public BaseValidator {
+public:
+ Validator(std::function check) : check_(check)
+ {
+ validators()[std::type_index(typeid(T))] = this;
+ }
+
+ static bool CheckMessage(const T& m, ValidationMsg* err)
+ {
+ auto val = static_cast*>(validators()[std::type_index(typeid(T))]);
+ if (val) {
+ return val->check_(m, err);
+ }
+ return true;
+ }
+
+private:
+ std::function check_;
+};
+
+static inline std::string String(const ValidationMsg& msg)
+{
+ return std::string(msg);
+}
+
+static inline bool IsPrefix(const string& maybe_prefix, const string& search_in)
+{
+ return search_in.compare(0, maybe_prefix.size(), maybe_prefix) == 0;
+}
+
+static inline bool IsSuffix(const string& maybe_suffix, const string& search_in)
+{
+ return maybe_suffix.size() <= search_in.size() && search_in.compare(search_in.size() - maybe_suffix.size(), maybe_suffix.size(), maybe_suffix) == 0;
+}
+
+static inline bool Contains(const string& search_in, const string& to_find)
+{
+ return search_in.find(to_find) != string::npos;
+}
+
+static inline bool NotContains(const string& search_in, const string& to_find)
+{
+ return !Contains(search_in, to_find);
+}
+
+static inline bool IsIpv4(const string& to_validate) {
+ struct sockaddr_in sa;
+ return !(inet_pton(AF_INET, to_validate.c_str(), &sa.sin_addr) < 1);
+}
+
+static inline bool IsIpv6(const string& to_validate) {
+ struct sockaddr_in6 sa_six;
+ return !(inet_pton(AF_INET6, to_validate.c_str(), &sa_six.sin6_addr) < 1);
+}
+
+static inline bool IsIp(const string& to_validate) {
+ return IsIpv4(to_validate) || IsIpv6(to_validate);
+}
+
+static inline bool IsHostname(const string& to_validate) {
+ if (to_validate.length() > 253) {
+ return false;
+ }
+
+ const std::regex dot_regex{"\\."};
+ const auto iter_end = std::sregex_token_iterator();
+ auto iter = std::sregex_token_iterator(to_validate.begin(), to_validate.end(), dot_regex, -1);
+ for (; iter != iter_end; ++iter) {
+ const std::string &part = *iter;
+ if (part.empty() || part.length() > 63) {
+ return false;
+ }
+ if (part.at(0) == '-') {
+ return false;
+ }
+ if (part.at(part.length() - 1) == '-') {
+ return false;
+ }
+ for (const auto &character : part) {
+ if ((character < 'A' || character > 'Z') && (character < 'a' || character > 'z') && (character < '0' || character > '9') && character != '-') {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+static inline size_t Utf8Len(const string& narrow_string) {
+ const char *str_char = narrow_string.c_str();
+ ptrdiff_t byte_len = narrow_string.length();
+ size_t unicode_len = 0;
+ int char_len = 1;
+ while (byte_len > 0 && char_len > 0) {
+ char_len = google::protobuf::UTF8FirstLetterNumBytes(str_char, byte_len);
+ str_char += char_len;
+ byte_len -= char_len;
+ ++unicode_len;
+ }
+ return unicode_len;
+}
+
+} // namespace pgv
+
+#endif // _VALIDATE_H
diff --git a/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.pb.go b/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.pb.go
new file mode 100644
index 000000000..d4524fb5e
--- /dev/null
+++ b/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.pb.go
@@ -0,0 +1,4104 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.15.5
+// source: validate/validate.proto
+
+package validate
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// WellKnownRegex contain some well-known patterns.
+type KnownRegex int32
+
+const (
+ KnownRegex_UNKNOWN KnownRegex = 0
+ // HTTP header name as defined by RFC 7230.
+ KnownRegex_HTTP_HEADER_NAME KnownRegex = 1
+ // HTTP header value as defined by RFC 7230.
+ KnownRegex_HTTP_HEADER_VALUE KnownRegex = 2
+)
+
+// Enum value maps for KnownRegex.
+var (
+ KnownRegex_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "HTTP_HEADER_NAME",
+ 2: "HTTP_HEADER_VALUE",
+ }
+ KnownRegex_value = map[string]int32{
+ "UNKNOWN": 0,
+ "HTTP_HEADER_NAME": 1,
+ "HTTP_HEADER_VALUE": 2,
+ }
+)
+
+func (x KnownRegex) Enum() *KnownRegex {
+ p := new(KnownRegex)
+ *p = x
+ return p
+}
+
+func (x KnownRegex) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (KnownRegex) Descriptor() protoreflect.EnumDescriptor {
+ return file_validate_validate_proto_enumTypes[0].Descriptor()
+}
+
+func (KnownRegex) Type() protoreflect.EnumType {
+ return &file_validate_validate_proto_enumTypes[0]
+}
+
+func (x KnownRegex) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *KnownRegex) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = KnownRegex(num)
+ return nil
+}
+
+// Deprecated: Use KnownRegex.Descriptor instead.
+func (KnownRegex) EnumDescriptor() ([]byte, []int) {
+ return file_validate_validate_proto_rawDescGZIP(), []int{0}
+}
+
+// FieldRules encapsulates the rules for each type of field. Depending on the
+// field, the correct set should be used to ensure proper validations.
+type FieldRules struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Message *MessageRules `protobuf:"bytes,17,opt,name=message" json:"message,omitempty"`
+ // Types that are assignable to Type:
+ // *FieldRules_Float
+ // *FieldRules_Double
+ // *FieldRules_Int32
+ // *FieldRules_Int64
+ // *FieldRules_Uint32
+ // *FieldRules_Uint64
+ // *FieldRules_Sint32
+ // *FieldRules_Sint64
+ // *FieldRules_Fixed32
+ // *FieldRules_Fixed64
+ // *FieldRules_Sfixed32
+ // *FieldRules_Sfixed64
+ // *FieldRules_Bool
+ // *FieldRules_String_
+ // *FieldRules_Bytes
+ // *FieldRules_Enum
+ // *FieldRules_Repeated
+ // *FieldRules_Map
+ // *FieldRules_Any
+ // *FieldRules_Duration
+ // *FieldRules_Timestamp
+ Type isFieldRules_Type `protobuf_oneof:"type"`
+}
+
+func (x *FieldRules) Reset() {
+ *x = FieldRules{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_validate_validate_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FieldRules) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FieldRules) ProtoMessage() {}
+
+func (x *FieldRules) ProtoReflect() protoreflect.Message {
+ mi := &file_validate_validate_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FieldRules.ProtoReflect.Descriptor instead.
+func (*FieldRules) Descriptor() ([]byte, []int) {
+ return file_validate_validate_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *FieldRules) GetMessage() *MessageRules {
+ if x != nil {
+ return x.Message
+ }
+ return nil
+}
+
+func (m *FieldRules) GetType() isFieldRules_Type {
+ if m != nil {
+ return m.Type
+ }
+ return nil
+}
+
+func (x *FieldRules) GetFloat() *FloatRules {
+ if x, ok := x.GetType().(*FieldRules_Float); ok {
+ return x.Float
+ }
+ return nil
+}
+
+func (x *FieldRules) GetDouble() *DoubleRules {
+ if x, ok := x.GetType().(*FieldRules_Double); ok {
+ return x.Double
+ }
+ return nil
+}
+
+func (x *FieldRules) GetInt32() *Int32Rules {
+ if x, ok := x.GetType().(*FieldRules_Int32); ok {
+ return x.Int32
+ }
+ return nil
+}
+
+func (x *FieldRules) GetInt64() *Int64Rules {
+ if x, ok := x.GetType().(*FieldRules_Int64); ok {
+ return x.Int64
+ }
+ return nil
+}
+
+func (x *FieldRules) GetUint32() *UInt32Rules {
+ if x, ok := x.GetType().(*FieldRules_Uint32); ok {
+ return x.Uint32
+ }
+ return nil
+}
+
+func (x *FieldRules) GetUint64() *UInt64Rules {
+ if x, ok := x.GetType().(*FieldRules_Uint64); ok {
+ return x.Uint64
+ }
+ return nil
+}
+
+func (x *FieldRules) GetSint32() *SInt32Rules {
+ if x, ok := x.GetType().(*FieldRules_Sint32); ok {
+ return x.Sint32
+ }
+ return nil
+}
+
+func (x *FieldRules) GetSint64() *SInt64Rules {
+ if x, ok := x.GetType().(*FieldRules_Sint64); ok {
+ return x.Sint64
+ }
+ return nil
+}
+
+func (x *FieldRules) GetFixed32() *Fixed32Rules {
+ if x, ok := x.GetType().(*FieldRules_Fixed32); ok {
+ return x.Fixed32
+ }
+ return nil
+}
+
+func (x *FieldRules) GetFixed64() *Fixed64Rules {
+ if x, ok := x.GetType().(*FieldRules_Fixed64); ok {
+ return x.Fixed64
+ }
+ return nil
+}
+
+func (x *FieldRules) GetSfixed32() *SFixed32Rules {
+ if x, ok := x.GetType().(*FieldRules_Sfixed32); ok {
+ return x.Sfixed32
+ }
+ return nil
+}
+
+func (x *FieldRules) GetSfixed64() *SFixed64Rules {
+ if x, ok := x.GetType().(*FieldRules_Sfixed64); ok {
+ return x.Sfixed64
+ }
+ return nil
+}
+
+func (x *FieldRules) GetBool() *BoolRules {
+ if x, ok := x.GetType().(*FieldRules_Bool); ok {
+ return x.Bool
+ }
+ return nil
+}
+
+func (x *FieldRules) GetString_() *StringRules {
+ if x, ok := x.GetType().(*FieldRules_String_); ok {
+ return x.String_
+ }
+ return nil
+}
+
+func (x *FieldRules) GetBytes() *BytesRules {
+ if x, ok := x.GetType().(*FieldRules_Bytes); ok {
+ return x.Bytes
+ }
+ return nil
+}
+
+func (x *FieldRules) GetEnum() *EnumRules {
+ if x, ok := x.GetType().(*FieldRules_Enum); ok {
+ return x.Enum
+ }
+ return nil
+}
+
+func (x *FieldRules) GetRepeated() *RepeatedRules {
+ if x, ok := x.GetType().(*FieldRules_Repeated); ok {
+ return x.Repeated
+ }
+ return nil
+}
+
+func (x *FieldRules) GetMap() *MapRules {
+ if x, ok := x.GetType().(*FieldRules_Map); ok {
+ return x.Map
+ }
+ return nil
+}
+
+func (x *FieldRules) GetAny() *AnyRules {
+ if x, ok := x.GetType().(*FieldRules_Any); ok {
+ return x.Any
+ }
+ return nil
+}
+
+func (x *FieldRules) GetDuration() *DurationRules {
+ if x, ok := x.GetType().(*FieldRules_Duration); ok {
+ return x.Duration
+ }
+ return nil
+}
+
+func (x *FieldRules) GetTimestamp() *TimestampRules {
+ if x, ok := x.GetType().(*FieldRules_Timestamp); ok {
+ return x.Timestamp
+ }
+ return nil
+}
+
+type isFieldRules_Type interface {
+ isFieldRules_Type()
+}
+
+type FieldRules_Float struct {
+ // Scalar Field Types
+ Float *FloatRules `protobuf:"bytes,1,opt,name=float,oneof"`
+}
+
+type FieldRules_Double struct {
+ Double *DoubleRules `protobuf:"bytes,2,opt,name=double,oneof"`
+}
+
+type FieldRules_Int32 struct {
+ Int32 *Int32Rules `protobuf:"bytes,3,opt,name=int32,oneof"`
+}
+
+type FieldRules_Int64 struct {
+ Int64 *Int64Rules `protobuf:"bytes,4,opt,name=int64,oneof"`
+}
+
+type FieldRules_Uint32 struct {
+ Uint32 *UInt32Rules `protobuf:"bytes,5,opt,name=uint32,oneof"`
+}
+
+type FieldRules_Uint64 struct {
+ Uint64 *UInt64Rules `protobuf:"bytes,6,opt,name=uint64,oneof"`
+}
+
+type FieldRules_Sint32 struct {
+ Sint32 *SInt32Rules `protobuf:"bytes,7,opt,name=sint32,oneof"`
+}
+
+type FieldRules_Sint64 struct {
+ Sint64 *SInt64Rules `protobuf:"bytes,8,opt,name=sint64,oneof"`
+}
+
+type FieldRules_Fixed32 struct {
+ Fixed32 *Fixed32Rules `protobuf:"bytes,9,opt,name=fixed32,oneof"`
+}
+
+type FieldRules_Fixed64 struct {
+ Fixed64 *Fixed64Rules `protobuf:"bytes,10,opt,name=fixed64,oneof"`
+}
+
+type FieldRules_Sfixed32 struct {
+ Sfixed32 *SFixed32Rules `protobuf:"bytes,11,opt,name=sfixed32,oneof"`
+}
+
+type FieldRules_Sfixed64 struct {
+ Sfixed64 *SFixed64Rules `protobuf:"bytes,12,opt,name=sfixed64,oneof"`
+}
+
+type FieldRules_Bool struct {
+ Bool *BoolRules `protobuf:"bytes,13,opt,name=bool,oneof"`
+}
+
+type FieldRules_String_ struct {
+ String_ *StringRules `protobuf:"bytes,14,opt,name=string,oneof"`
+}
+
+type FieldRules_Bytes struct {
+ Bytes *BytesRules `protobuf:"bytes,15,opt,name=bytes,oneof"`
+}
+
+type FieldRules_Enum struct {
+ // Complex Field Types
+ Enum *EnumRules `protobuf:"bytes,16,opt,name=enum,oneof"`
+}
+
+type FieldRules_Repeated struct {
+ Repeated *RepeatedRules `protobuf:"bytes,18,opt,name=repeated,oneof"`
+}
+
+type FieldRules_Map struct {
+ Map *MapRules `protobuf:"bytes,19,opt,name=map,oneof"`
+}
+
+type FieldRules_Any struct {
+ // Well-Known Field Types
+ Any *AnyRules `protobuf:"bytes,20,opt,name=any,oneof"`
+}
+
+type FieldRules_Duration struct {
+ Duration *DurationRules `protobuf:"bytes,21,opt,name=duration,oneof"`
+}
+
+type FieldRules_Timestamp struct {
+ Timestamp *TimestampRules `protobuf:"bytes,22,opt,name=timestamp,oneof"`
+}
+
+func (*FieldRules_Float) isFieldRules_Type() {}
+
+func (*FieldRules_Double) isFieldRules_Type() {}
+
+func (*FieldRules_Int32) isFieldRules_Type() {}
+
+func (*FieldRules_Int64) isFieldRules_Type() {}
+
+func (*FieldRules_Uint32) isFieldRules_Type() {}
+
+func (*FieldRules_Uint64) isFieldRules_Type() {}
+
+func (*FieldRules_Sint32) isFieldRules_Type() {}
+
+func (*FieldRules_Sint64) isFieldRules_Type() {}
+
+func (*FieldRules_Fixed32) isFieldRules_Type() {}
+
+func (*FieldRules_Fixed64) isFieldRules_Type() {}
+
+func (*FieldRules_Sfixed32) isFieldRules_Type() {}
+
+func (*FieldRules_Sfixed64) isFieldRules_Type() {}
+
+func (*FieldRules_Bool) isFieldRules_Type() {}
+
+func (*FieldRules_String_) isFieldRules_Type() {}
+
+func (*FieldRules_Bytes) isFieldRules_Type() {}
+
+func (*FieldRules_Enum) isFieldRules_Type() {}
+
+func (*FieldRules_Repeated) isFieldRules_Type() {}
+
+func (*FieldRules_Map) isFieldRules_Type() {}
+
+func (*FieldRules_Any) isFieldRules_Type() {}
+
+func (*FieldRules_Duration) isFieldRules_Type() {}
+
+func (*FieldRules_Timestamp) isFieldRules_Type() {}
+
+// FloatRules describes the constraints applied to `float` values
+type FloatRules struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Const specifies that this field must be exactly the specified value
+ Const *float32 `protobuf:"fixed32,1,opt,name=const" json:"const,omitempty"`
+ // Lt specifies that this field must be less than the specified value,
+ // exclusive
+ Lt *float32 `protobuf:"fixed32,2,opt,name=lt" json:"lt,omitempty"`
+ // Lte specifies that this field must be less than or equal to the
+ // specified value, inclusive
+ Lte *float32 `protobuf:"fixed32,3,opt,name=lte" json:"lte,omitempty"`
+ // Gt specifies that this field must be greater than the specified value,
+ // exclusive. If the value of Gt is larger than a specified Lt or Lte, the
+ // range is reversed.
+ Gt *float32 `protobuf:"fixed32,4,opt,name=gt" json:"gt,omitempty"`
+ // Gte specifies that this field must be greater than or equal to the
+ // specified value, inclusive. If the value of Gte is larger than a
+ // specified Lt or Lte, the range is reversed.
+ Gte *float32 `protobuf:"fixed32,5,opt,name=gte" json:"gte,omitempty"`
+ // In specifies that this field must be equal to one of the specified
+ // values
+ In []float32 `protobuf:"fixed32,6,rep,name=in" json:"in,omitempty"`
+ // NotIn specifies that this field cannot be equal to one of the specified
+ // values
+ NotIn []float32 `protobuf:"fixed32,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"`
+ // IgnoreEmpty specifies that the validation rules of this field should be
+ // evaluated only if the field is not empty
+ IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"`
+}
+
+func (x *FloatRules) Reset() {
+ *x = FloatRules{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_validate_validate_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FloatRules) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FloatRules) ProtoMessage() {}
+
+func (x *FloatRules) ProtoReflect() protoreflect.Message {
+ mi := &file_validate_validate_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FloatRules.ProtoReflect.Descriptor instead.
+func (*FloatRules) Descriptor() ([]byte, []int) {
+ return file_validate_validate_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *FloatRules) GetConst() float32 {
+ if x != nil && x.Const != nil {
+ return *x.Const
+ }
+ return 0
+}
+
+func (x *FloatRules) GetLt() float32 {
+ if x != nil && x.Lt != nil {
+ return *x.Lt
+ }
+ return 0
+}
+
+func (x *FloatRules) GetLte() float32 {
+ if x != nil && x.Lte != nil {
+ return *x.Lte
+ }
+ return 0
+}
+
+func (x *FloatRules) GetGt() float32 {
+ if x != nil && x.Gt != nil {
+ return *x.Gt
+ }
+ return 0
+}
+
+func (x *FloatRules) GetGte() float32 {
+ if x != nil && x.Gte != nil {
+ return *x.Gte
+ }
+ return 0
+}
+
+func (x *FloatRules) GetIn() []float32 {
+ if x != nil {
+ return x.In
+ }
+ return nil
+}
+
+func (x *FloatRules) GetNotIn() []float32 {
+ if x != nil {
+ return x.NotIn
+ }
+ return nil
+}
+
+func (x *FloatRules) GetIgnoreEmpty() bool {
+ if x != nil && x.IgnoreEmpty != nil {
+ return *x.IgnoreEmpty
+ }
+ return false
+}
+
+// DoubleRules describes the constraints applied to `double` values
+type DoubleRules struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Const specifies that this field must be exactly the specified value
+ Const *float64 `protobuf:"fixed64,1,opt,name=const" json:"const,omitempty"`
+ // Lt specifies that this field must be less than the specified value,
+ // exclusive
+ Lt *float64 `protobuf:"fixed64,2,opt,name=lt" json:"lt,omitempty"`
+ // Lte specifies that this field must be less than or equal to the
+ // specified value, inclusive
+ Lte *float64 `protobuf:"fixed64,3,opt,name=lte" json:"lte,omitempty"`
+ // Gt specifies that this field must be greater than the specified value,
+ // exclusive. If the value of Gt is larger than a specified Lt or Lte, the
+ // range is reversed.
+ Gt *float64 `protobuf:"fixed64,4,opt,name=gt" json:"gt,omitempty"`
+ // Gte specifies that this field must be greater than or equal to the
+ // specified value, inclusive. If the value of Gte is larger than a
+ // specified Lt or Lte, the range is reversed.
+ Gte *float64 `protobuf:"fixed64,5,opt,name=gte" json:"gte,omitempty"`
+ // In specifies that this field must be equal to one of the specified
+ // values
+ In []float64 `protobuf:"fixed64,6,rep,name=in" json:"in,omitempty"`
+ // NotIn specifies that this field cannot be equal to one of the specified
+ // values
+ NotIn []float64 `protobuf:"fixed64,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"`
+ // IgnoreEmpty specifies that the validation rules of this field should be
+ // evaluated only if the field is not empty
+ IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"`
+}
+
+func (x *DoubleRules) Reset() {
+ *x = DoubleRules{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_validate_validate_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DoubleRules) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DoubleRules) ProtoMessage() {}
+
+func (x *DoubleRules) ProtoReflect() protoreflect.Message {
+ mi := &file_validate_validate_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DoubleRules.ProtoReflect.Descriptor instead.
+func (*DoubleRules) Descriptor() ([]byte, []int) {
+ return file_validate_validate_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *DoubleRules) GetConst() float64 {
+ if x != nil && x.Const != nil {
+ return *x.Const
+ }
+ return 0
+}
+
+func (x *DoubleRules) GetLt() float64 {
+ if x != nil && x.Lt != nil {
+ return *x.Lt
+ }
+ return 0
+}
+
+func (x *DoubleRules) GetLte() float64 {
+ if x != nil && x.Lte != nil {
+ return *x.Lte
+ }
+ return 0
+}
+
+func (x *DoubleRules) GetGt() float64 {
+ if x != nil && x.Gt != nil {
+ return *x.Gt
+ }
+ return 0
+}
+
+func (x *DoubleRules) GetGte() float64 {
+ if x != nil && x.Gte != nil {
+ return *x.Gte
+ }
+ return 0
+}
+
+func (x *DoubleRules) GetIn() []float64 {
+ if x != nil {
+ return x.In
+ }
+ return nil
+}
+
+func (x *DoubleRules) GetNotIn() []float64 {
+ if x != nil {
+ return x.NotIn
+ }
+ return nil
+}
+
+func (x *DoubleRules) GetIgnoreEmpty() bool {
+ if x != nil && x.IgnoreEmpty != nil {
+ return *x.IgnoreEmpty
+ }
+ return false
+}
+
+// Int32Rules describes the constraints applied to `int32` values
+type Int32Rules struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Const specifies that this field must be exactly the specified value
+ Const *int32 `protobuf:"varint,1,opt,name=const" json:"const,omitempty"`
+ // Lt specifies that this field must be less than the specified value,
+ // exclusive
+ Lt *int32 `protobuf:"varint,2,opt,name=lt" json:"lt,omitempty"`
+ // Lte specifies that this field must be less than or equal to the
+ // specified value, inclusive
+ Lte *int32 `protobuf:"varint,3,opt,name=lte" json:"lte,omitempty"`
+ // Gt specifies that this field must be greater than the specified value,
+ // exclusive. If the value of Gt is larger than a specified Lt or Lte, the
+ // range is reversed.
+ Gt *int32 `protobuf:"varint,4,opt,name=gt" json:"gt,omitempty"`
+ // Gte specifies that this field must be greater than or equal to the
+ // specified value, inclusive. If the value of Gte is larger than a
+ // specified Lt or Lte, the range is reversed.
+ Gte *int32 `protobuf:"varint,5,opt,name=gte" json:"gte,omitempty"`
+ // In specifies that this field must be equal to one of the specified
+ // values
+ In []int32 `protobuf:"varint,6,rep,name=in" json:"in,omitempty"`
+ // NotIn specifies that this field cannot be equal to one of the specified
+ // values
+ NotIn []int32 `protobuf:"varint,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"`
+ // IgnoreEmpty specifies that the validation rules of this field should be
+ // evaluated only if the field is not empty
+ IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"`
+}
+
+func (x *Int32Rules) Reset() {
+ *x = Int32Rules{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_validate_validate_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Int32Rules) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Int32Rules) ProtoMessage() {}
+
+func (x *Int32Rules) ProtoReflect() protoreflect.Message {
+ mi := &file_validate_validate_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Int32Rules.ProtoReflect.Descriptor instead.
+func (*Int32Rules) Descriptor() ([]byte, []int) {
+ return file_validate_validate_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Int32Rules) GetConst() int32 {
+ if x != nil && x.Const != nil {
+ return *x.Const
+ }
+ return 0
+}
+
+func (x *Int32Rules) GetLt() int32 {
+ if x != nil && x.Lt != nil {
+ return *x.Lt
+ }
+ return 0
+}
+
+func (x *Int32Rules) GetLte() int32 {
+ if x != nil && x.Lte != nil {
+ return *x.Lte
+ }
+ return 0
+}
+
+func (x *Int32Rules) GetGt() int32 {
+ if x != nil && x.Gt != nil {
+ return *x.Gt
+ }
+ return 0
+}
+
+func (x *Int32Rules) GetGte() int32 {
+ if x != nil && x.Gte != nil {
+ return *x.Gte
+ }
+ return 0
+}
+
+func (x *Int32Rules) GetIn() []int32 {
+ if x != nil {
+ return x.In
+ }
+ return nil
+}
+
+func (x *Int32Rules) GetNotIn() []int32 {
+ if x != nil {
+ return x.NotIn
+ }
+ return nil
+}
+
+func (x *Int32Rules) GetIgnoreEmpty() bool {
+ if x != nil && x.IgnoreEmpty != nil {
+ return *x.IgnoreEmpty
+ }
+ return false
+}
+
+// Int64Rules describes the constraints applied to `int64` values
+type Int64Rules struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Const specifies that this field must be exactly the specified value
+ Const *int64 `protobuf:"varint,1,opt,name=const" json:"const,omitempty"`
+ // Lt specifies that this field must be less than the specified value,
+ // exclusive
+ Lt *int64 `protobuf:"varint,2,opt,name=lt" json:"lt,omitempty"`
+ // Lte specifies that this field must be less than or equal to the
+ // specified value, inclusive
+ Lte *int64 `protobuf:"varint,3,opt,name=lte" json:"lte,omitempty"`
+ // Gt specifies that this field must be greater than the specified value,
+ // exclusive. If the value of Gt is larger than a specified Lt or Lte, the
+ // range is reversed.
+ Gt *int64 `protobuf:"varint,4,opt,name=gt" json:"gt,omitempty"`
+ // Gte specifies that this field must be greater than or equal to the
+ // specified value, inclusive. If the value of Gte is larger than a
+ // specified Lt or Lte, the range is reversed.
+ Gte *int64 `protobuf:"varint,5,opt,name=gte" json:"gte,omitempty"`
+ // In specifies that this field must be equal to one of the specified
+ // values
+ In []int64 `protobuf:"varint,6,rep,name=in" json:"in,omitempty"`
+ // NotIn specifies that this field cannot be equal to one of the specified
+ // values
+ NotIn []int64 `protobuf:"varint,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"`
+ // IgnoreEmpty specifies that the validation rules of this field should be
+ // evaluated only if the field is not empty
+ IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"`
+}
+
+func (x *Int64Rules) Reset() {
+ *x = Int64Rules{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_validate_validate_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Int64Rules) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Int64Rules) ProtoMessage() {}
+
+func (x *Int64Rules) ProtoReflect() protoreflect.Message {
+ mi := &file_validate_validate_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Int64Rules.ProtoReflect.Descriptor instead.
+func (*Int64Rules) Descriptor() ([]byte, []int) {
+ return file_validate_validate_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *Int64Rules) GetConst() int64 {
+ if x != nil && x.Const != nil {
+ return *x.Const
+ }
+ return 0
+}
+
+func (x *Int64Rules) GetLt() int64 {
+ if x != nil && x.Lt != nil {
+ return *x.Lt
+ }
+ return 0
+}
+
+func (x *Int64Rules) GetLte() int64 {
+ if x != nil && x.Lte != nil {
+ return *x.Lte
+ }
+ return 0
+}
+
+func (x *Int64Rules) GetGt() int64 {
+ if x != nil && x.Gt != nil {
+ return *x.Gt
+ }
+ return 0
+}
+
+func (x *Int64Rules) GetGte() int64 {
+ if x != nil && x.Gte != nil {
+ return *x.Gte
+ }
+ return 0
+}
+
+func (x *Int64Rules) GetIn() []int64 {
+ if x != nil {
+ return x.In
+ }
+ return nil
+}
+
+func (x *Int64Rules) GetNotIn() []int64 {
+ if x != nil {
+ return x.NotIn
+ }
+ return nil
+}
+
+func (x *Int64Rules) GetIgnoreEmpty() bool {
+ if x != nil && x.IgnoreEmpty != nil {
+ return *x.IgnoreEmpty
+ }
+ return false
+}
+
+// UInt32Rules describes the constraints applied to `uint32` values
+type UInt32Rules struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Const specifies that this field must be exactly the specified value
+ Const *uint32 `protobuf:"varint,1,opt,name=const" json:"const,omitempty"`
+ // Lt specifies that this field must be less than the specified value,
+ // exclusive
+ Lt *uint32 `protobuf:"varint,2,opt,name=lt" json:"lt,omitempty"`
+ // Lte specifies that this field must be less than or equal to the
+ // specified value, inclusive
+ Lte *uint32 `protobuf:"varint,3,opt,name=lte" json:"lte,omitempty"`
+ // Gt specifies that this field must be greater than the specified value,
+ // exclusive. If the value of Gt is larger than a specified Lt or Lte, the
+ // range is reversed.
+ Gt *uint32 `protobuf:"varint,4,opt,name=gt" json:"gt,omitempty"`
+ // Gte specifies that this field must be greater than or equal to the
+ // specified value, inclusive. If the value of Gte is larger than a
+ // specified Lt or Lte, the range is reversed.
+ Gte *uint32 `protobuf:"varint,5,opt,name=gte" json:"gte,omitempty"`
+ // In specifies that this field must be equal to one of the specified
+ // values
+ In []uint32 `protobuf:"varint,6,rep,name=in" json:"in,omitempty"`
+ // NotIn specifies that this field cannot be equal to one of the specified
+ // values
+ NotIn []uint32 `protobuf:"varint,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"`
+ // IgnoreEmpty specifies that the validation rules of this field should be
+ // evaluated only if the field is not empty
+ IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"`
+}
+
+func (x *UInt32Rules) Reset() {
+ *x = UInt32Rules{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_validate_validate_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UInt32Rules) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UInt32Rules) ProtoMessage() {}
+
+func (x *UInt32Rules) ProtoReflect() protoreflect.Message {
+ mi := &file_validate_validate_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UInt32Rules.ProtoReflect.Descriptor instead.
+func (*UInt32Rules) Descriptor() ([]byte, []int) {
+ return file_validate_validate_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *UInt32Rules) GetConst() uint32 {
+ if x != nil && x.Const != nil {
+ return *x.Const
+ }
+ return 0
+}
+
+func (x *UInt32Rules) GetLt() uint32 {
+ if x != nil && x.Lt != nil {
+ return *x.Lt
+ }
+ return 0
+}
+
+func (x *UInt32Rules) GetLte() uint32 {
+ if x != nil && x.Lte != nil {
+ return *x.Lte
+ }
+ return 0
+}
+
+func (x *UInt32Rules) GetGt() uint32 {
+ if x != nil && x.Gt != nil {
+ return *x.Gt
+ }
+ return 0
+}
+
+func (x *UInt32Rules) GetGte() uint32 {
+ if x != nil && x.Gte != nil {
+ return *x.Gte
+ }
+ return 0
+}
+
+func (x *UInt32Rules) GetIn() []uint32 {
+ if x != nil {
+ return x.In
+ }
+ return nil
+}
+
+func (x *UInt32Rules) GetNotIn() []uint32 {
+ if x != nil {
+ return x.NotIn
+ }
+ return nil
+}
+
+func (x *UInt32Rules) GetIgnoreEmpty() bool {
+ if x != nil && x.IgnoreEmpty != nil {
+ return *x.IgnoreEmpty
+ }
+ return false
+}
+
+// UInt64Rules describes the constraints applied to `uint64` values
+type UInt64Rules struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Const specifies that this field must be exactly the specified value
+ Const *uint64 `protobuf:"varint,1,opt,name=const" json:"const,omitempty"`
+ // Lt specifies that this field must be less than the specified value,
+ // exclusive
+ Lt *uint64 `protobuf:"varint,2,opt,name=lt" json:"lt,omitempty"`
+ // Lte specifies that this field must be less than or equal to the
+ // specified value, inclusive
+ Lte *uint64 `protobuf:"varint,3,opt,name=lte" json:"lte,omitempty"`
+ // Gt specifies that this field must be greater than the specified value,
+ // exclusive. If the value of Gt is larger than a specified Lt or Lte, the
+ // range is reversed.
+ Gt *uint64 `protobuf:"varint,4,opt,name=gt" json:"gt,omitempty"`
+ // Gte specifies that this field must be greater than or equal to the
+ // specified value, inclusive. If the value of Gte is larger than a
+ // specified Lt or Lte, the range is reversed.
+ Gte *uint64 `protobuf:"varint,5,opt,name=gte" json:"gte,omitempty"`
+ // In specifies that this field must be equal to one of the specified
+ // values
+ In []uint64 `protobuf:"varint,6,rep,name=in" json:"in,omitempty"`
+ // NotIn specifies that this field cannot be equal to one of the specified
+ // values
+ NotIn []uint64 `protobuf:"varint,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"`
+ // IgnoreEmpty specifies that the validation rules of this field should be
+ // evaluated only if the field is not empty
+ IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"`
+}
+
+func (x *UInt64Rules) Reset() {
+ *x = UInt64Rules{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_validate_validate_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UInt64Rules) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UInt64Rules) ProtoMessage() {}
+
+func (x *UInt64Rules) ProtoReflect() protoreflect.Message {
+ mi := &file_validate_validate_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UInt64Rules.ProtoReflect.Descriptor instead.
+func (*UInt64Rules) Descriptor() ([]byte, []int) {
+ return file_validate_validate_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *UInt64Rules) GetConst() uint64 {
+ if x != nil && x.Const != nil {
+ return *x.Const
+ }
+ return 0
+}
+
+func (x *UInt64Rules) GetLt() uint64 {
+ if x != nil && x.Lt != nil {
+ return *x.Lt
+ }
+ return 0
+}
+
+func (x *UInt64Rules) GetLte() uint64 {
+ if x != nil && x.Lte != nil {
+ return *x.Lte
+ }
+ return 0
+}
+
+func (x *UInt64Rules) GetGt() uint64 {
+ if x != nil && x.Gt != nil {
+ return *x.Gt
+ }
+ return 0
+}
+
+func (x *UInt64Rules) GetGte() uint64 {
+ if x != nil && x.Gte != nil {
+ return *x.Gte
+ }
+ return 0
+}
+
+func (x *UInt64Rules) GetIn() []uint64 {
+ if x != nil {
+ return x.In
+ }
+ return nil
+}
+
+func (x *UInt64Rules) GetNotIn() []uint64 {
+ if x != nil {
+ return x.NotIn
+ }
+ return nil
+}
+
+func (x *UInt64Rules) GetIgnoreEmpty() bool {
+ if x != nil && x.IgnoreEmpty != nil {
+ return *x.IgnoreEmpty
+ }
+ return false
+}
+
+// SInt32Rules describes the constraints applied to `sint32` values
+type SInt32Rules struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Const specifies that this field must be exactly the specified value
+ Const *int32 `protobuf:"zigzag32,1,opt,name=const" json:"const,omitempty"`
+ // Lt specifies that this field must be less than the specified value,
+ // exclusive
+ Lt *int32 `protobuf:"zigzag32,2,opt,name=lt" json:"lt,omitempty"`
+ // Lte specifies that this field must be less than or equal to the
+ // specified value, inclusive
+ Lte *int32 `protobuf:"zigzag32,3,opt,name=lte" json:"lte,omitempty"`
+ // Gt specifies that this field must be greater than the specified value,
+ // exclusive. If the value of Gt is larger than a specified Lt or Lte, the
+ // range is reversed.
+ Gt *int32 `protobuf:"zigzag32,4,opt,name=gt" json:"gt,omitempty"`
+ // Gte specifies that this field must be greater than or equal to the
+ // specified value, inclusive. If the value of Gte is larger than a
+ // specified Lt or Lte, the range is reversed.
+ Gte *int32 `protobuf:"zigzag32,5,opt,name=gte" json:"gte,omitempty"`
+ // In specifies that this field must be equal to one of the specified
+ // values
+ In []int32 `protobuf:"zigzag32,6,rep,name=in" json:"in,omitempty"`
+ // NotIn specifies that this field cannot be equal to one of the specified
+ // values
+ NotIn []int32 `protobuf:"zigzag32,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"`
+ // IgnoreEmpty specifies that the validation rules of this field should be
+ // evaluated only if the field is not empty
+ IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"`
+}
+
+func (x *SInt32Rules) Reset() {
+ *x = SInt32Rules{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_validate_validate_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SInt32Rules) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SInt32Rules) ProtoMessage() {}
+
+func (x *SInt32Rules) ProtoReflect() protoreflect.Message {
+ mi := &file_validate_validate_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SInt32Rules.ProtoReflect.Descriptor instead.
+func (*SInt32Rules) Descriptor() ([]byte, []int) {
+ return file_validate_validate_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *SInt32Rules) GetConst() int32 {
+ if x != nil && x.Const != nil {
+ return *x.Const
+ }
+ return 0
+}
+
+func (x *SInt32Rules) GetLt() int32 {
+ if x != nil && x.Lt != nil {
+ return *x.Lt
+ }
+ return 0
+}
+
+func (x *SInt32Rules) GetLte() int32 {
+ if x != nil && x.Lte != nil {
+ return *x.Lte
+ }
+ return 0
+}
+
+func (x *SInt32Rules) GetGt() int32 {
+ if x != nil && x.Gt != nil {
+ return *x.Gt
+ }
+ return 0
+}
+
+func (x *SInt32Rules) GetGte() int32 {
+ if x != nil && x.Gte != nil {
+ return *x.Gte
+ }
+ return 0
+}
+
+func (x *SInt32Rules) GetIn() []int32 {
+ if x != nil {
+ return x.In
+ }
+ return nil
+}
+
+func (x *SInt32Rules) GetNotIn() []int32 {
+ if x != nil {
+ return x.NotIn
+ }
+ return nil
+}
+
+func (x *SInt32Rules) GetIgnoreEmpty() bool {
+ if x != nil && x.IgnoreEmpty != nil {
+ return *x.IgnoreEmpty
+ }
+ return false
+}
+
+// SInt64Rules describes the constraints applied to `sint64` values
+type SInt64Rules struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Const specifies that this field must be exactly the specified value
+ Const *int64 `protobuf:"zigzag64,1,opt,name=const" json:"const,omitempty"`
+ // Lt specifies that this field must be less than the specified value,
+ // exclusive
+ Lt *int64 `protobuf:"zigzag64,2,opt,name=lt" json:"lt,omitempty"`
+ // Lte specifies that this field must be less than or equal to the
+ // specified value, inclusive
+ Lte *int64 `protobuf:"zigzag64,3,opt,name=lte" json:"lte,omitempty"`
+ // Gt specifies that this field must be greater than the specified value,
+ // exclusive. If the value of Gt is larger than a specified Lt or Lte, the
+ // range is reversed.
+ Gt *int64 `protobuf:"zigzag64,4,opt,name=gt" json:"gt,omitempty"`
+ // Gte specifies that this field must be greater than or equal to the
+ // specified value, inclusive. If the value of Gte is larger than a
+ // specified Lt or Lte, the range is reversed.
+ Gte *int64 `protobuf:"zigzag64,5,opt,name=gte" json:"gte,omitempty"`
+ // In specifies that this field must be equal to one of the specified
+ // values
+ In []int64 `protobuf:"zigzag64,6,rep,name=in" json:"in,omitempty"`
+ // NotIn specifies that this field cannot be equal to one of the specified
+ // values
+ NotIn []int64 `protobuf:"zigzag64,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"`
+ // IgnoreEmpty specifies that the validation rules of this field should be
+ // evaluated only if the field is not empty
+ IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"`
+}
+
+func (x *SInt64Rules) Reset() {
+ *x = SInt64Rules{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_validate_validate_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SInt64Rules) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SInt64Rules) ProtoMessage() {}
+
+func (x *SInt64Rules) ProtoReflect() protoreflect.Message {
+ mi := &file_validate_validate_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SInt64Rules.ProtoReflect.Descriptor instead.
+func (*SInt64Rules) Descriptor() ([]byte, []int) {
+ return file_validate_validate_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *SInt64Rules) GetConst() int64 {
+ if x != nil && x.Const != nil {
+ return *x.Const
+ }
+ return 0
+}
+
+func (x *SInt64Rules) GetLt() int64 {
+ if x != nil && x.Lt != nil {
+ return *x.Lt
+ }
+ return 0
+}
+
+func (x *SInt64Rules) GetLte() int64 {
+ if x != nil && x.Lte != nil {
+ return *x.Lte
+ }
+ return 0
+}
+
+func (x *SInt64Rules) GetGt() int64 {
+ if x != nil && x.Gt != nil {
+ return *x.Gt
+ }
+ return 0
+}
+
+func (x *SInt64Rules) GetGte() int64 {
+ if x != nil && x.Gte != nil {
+ return *x.Gte
+ }
+ return 0
+}
+
+func (x *SInt64Rules) GetIn() []int64 {
+ if x != nil {
+ return x.In
+ }
+ return nil
+}
+
+func (x *SInt64Rules) GetNotIn() []int64 {
+ if x != nil {
+ return x.NotIn
+ }
+ return nil
+}
+
+func (x *SInt64Rules) GetIgnoreEmpty() bool {
+ if x != nil && x.IgnoreEmpty != nil {
+ return *x.IgnoreEmpty
+ }
+ return false
+}
+
+// Fixed32Rules describes the constraints applied to `fixed32` values
+type Fixed32Rules struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Const specifies that this field must be exactly the specified value
+ Const *uint32 `protobuf:"fixed32,1,opt,name=const" json:"const,omitempty"`
+ // Lt specifies that this field must be less than the specified value,
+ // exclusive
+ Lt *uint32 `protobuf:"fixed32,2,opt,name=lt" json:"lt,omitempty"`
+ // Lte specifies that this field must be less than or equal to the
+ // specified value, inclusive
+ Lte *uint32 `protobuf:"fixed32,3,opt,name=lte" json:"lte,omitempty"`
+ // Gt specifies that this field must be greater than the specified value,
+ // exclusive. If the value of Gt is larger than a specified Lt or Lte, the
+ // range is reversed.
+ Gt *uint32 `protobuf:"fixed32,4,opt,name=gt" json:"gt,omitempty"`
+ // Gte specifies that this field must be greater than or equal to the
+ // specified value, inclusive. If the value of Gte is larger than a
+ // specified Lt or Lte, the range is reversed.
+ Gte *uint32 `protobuf:"fixed32,5,opt,name=gte" json:"gte,omitempty"`
+ // In specifies that this field must be equal to one of the specified
+ // values
+ In []uint32 `protobuf:"fixed32,6,rep,name=in" json:"in,omitempty"`
+ // NotIn specifies that this field cannot be equal to one of the specified
+ // values
+ NotIn []uint32 `protobuf:"fixed32,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"`
+ // IgnoreEmpty specifies that the validation rules of this field should be
+ // evaluated only if the field is not empty
+ IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"`
+}
+
+func (x *Fixed32Rules) Reset() {
+ *x = Fixed32Rules{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_validate_validate_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Fixed32Rules) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Fixed32Rules) ProtoMessage() {}
+
+func (x *Fixed32Rules) ProtoReflect() protoreflect.Message {
+ mi := &file_validate_validate_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Fixed32Rules.ProtoReflect.Descriptor instead.
+func (*Fixed32Rules) Descriptor() ([]byte, []int) {
+ return file_validate_validate_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *Fixed32Rules) GetConst() uint32 {
+ if x != nil && x.Const != nil {
+ return *x.Const
+ }
+ return 0
+}
+
+func (x *Fixed32Rules) GetLt() uint32 {
+ if x != nil && x.Lt != nil {
+ return *x.Lt
+ }
+ return 0
+}
+
+func (x *Fixed32Rules) GetLte() uint32 {
+ if x != nil && x.Lte != nil {
+ return *x.Lte
+ }
+ return 0
+}
+
+func (x *Fixed32Rules) GetGt() uint32 {
+ if x != nil && x.Gt != nil {
+ return *x.Gt
+ }
+ return 0
+}
+
+func (x *Fixed32Rules) GetGte() uint32 {
+ if x != nil && x.Gte != nil {
+ return *x.Gte
+ }
+ return 0
+}
+
+func (x *Fixed32Rules) GetIn() []uint32 {
+ if x != nil {
+ return x.In
+ }
+ return nil
+}
+
+func (x *Fixed32Rules) GetNotIn() []uint32 {
+ if x != nil {
+ return x.NotIn
+ }
+ return nil
+}
+
+func (x *Fixed32Rules) GetIgnoreEmpty() bool {
+ if x != nil && x.IgnoreEmpty != nil {
+ return *x.IgnoreEmpty
+ }
+ return false
+}
+
+// Fixed64Rules describes the constraints applied to `fixed64` values
+type Fixed64Rules struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Const specifies that this field must be exactly the specified value
+ Const *uint64 `protobuf:"fixed64,1,opt,name=const" json:"const,omitempty"`
+ // Lt specifies that this field must be less than the specified value,
+ // exclusive
+ Lt *uint64 `protobuf:"fixed64,2,opt,name=lt" json:"lt,omitempty"`
+ // Lte specifies that this field must be less than or equal to the
+ // specified value, inclusive
+ Lte *uint64 `protobuf:"fixed64,3,opt,name=lte" json:"lte,omitempty"`
+ // Gt specifies that this field must be greater than the specified value,
+ // exclusive. If the value of Gt is larger than a specified Lt or Lte, the
+ // range is reversed.
+ Gt *uint64 `protobuf:"fixed64,4,opt,name=gt" json:"gt,omitempty"`
+ // Gte specifies that this field must be greater than or equal to the
+ // specified value, inclusive. If the value of Gte is larger than a
+ // specified Lt or Lte, the range is reversed.
+ Gte *uint64 `protobuf:"fixed64,5,opt,name=gte" json:"gte,omitempty"`
+ // In specifies that this field must be equal to one of the specified
+ // values
+ In []uint64 `protobuf:"fixed64,6,rep,name=in" json:"in,omitempty"`
+ // NotIn specifies that this field cannot be equal to one of the specified
+ // values
+ NotIn []uint64 `protobuf:"fixed64,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"`
+ // IgnoreEmpty specifies that the validation rules of this field should be
+ // evaluated only if the field is not empty
+ IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"`
+}
+
+func (x *Fixed64Rules) Reset() {
+ *x = Fixed64Rules{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_validate_validate_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Fixed64Rules) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Fixed64Rules) ProtoMessage() {}
+
+func (x *Fixed64Rules) ProtoReflect() protoreflect.Message {
+ mi := &file_validate_validate_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Fixed64Rules.ProtoReflect.Descriptor instead.
+func (*Fixed64Rules) Descriptor() ([]byte, []int) {
+ return file_validate_validate_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *Fixed64Rules) GetConst() uint64 {
+ if x != nil && x.Const != nil {
+ return *x.Const
+ }
+ return 0
+}
+
+func (x *Fixed64Rules) GetLt() uint64 {
+ if x != nil && x.Lt != nil {
+ return *x.Lt
+ }
+ return 0
+}
+
+func (x *Fixed64Rules) GetLte() uint64 {
+ if x != nil && x.Lte != nil {
+ return *x.Lte
+ }
+ return 0
+}
+
+func (x *Fixed64Rules) GetGt() uint64 {
+ if x != nil && x.Gt != nil {
+ return *x.Gt
+ }
+ return 0
+}
+
+func (x *Fixed64Rules) GetGte() uint64 {
+ if x != nil && x.Gte != nil {
+ return *x.Gte
+ }
+ return 0
+}
+
+func (x *Fixed64Rules) GetIn() []uint64 {
+ if x != nil {
+ return x.In
+ }
+ return nil
+}
+
+func (x *Fixed64Rules) GetNotIn() []uint64 {
+ if x != nil {
+ return x.NotIn
+ }
+ return nil
+}
+
+func (x *Fixed64Rules) GetIgnoreEmpty() bool {
+ if x != nil && x.IgnoreEmpty != nil {
+ return *x.IgnoreEmpty
+ }
+ return false
+}
+
+// SFixed32Rules describes the constraints applied to `sfixed32` values
+type SFixed32Rules struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Const specifies that this field must be exactly the specified value
+ Const *int32 `protobuf:"fixed32,1,opt,name=const" json:"const,omitempty"`
+ // Lt specifies that this field must be less than the specified value,
+ // exclusive
+ Lt *int32 `protobuf:"fixed32,2,opt,name=lt" json:"lt,omitempty"`
+ // Lte specifies that this field must be less than or equal to the
+ // specified value, inclusive
+ Lte *int32 `protobuf:"fixed32,3,opt,name=lte" json:"lte,omitempty"`
+ // Gt specifies that this field must be greater than the specified value,
+ // exclusive. If the value of Gt is larger than a specified Lt or Lte, the
+ // range is reversed.
+ Gt *int32 `protobuf:"fixed32,4,opt,name=gt" json:"gt,omitempty"`
+ // Gte specifies that this field must be greater than or equal to the
+ // specified value, inclusive. If the value of Gte is larger than a
+ // specified Lt or Lte, the range is reversed.
+ Gte *int32 `protobuf:"fixed32,5,opt,name=gte" json:"gte,omitempty"`
+ // In specifies that this field must be equal to one of the specified
+ // values
+ In []int32 `protobuf:"fixed32,6,rep,name=in" json:"in,omitempty"`
+ // NotIn specifies that this field cannot be equal to one of the specified
+ // values
+ NotIn []int32 `protobuf:"fixed32,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"`
+ // IgnoreEmpty specifies that the validation rules of this field should be
+ // evaluated only if the field is not empty
+ IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"`
+}
+
+func (x *SFixed32Rules) Reset() {
+ *x = SFixed32Rules{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_validate_validate_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SFixed32Rules) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SFixed32Rules) ProtoMessage() {}
+
+func (x *SFixed32Rules) ProtoReflect() protoreflect.Message {
+ mi := &file_validate_validate_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SFixed32Rules.ProtoReflect.Descriptor instead.
+func (*SFixed32Rules) Descriptor() ([]byte, []int) {
+ return file_validate_validate_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *SFixed32Rules) GetConst() int32 {
+ if x != nil && x.Const != nil {
+ return *x.Const
+ }
+ return 0
+}
+
+func (x *SFixed32Rules) GetLt() int32 {
+ if x != nil && x.Lt != nil {
+ return *x.Lt
+ }
+ return 0
+}
+
+func (x *SFixed32Rules) GetLte() int32 {
+ if x != nil && x.Lte != nil {
+ return *x.Lte
+ }
+ return 0
+}
+
+func (x *SFixed32Rules) GetGt() int32 {
+ if x != nil && x.Gt != nil {
+ return *x.Gt
+ }
+ return 0
+}
+
+func (x *SFixed32Rules) GetGte() int32 {
+ if x != nil && x.Gte != nil {
+ return *x.Gte
+ }
+ return 0
+}
+
+func (x *SFixed32Rules) GetIn() []int32 {
+ if x != nil {
+ return x.In
+ }
+ return nil
+}
+
+func (x *SFixed32Rules) GetNotIn() []int32 {
+ if x != nil {
+ return x.NotIn
+ }
+ return nil
+}
+
+func (x *SFixed32Rules) GetIgnoreEmpty() bool {
+ if x != nil && x.IgnoreEmpty != nil {
+ return *x.IgnoreEmpty
+ }
+ return false
+}
+
+// SFixed64Rules describes the constraints applied to `sfixed64` values
+type SFixed64Rules struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Const specifies that this field must be exactly the specified value
+ Const *int64 `protobuf:"fixed64,1,opt,name=const" json:"const,omitempty"`
+ // Lt specifies that this field must be less than the specified value,
+ // exclusive
+ Lt *int64 `protobuf:"fixed64,2,opt,name=lt" json:"lt,omitempty"`
+ // Lte specifies that this field must be less than or equal to the
+ // specified value, inclusive
+ Lte *int64 `protobuf:"fixed64,3,opt,name=lte" json:"lte,omitempty"`
+ // Gt specifies that this field must be greater than the specified value,
+ // exclusive. If the value of Gt is larger than a specified Lt or Lte, the
+ // range is reversed.
+ Gt *int64 `protobuf:"fixed64,4,opt,name=gt" json:"gt,omitempty"`
+ // Gte specifies that this field must be greater than or equal to the
+ // specified value, inclusive. If the value of Gte is larger than a
+ // specified Lt or Lte, the range is reversed.
+ Gte *int64 `protobuf:"fixed64,5,opt,name=gte" json:"gte,omitempty"`
+ // In specifies that this field must be equal to one of the specified
+ // values
+ In []int64 `protobuf:"fixed64,6,rep,name=in" json:"in,omitempty"`
+ // NotIn specifies that this field cannot be equal to one of the specified
+ // values
+ NotIn []int64 `protobuf:"fixed64,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"`
+ // IgnoreEmpty specifies that the validation rules of this field should be
+ // evaluated only if the field is not empty
+ IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"`
+}
+
+func (x *SFixed64Rules) Reset() {
+ *x = SFixed64Rules{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_validate_validate_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SFixed64Rules) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SFixed64Rules) ProtoMessage() {}
+
+func (x *SFixed64Rules) ProtoReflect() protoreflect.Message {
+ mi := &file_validate_validate_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SFixed64Rules.ProtoReflect.Descriptor instead.
+func (*SFixed64Rules) Descriptor() ([]byte, []int) {
+ return file_validate_validate_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *SFixed64Rules) GetConst() int64 {
+ if x != nil && x.Const != nil {
+ return *x.Const
+ }
+ return 0
+}
+
+func (x *SFixed64Rules) GetLt() int64 {
+ if x != nil && x.Lt != nil {
+ return *x.Lt
+ }
+ return 0
+}
+
+func (x *SFixed64Rules) GetLte() int64 {
+ if x != nil && x.Lte != nil {
+ return *x.Lte
+ }
+ return 0
+}
+
+func (x *SFixed64Rules) GetGt() int64 {
+ if x != nil && x.Gt != nil {
+ return *x.Gt
+ }
+ return 0
+}
+
+func (x *SFixed64Rules) GetGte() int64 {
+ if x != nil && x.Gte != nil {
+ return *x.Gte
+ }
+ return 0
+}
+
+func (x *SFixed64Rules) GetIn() []int64 {
+ if x != nil {
+ return x.In
+ }
+ return nil
+}
+
+func (x *SFixed64Rules) GetNotIn() []int64 {
+ if x != nil {
+ return x.NotIn
+ }
+ return nil
+}
+
+func (x *SFixed64Rules) GetIgnoreEmpty() bool {
+ if x != nil && x.IgnoreEmpty != nil {
+ return *x.IgnoreEmpty
+ }
+ return false
+}
+
+// BoolRules describes the constraints applied to `bool` values
+type BoolRules struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Const specifies that this field must be exactly the specified value
+ Const *bool `protobuf:"varint,1,opt,name=const" json:"const,omitempty"`
+}
+
+func (x *BoolRules) Reset() {
+ *x = BoolRules{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_validate_validate_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *BoolRules) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BoolRules) ProtoMessage() {}
+
+func (x *BoolRules) ProtoReflect() protoreflect.Message {
+ mi := &file_validate_validate_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BoolRules.ProtoReflect.Descriptor instead.
+func (*BoolRules) Descriptor() ([]byte, []int) {
+ return file_validate_validate_proto_rawDescGZIP(), []int{13}
+}
+
+func (x *BoolRules) GetConst() bool {
+ if x != nil && x.Const != nil {
+ return *x.Const
+ }
+ return false
+}
+
+// StringRules describe the constraints applied to `string` values
+type StringRules struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Const specifies that this field must be exactly the specified value
+ Const *string `protobuf:"bytes,1,opt,name=const" json:"const,omitempty"`
+ // Len specifies that this field must be the specified number of
+ // characters (Unicode code points). Note that the number of
+ // characters may differ from the number of bytes in the string.
+ Len *uint64 `protobuf:"varint,19,opt,name=len" json:"len,omitempty"`
+ // MinLen specifies that this field must be the specified number of
+ // characters (Unicode code points) at a minimum. Note that the number of
+ // characters may differ from the number of bytes in the string.
+ MinLen *uint64 `protobuf:"varint,2,opt,name=min_len,json=minLen" json:"min_len,omitempty"`
+ // MaxLen specifies that this field must be the specified number of
+ // characters (Unicode code points) at a maximum. Note that the number of
+ // characters may differ from the number of bytes in the string.
+ MaxLen *uint64 `protobuf:"varint,3,opt,name=max_len,json=maxLen" json:"max_len,omitempty"`
+ // LenBytes specifies that this field must be the specified number of bytes
+ // at a minimum
+ LenBytes *uint64 `protobuf:"varint,20,opt,name=len_bytes,json=lenBytes" json:"len_bytes,omitempty"`
+ // MinBytes specifies that this field must be the specified number of bytes
+ // at a minimum
+ MinBytes *uint64 `protobuf:"varint,4,opt,name=min_bytes,json=minBytes" json:"min_bytes,omitempty"`
+ // MaxBytes specifies that this field must be the specified number of bytes
+ // at a maximum
+ MaxBytes *uint64 `protobuf:"varint,5,opt,name=max_bytes,json=maxBytes" json:"max_bytes,omitempty"`
+ // Pattern specifes that this field must match against the specified
+ // regular expression (RE2 syntax). The included expression should elide
+ // any delimiters.
+ Pattern *string `protobuf:"bytes,6,opt,name=pattern" json:"pattern,omitempty"`
+ // Prefix specifies that this field must have the specified substring at
+ // the beginning of the string.
+ Prefix *string `protobuf:"bytes,7,opt,name=prefix" json:"prefix,omitempty"`
+ // Suffix specifies that this field must have the specified substring at
+ // the end of the string.
+ Suffix *string `protobuf:"bytes,8,opt,name=suffix" json:"suffix,omitempty"`
+ // Contains specifies that this field must have the specified substring
+ // anywhere in the string.
+ Contains *string `protobuf:"bytes,9,opt,name=contains" json:"contains,omitempty"`
+ // NotContains specifies that this field cannot have the specified substring
+ // anywhere in the string.
+ NotContains *string `protobuf:"bytes,23,opt,name=not_contains,json=notContains" json:"not_contains,omitempty"`
+ // In specifies that this field must be equal to one of the specified
+ // values
+ In []string `protobuf:"bytes,10,rep,name=in" json:"in,omitempty"`
+ // NotIn specifies that this field cannot be equal to one of the specified
+ // values
+ NotIn []string `protobuf:"bytes,11,rep,name=not_in,json=notIn" json:"not_in,omitempty"`
+ // WellKnown rules provide advanced constraints against common string
+ // patterns
+ //
+ // Types that are assignable to WellKnown:
+ // *StringRules_Email
+ // *StringRules_Hostname
+ // *StringRules_Ip
+ // *StringRules_Ipv4
+ // *StringRules_Ipv6
+ // *StringRules_Uri
+ // *StringRules_UriRef
+ // *StringRules_Address
+ // *StringRules_Uuid
+ // *StringRules_WellKnownRegex
+ WellKnown isStringRules_WellKnown `protobuf_oneof:"well_known"`
+ // This applies to regexes HTTP_HEADER_NAME and HTTP_HEADER_VALUE to enable
+ // strict header validation.
+ // By default, this is true, and HTTP header validations are RFC-compliant.
+ // Setting to false will enable a looser validations that only disallows
+ // \r\n\0 characters, which can be used to bypass header matching rules.
+ Strict *bool `protobuf:"varint,25,opt,name=strict,def=1" json:"strict,omitempty"`
+ // IgnoreEmpty specifies that the validation rules of this field should be
+ // evaluated only if the field is not empty
+ IgnoreEmpty *bool `protobuf:"varint,26,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"`
+}
+
+// Default values for StringRules fields.
+const (
+ Default_StringRules_Strict = bool(true)
+)
+
+func (x *StringRules) Reset() {
+ *x = StringRules{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_validate_validate_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StringRules) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StringRules) ProtoMessage() {}
+
+func (x *StringRules) ProtoReflect() protoreflect.Message {
+ mi := &file_validate_validate_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StringRules.ProtoReflect.Descriptor instead.
+func (*StringRules) Descriptor() ([]byte, []int) {
+ return file_validate_validate_proto_rawDescGZIP(), []int{14}
+}
+
+func (x *StringRules) GetConst() string {
+ if x != nil && x.Const != nil {
+ return *x.Const
+ }
+ return ""
+}
+
+func (x *StringRules) GetLen() uint64 {
+ if x != nil && x.Len != nil {
+ return *x.Len
+ }
+ return 0
+}
+
+func (x *StringRules) GetMinLen() uint64 {
+ if x != nil && x.MinLen != nil {
+ return *x.MinLen
+ }
+ return 0
+}
+
+func (x *StringRules) GetMaxLen() uint64 {
+ if x != nil && x.MaxLen != nil {
+ return *x.MaxLen
+ }
+ return 0
+}
+
+func (x *StringRules) GetLenBytes() uint64 {
+ if x != nil && x.LenBytes != nil {
+ return *x.LenBytes
+ }
+ return 0
+}
+
+func (x *StringRules) GetMinBytes() uint64 {
+ if x != nil && x.MinBytes != nil {
+ return *x.MinBytes
+ }
+ return 0
+}
+
+func (x *StringRules) GetMaxBytes() uint64 {
+ if x != nil && x.MaxBytes != nil {
+ return *x.MaxBytes
+ }
+ return 0
+}
+
+func (x *StringRules) GetPattern() string {
+ if x != nil && x.Pattern != nil {
+ return *x.Pattern
+ }
+ return ""
+}
+
+func (x *StringRules) GetPrefix() string {
+ if x != nil && x.Prefix != nil {
+ return *x.Prefix
+ }
+ return ""
+}
+
+func (x *StringRules) GetSuffix() string {
+ if x != nil && x.Suffix != nil {
+ return *x.Suffix
+ }
+ return ""
+}
+
+func (x *StringRules) GetContains() string {
+ if x != nil && x.Contains != nil {
+ return *x.Contains
+ }
+ return ""
+}
+
+func (x *StringRules) GetNotContains() string {
+ if x != nil && x.NotContains != nil {
+ return *x.NotContains
+ }
+ return ""
+}
+
+func (x *StringRules) GetIn() []string {
+ if x != nil {
+ return x.In
+ }
+ return nil
+}
+
+func (x *StringRules) GetNotIn() []string {
+ if x != nil {
+ return x.NotIn
+ }
+ return nil
+}
+
+func (m *StringRules) GetWellKnown() isStringRules_WellKnown {
+ if m != nil {
+ return m.WellKnown
+ }
+ return nil
+}
+
+func (x *StringRules) GetEmail() bool {
+ if x, ok := x.GetWellKnown().(*StringRules_Email); ok {
+ return x.Email
+ }
+ return false
+}
+
+func (x *StringRules) GetHostname() bool {
+ if x, ok := x.GetWellKnown().(*StringRules_Hostname); ok {
+ return x.Hostname
+ }
+ return false
+}
+
+func (x *StringRules) GetIp() bool {
+ if x, ok := x.GetWellKnown().(*StringRules_Ip); ok {
+ return x.Ip
+ }
+ return false
+}
+
+func (x *StringRules) GetIpv4() bool {
+ if x, ok := x.GetWellKnown().(*StringRules_Ipv4); ok {
+ return x.Ipv4
+ }
+ return false
+}
+
+func (x *StringRules) GetIpv6() bool {
+ if x, ok := x.GetWellKnown().(*StringRules_Ipv6); ok {
+ return x.Ipv6
+ }
+ return false
+}
+
+func (x *StringRules) GetUri() bool {
+ if x, ok := x.GetWellKnown().(*StringRules_Uri); ok {
+ return x.Uri
+ }
+ return false
+}
+
+func (x *StringRules) GetUriRef() bool {
+ if x, ok := x.GetWellKnown().(*StringRules_UriRef); ok {
+ return x.UriRef
+ }
+ return false
+}
+
+func (x *StringRules) GetAddress() bool {
+ if x, ok := x.GetWellKnown().(*StringRules_Address); ok {
+ return x.Address
+ }
+ return false
+}
+
+func (x *StringRules) GetUuid() bool {
+ if x, ok := x.GetWellKnown().(*StringRules_Uuid); ok {
+ return x.Uuid
+ }
+ return false
+}
+
+func (x *StringRules) GetWellKnownRegex() KnownRegex {
+ if x, ok := x.GetWellKnown().(*StringRules_WellKnownRegex); ok {
+ return x.WellKnownRegex
+ }
+ return KnownRegex_UNKNOWN
+}
+
+func (x *StringRules) GetStrict() bool {
+ if x != nil && x.Strict != nil {
+ return *x.Strict
+ }
+ return Default_StringRules_Strict
+}
+
+func (x *StringRules) GetIgnoreEmpty() bool {
+ if x != nil && x.IgnoreEmpty != nil {
+ return *x.IgnoreEmpty
+ }
+ return false
+}
+
+type isStringRules_WellKnown interface {
+ isStringRules_WellKnown()
+}
+
+type StringRules_Email struct {
+ // Email specifies that the field must be a valid email address as
+ // defined by RFC 5322
+ Email bool `protobuf:"varint,12,opt,name=email,oneof"`
+}
+
+type StringRules_Hostname struct {
+ // Hostname specifies that the field must be a valid hostname as
+ // defined by RFC 1034. This constraint does not support
+ // internationalized domain names (IDNs).
+ Hostname bool `protobuf:"varint,13,opt,name=hostname,oneof"`
+}
+
+type StringRules_Ip struct {
+ // Ip specifies that the field must be a valid IP (v4 or v6) address.
+ // Valid IPv6 addresses should not include surrounding square brackets.
+ Ip bool `protobuf:"varint,14,opt,name=ip,oneof"`
+}
+
+type StringRules_Ipv4 struct {
+ // Ipv4 specifies that the field must be a valid IPv4 address.
+ Ipv4 bool `protobuf:"varint,15,opt,name=ipv4,oneof"`
+}
+
+type StringRules_Ipv6 struct {
+ // Ipv6 specifies that the field must be a valid IPv6 address. Valid
+ // IPv6 addresses should not include surrounding square brackets.
+ Ipv6 bool `protobuf:"varint,16,opt,name=ipv6,oneof"`
+}
+
+type StringRules_Uri struct {
+ // Uri specifies that the field must be a valid, absolute URI as defined
+ // by RFC 3986
+ Uri bool `protobuf:"varint,17,opt,name=uri,oneof"`
+}
+
+type StringRules_UriRef struct {
+ // UriRef specifies that the field must be a valid URI as defined by RFC
+ // 3986 and may be relative or absolute.
+ UriRef bool `protobuf:"varint,18,opt,name=uri_ref,json=uriRef,oneof"`
+}
+
+type StringRules_Address struct {
+ // Address specifies that the field must be either a valid hostname as
+ // defined by RFC 1034 (which does not support internationalized domain
+ // names or IDNs), or it can be a valid IP (v4 or v6).
+ Address bool `protobuf:"varint,21,opt,name=address,oneof"`
+}
+
+type StringRules_Uuid struct {
+ // Uuid specifies that the field must be a valid UUID as defined by
+ // RFC 4122
+ Uuid bool `protobuf:"varint,22,opt,name=uuid,oneof"`
+}
+
+type StringRules_WellKnownRegex struct {
+ // WellKnownRegex specifies a common well known pattern defined as a regex.
+ WellKnownRegex KnownRegex `protobuf:"varint,24,opt,name=well_known_regex,json=wellKnownRegex,enum=validate.KnownRegex,oneof"`
+}
+
+func (*StringRules_Email) isStringRules_WellKnown() {}
+
+func (*StringRules_Hostname) isStringRules_WellKnown() {}
+
+func (*StringRules_Ip) isStringRules_WellKnown() {}
+
+func (*StringRules_Ipv4) isStringRules_WellKnown() {}
+
+func (*StringRules_Ipv6) isStringRules_WellKnown() {}
+
+func (*StringRules_Uri) isStringRules_WellKnown() {}
+
+func (*StringRules_UriRef) isStringRules_WellKnown() {}
+
+func (*StringRules_Address) isStringRules_WellKnown() {}
+
+func (*StringRules_Uuid) isStringRules_WellKnown() {}
+
+func (*StringRules_WellKnownRegex) isStringRules_WellKnown() {}
+
+// BytesRules describe the constraints applied to `bytes` values
+type BytesRules struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Const specifies that this field must be exactly the specified value
+ Const []byte `protobuf:"bytes,1,opt,name=const" json:"const,omitempty"`
+ // Len specifies that this field must be the specified number of bytes
+ Len *uint64 `protobuf:"varint,13,opt,name=len" json:"len,omitempty"`
+ // MinLen specifies that this field must be the specified number of bytes
+ // at a minimum
+ MinLen *uint64 `protobuf:"varint,2,opt,name=min_len,json=minLen" json:"min_len,omitempty"`
+ // MaxLen specifies that this field must be the specified number of bytes
+ // at a maximum
+ MaxLen *uint64 `protobuf:"varint,3,opt,name=max_len,json=maxLen" json:"max_len,omitempty"`
+ // Pattern specifes that this field must match against the specified
+ // regular expression (RE2 syntax). The included expression should elide
+ // any delimiters.
+ Pattern *string `protobuf:"bytes,4,opt,name=pattern" json:"pattern,omitempty"`
+ // Prefix specifies that this field must have the specified bytes at the
+ // beginning of the string.
+ Prefix []byte `protobuf:"bytes,5,opt,name=prefix" json:"prefix,omitempty"`
+ // Suffix specifies that this field must have the specified bytes at the
+ // end of the string.
+ Suffix []byte `protobuf:"bytes,6,opt,name=suffix" json:"suffix,omitempty"`
+ // Contains specifies that this field must have the specified bytes
+ // anywhere in the string.
+ Contains []byte `protobuf:"bytes,7,opt,name=contains" json:"contains,omitempty"`
+ // In specifies that this field must be equal to one of the specified
+ // values
+ In [][]byte `protobuf:"bytes,8,rep,name=in" json:"in,omitempty"`
+ // NotIn specifies that this field cannot be equal to one of the specified
+ // values
+ NotIn [][]byte `protobuf:"bytes,9,rep,name=not_in,json=notIn" json:"not_in,omitempty"`
+ // WellKnown rules provide advanced constraints against common byte
+ // patterns
+ //
+ // Types that are assignable to WellKnown:
+ // *BytesRules_Ip
+ // *BytesRules_Ipv4
+ // *BytesRules_Ipv6
+ WellKnown isBytesRules_WellKnown `protobuf_oneof:"well_known"`
+ // IgnoreEmpty specifies that the validation rules of this field should be
+ // evaluated only if the field is not empty
+ IgnoreEmpty *bool `protobuf:"varint,14,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"`
+}
+
+func (x *BytesRules) Reset() {
+ *x = BytesRules{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_validate_validate_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *BytesRules) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BytesRules) ProtoMessage() {}
+
+func (x *BytesRules) ProtoReflect() protoreflect.Message {
+ mi := &file_validate_validate_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BytesRules.ProtoReflect.Descriptor instead.
+func (*BytesRules) Descriptor() ([]byte, []int) {
+ return file_validate_validate_proto_rawDescGZIP(), []int{15}
+}
+
+func (x *BytesRules) GetConst() []byte {
+ if x != nil {
+ return x.Const
+ }
+ return nil
+}
+
+func (x *BytesRules) GetLen() uint64 {
+ if x != nil && x.Len != nil {
+ return *x.Len
+ }
+ return 0
+}
+
+func (x *BytesRules) GetMinLen() uint64 {
+ if x != nil && x.MinLen != nil {
+ return *x.MinLen
+ }
+ return 0
+}
+
+func (x *BytesRules) GetMaxLen() uint64 {
+ if x != nil && x.MaxLen != nil {
+ return *x.MaxLen
+ }
+ return 0
+}
+
+func (x *BytesRules) GetPattern() string {
+ if x != nil && x.Pattern != nil {
+ return *x.Pattern
+ }
+ return ""
+}
+
+func (x *BytesRules) GetPrefix() []byte {
+ if x != nil {
+ return x.Prefix
+ }
+ return nil
+}
+
+func (x *BytesRules) GetSuffix() []byte {
+ if x != nil {
+ return x.Suffix
+ }
+ return nil
+}
+
+func (x *BytesRules) GetContains() []byte {
+ if x != nil {
+ return x.Contains
+ }
+ return nil
+}
+
+func (x *BytesRules) GetIn() [][]byte {
+ if x != nil {
+ return x.In
+ }
+ return nil
+}
+
+func (x *BytesRules) GetNotIn() [][]byte {
+ if x != nil {
+ return x.NotIn
+ }
+ return nil
+}
+
+func (m *BytesRules) GetWellKnown() isBytesRules_WellKnown {
+ if m != nil {
+ return m.WellKnown
+ }
+ return nil
+}
+
+func (x *BytesRules) GetIp() bool {
+ if x, ok := x.GetWellKnown().(*BytesRules_Ip); ok {
+ return x.Ip
+ }
+ return false
+}
+
+func (x *BytesRules) GetIpv4() bool {
+ if x, ok := x.GetWellKnown().(*BytesRules_Ipv4); ok {
+ return x.Ipv4
+ }
+ return false
+}
+
+func (x *BytesRules) GetIpv6() bool {
+ if x, ok := x.GetWellKnown().(*BytesRules_Ipv6); ok {
+ return x.Ipv6
+ }
+ return false
+}
+
+func (x *BytesRules) GetIgnoreEmpty() bool {
+ if x != nil && x.IgnoreEmpty != nil {
+ return *x.IgnoreEmpty
+ }
+ return false
+}
+
+type isBytesRules_WellKnown interface {
+ isBytesRules_WellKnown()
+}
+
+type BytesRules_Ip struct {
+ // Ip specifies that the field must be a valid IP (v4 or v6) address in
+ // byte format
+ Ip bool `protobuf:"varint,10,opt,name=ip,oneof"`
+}
+
+type BytesRules_Ipv4 struct {
+ // Ipv4 specifies that the field must be a valid IPv4 address in byte
+ // format
+ Ipv4 bool `protobuf:"varint,11,opt,name=ipv4,oneof"`
+}
+
+type BytesRules_Ipv6 struct {
+ // Ipv6 specifies that the field must be a valid IPv6 address in byte
+ // format
+ Ipv6 bool `protobuf:"varint,12,opt,name=ipv6,oneof"`
+}
+
+func (*BytesRules_Ip) isBytesRules_WellKnown() {}
+
+func (*BytesRules_Ipv4) isBytesRules_WellKnown() {}
+
+func (*BytesRules_Ipv6) isBytesRules_WellKnown() {}
+
+// EnumRules describe the constraints applied to enum values
+type EnumRules struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Const specifies that this field must be exactly the specified value
+ Const *int32 `protobuf:"varint,1,opt,name=const" json:"const,omitempty"`
+ // DefinedOnly specifies that this field must be only one of the defined
+ // values for this enum, failing on any undefined value.
+ DefinedOnly *bool `protobuf:"varint,2,opt,name=defined_only,json=definedOnly" json:"defined_only,omitempty"`
+ // In specifies that this field must be equal to one of the specified
+ // values
+ In []int32 `protobuf:"varint,3,rep,name=in" json:"in,omitempty"`
+ // NotIn specifies that this field cannot be equal to one of the specified
+ // values
+ NotIn []int32 `protobuf:"varint,4,rep,name=not_in,json=notIn" json:"not_in,omitempty"`
+}
+
+func (x *EnumRules) Reset() {
+ *x = EnumRules{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_validate_validate_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EnumRules) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EnumRules) ProtoMessage() {}
+
+func (x *EnumRules) ProtoReflect() protoreflect.Message {
+ mi := &file_validate_validate_proto_msgTypes[16]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EnumRules.ProtoReflect.Descriptor instead.
+func (*EnumRules) Descriptor() ([]byte, []int) {
+ return file_validate_validate_proto_rawDescGZIP(), []int{16}
+}
+
+func (x *EnumRules) GetConst() int32 {
+ if x != nil && x.Const != nil {
+ return *x.Const
+ }
+ return 0
+}
+
+func (x *EnumRules) GetDefinedOnly() bool {
+ if x != nil && x.DefinedOnly != nil {
+ return *x.DefinedOnly
+ }
+ return false
+}
+
+func (x *EnumRules) GetIn() []int32 {
+ if x != nil {
+ return x.In
+ }
+ return nil
+}
+
+func (x *EnumRules) GetNotIn() []int32 {
+ if x != nil {
+ return x.NotIn
+ }
+ return nil
+}
+
+// MessageRules describe the constraints applied to embedded message values.
+// For message-type fields, validation is performed recursively.
+type MessageRules struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Skip specifies that the validation rules of this field should not be
+ // evaluated
+ Skip *bool `protobuf:"varint,1,opt,name=skip" json:"skip,omitempty"`
+ // Required specifies that this field must be set
+ Required *bool `protobuf:"varint,2,opt,name=required" json:"required,omitempty"`
+}
+
+func (x *MessageRules) Reset() {
+ *x = MessageRules{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_validate_validate_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MessageRules) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MessageRules) ProtoMessage() {}
+
+func (x *MessageRules) ProtoReflect() protoreflect.Message {
+ mi := &file_validate_validate_proto_msgTypes[17]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MessageRules.ProtoReflect.Descriptor instead.
+func (*MessageRules) Descriptor() ([]byte, []int) {
+ return file_validate_validate_proto_rawDescGZIP(), []int{17}
+}
+
+func (x *MessageRules) GetSkip() bool {
+ if x != nil && x.Skip != nil {
+ return *x.Skip
+ }
+ return false
+}
+
+func (x *MessageRules) GetRequired() bool {
+ if x != nil && x.Required != nil {
+ return *x.Required
+ }
+ return false
+}
+
+// RepeatedRules describe the constraints applied to `repeated` values
+type RepeatedRules struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // MinItems specifies that this field must have the specified number of
+ // items at a minimum
+ MinItems *uint64 `protobuf:"varint,1,opt,name=min_items,json=minItems" json:"min_items,omitempty"`
+ // MaxItems specifies that this field must have the specified number of
+ // items at a maximum
+ MaxItems *uint64 `protobuf:"varint,2,opt,name=max_items,json=maxItems" json:"max_items,omitempty"`
+ // Unique specifies that all elements in this field must be unique. This
+ // contraint is only applicable to scalar and enum types (messages are not
+ // supported).
+ Unique *bool `protobuf:"varint,3,opt,name=unique" json:"unique,omitempty"`
+ // Items specifies the contraints to be applied to each item in the field.
+ // Repeated message fields will still execute validation against each item
+ // unless skip is specified here.
+ Items *FieldRules `protobuf:"bytes,4,opt,name=items" json:"items,omitempty"`
+ // IgnoreEmpty specifies that the validation rules of this field should be
+ // evaluated only if the field is not empty
+ IgnoreEmpty *bool `protobuf:"varint,5,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"`
+}
+
+func (x *RepeatedRules) Reset() {
+ *x = RepeatedRules{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_validate_validate_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RepeatedRules) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RepeatedRules) ProtoMessage() {}
+
+func (x *RepeatedRules) ProtoReflect() protoreflect.Message {
+ mi := &file_validate_validate_proto_msgTypes[18]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RepeatedRules.ProtoReflect.Descriptor instead.
+func (*RepeatedRules) Descriptor() ([]byte, []int) {
+ return file_validate_validate_proto_rawDescGZIP(), []int{18}
+}
+
+func (x *RepeatedRules) GetMinItems() uint64 {
+ if x != nil && x.MinItems != nil {
+ return *x.MinItems
+ }
+ return 0
+}
+
+func (x *RepeatedRules) GetMaxItems() uint64 {
+ if x != nil && x.MaxItems != nil {
+ return *x.MaxItems
+ }
+ return 0
+}
+
+func (x *RepeatedRules) GetUnique() bool {
+ if x != nil && x.Unique != nil {
+ return *x.Unique
+ }
+ return false
+}
+
+func (x *RepeatedRules) GetItems() *FieldRules {
+ if x != nil {
+ return x.Items
+ }
+ return nil
+}
+
+func (x *RepeatedRules) GetIgnoreEmpty() bool {
+ if x != nil && x.IgnoreEmpty != nil {
+ return *x.IgnoreEmpty
+ }
+ return false
+}
+
+// MapRules describe the constraints applied to `map` values
+type MapRules struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // MinPairs specifies that this field must have the specified number of
+ // KVs at a minimum
+ MinPairs *uint64 `protobuf:"varint,1,opt,name=min_pairs,json=minPairs" json:"min_pairs,omitempty"`
+ // MaxPairs specifies that this field must have the specified number of
+ // KVs at a maximum
+ MaxPairs *uint64 `protobuf:"varint,2,opt,name=max_pairs,json=maxPairs" json:"max_pairs,omitempty"`
+ // NoSparse specifies values in this field cannot be unset. This only
+ // applies to map's with message value types.
+ NoSparse *bool `protobuf:"varint,3,opt,name=no_sparse,json=noSparse" json:"no_sparse,omitempty"`
+ // Keys specifies the constraints to be applied to each key in the field.
+ Keys *FieldRules `protobuf:"bytes,4,opt,name=keys" json:"keys,omitempty"`
+ // Values specifies the constraints to be applied to the value of each key
+ // in the field. Message values will still have their validations evaluated
+ // unless skip is specified here.
+ Values *FieldRules `protobuf:"bytes,5,opt,name=values" json:"values,omitempty"`
+ // IgnoreEmpty specifies that the validation rules of this field should be
+ // evaluated only if the field is not empty
+ IgnoreEmpty *bool `protobuf:"varint,6,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"`
+}
+
+func (x *MapRules) Reset() {
+ *x = MapRules{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_validate_validate_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MapRules) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MapRules) ProtoMessage() {}
+
+func (x *MapRules) ProtoReflect() protoreflect.Message {
+ mi := &file_validate_validate_proto_msgTypes[19]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MapRules.ProtoReflect.Descriptor instead.
+func (*MapRules) Descriptor() ([]byte, []int) {
+ return file_validate_validate_proto_rawDescGZIP(), []int{19}
+}
+
+func (x *MapRules) GetMinPairs() uint64 {
+ if x != nil && x.MinPairs != nil {
+ return *x.MinPairs
+ }
+ return 0
+}
+
+func (x *MapRules) GetMaxPairs() uint64 {
+ if x != nil && x.MaxPairs != nil {
+ return *x.MaxPairs
+ }
+ return 0
+}
+
+func (x *MapRules) GetNoSparse() bool {
+ if x != nil && x.NoSparse != nil {
+ return *x.NoSparse
+ }
+ return false
+}
+
+func (x *MapRules) GetKeys() *FieldRules {
+ if x != nil {
+ return x.Keys
+ }
+ return nil
+}
+
+func (x *MapRules) GetValues() *FieldRules {
+ if x != nil {
+ return x.Values
+ }
+ return nil
+}
+
+func (x *MapRules) GetIgnoreEmpty() bool {
+ if x != nil && x.IgnoreEmpty != nil {
+ return *x.IgnoreEmpty
+ }
+ return false
+}
+
+// AnyRules describe constraints applied exclusively to the
+// `google.protobuf.Any` well-known type
+type AnyRules struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required specifies that this field must be set
+ Required *bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"`
+ // In specifies that this field's `type_url` must be equal to one of the
+ // specified values.
+ In []string `protobuf:"bytes,2,rep,name=in" json:"in,omitempty"`
+ // NotIn specifies that this field's `type_url` must not be equal to any of
+ // the specified values.
+ NotIn []string `protobuf:"bytes,3,rep,name=not_in,json=notIn" json:"not_in,omitempty"`
+}
+
+func (x *AnyRules) Reset() {
+ *x = AnyRules{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_validate_validate_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AnyRules) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AnyRules) ProtoMessage() {}
+
+func (x *AnyRules) ProtoReflect() protoreflect.Message {
+ mi := &file_validate_validate_proto_msgTypes[20]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AnyRules.ProtoReflect.Descriptor instead.
+func (*AnyRules) Descriptor() ([]byte, []int) {
+ return file_validate_validate_proto_rawDescGZIP(), []int{20}
+}
+
+func (x *AnyRules) GetRequired() bool {
+ if x != nil && x.Required != nil {
+ return *x.Required
+ }
+ return false
+}
+
+func (x *AnyRules) GetIn() []string {
+ if x != nil {
+ return x.In
+ }
+ return nil
+}
+
+func (x *AnyRules) GetNotIn() []string {
+ if x != nil {
+ return x.NotIn
+ }
+ return nil
+}
+
+// DurationRules describe the constraints applied exclusively to the
+// `google.protobuf.Duration` well-known type
+type DurationRules struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required specifies that this field must be set
+ Required *bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"`
+ // Const specifies that this field must be exactly the specified value
+ Const *durationpb.Duration `protobuf:"bytes,2,opt,name=const" json:"const,omitempty"`
+ // Lt specifies that this field must be less than the specified value,
+ // exclusive
+ Lt *durationpb.Duration `protobuf:"bytes,3,opt,name=lt" json:"lt,omitempty"`
+ // Lt specifies that this field must be less than the specified value,
+ // inclusive
+ Lte *durationpb.Duration `protobuf:"bytes,4,opt,name=lte" json:"lte,omitempty"`
+ // Gt specifies that this field must be greater than the specified value,
+ // exclusive
+ Gt *durationpb.Duration `protobuf:"bytes,5,opt,name=gt" json:"gt,omitempty"`
+ // Gte specifies that this field must be greater than the specified value,
+ // inclusive
+ Gte *durationpb.Duration `protobuf:"bytes,6,opt,name=gte" json:"gte,omitempty"`
+ // In specifies that this field must be equal to one of the specified
+ // values
+ In []*durationpb.Duration `protobuf:"bytes,7,rep,name=in" json:"in,omitempty"`
+ // NotIn specifies that this field cannot be equal to one of the specified
+ // values
+ NotIn []*durationpb.Duration `protobuf:"bytes,8,rep,name=not_in,json=notIn" json:"not_in,omitempty"`
+}
+
+func (x *DurationRules) Reset() {
+ *x = DurationRules{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_validate_validate_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DurationRules) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DurationRules) ProtoMessage() {}
+
+func (x *DurationRules) ProtoReflect() protoreflect.Message {
+ mi := &file_validate_validate_proto_msgTypes[21]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DurationRules.ProtoReflect.Descriptor instead.
+func (*DurationRules) Descriptor() ([]byte, []int) {
+ return file_validate_validate_proto_rawDescGZIP(), []int{21}
+}
+
+func (x *DurationRules) GetRequired() bool {
+ if x != nil && x.Required != nil {
+ return *x.Required
+ }
+ return false
+}
+
+func (x *DurationRules) GetConst() *durationpb.Duration {
+ if x != nil {
+ return x.Const
+ }
+ return nil
+}
+
+func (x *DurationRules) GetLt() *durationpb.Duration {
+ if x != nil {
+ return x.Lt
+ }
+ return nil
+}
+
+func (x *DurationRules) GetLte() *durationpb.Duration {
+ if x != nil {
+ return x.Lte
+ }
+ return nil
+}
+
+func (x *DurationRules) GetGt() *durationpb.Duration {
+ if x != nil {
+ return x.Gt
+ }
+ return nil
+}
+
+func (x *DurationRules) GetGte() *durationpb.Duration {
+ if x != nil {
+ return x.Gte
+ }
+ return nil
+}
+
+func (x *DurationRules) GetIn() []*durationpb.Duration {
+ if x != nil {
+ return x.In
+ }
+ return nil
+}
+
+func (x *DurationRules) GetNotIn() []*durationpb.Duration {
+ if x != nil {
+ return x.NotIn
+ }
+ return nil
+}
+
+// TimestampRules describe the constraints applied exclusively to the
+// `google.protobuf.Timestamp` well-known type
+type TimestampRules struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required specifies that this field must be set
+ Required *bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"`
+ // Const specifies that this field must be exactly the specified value
+ Const *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=const" json:"const,omitempty"`
+ // Lt specifies that this field must be less than the specified value,
+ // exclusive
+ Lt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=lt" json:"lt,omitempty"`
+ // Lte specifies that this field must be less than the specified value,
+ // inclusive
+ Lte *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=lte" json:"lte,omitempty"`
+ // Gt specifies that this field must be greater than the specified value,
+ // exclusive
+ Gt *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=gt" json:"gt,omitempty"`
+ // Gte specifies that this field must be greater than the specified value,
+ // inclusive
+ Gte *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=gte" json:"gte,omitempty"`
+ // LtNow specifies that this must be less than the current time. LtNow
+ // can only be used with the Within rule.
+ LtNow *bool `protobuf:"varint,7,opt,name=lt_now,json=ltNow" json:"lt_now,omitempty"`
+ // GtNow specifies that this must be greater than the current time. GtNow
+ // can only be used with the Within rule.
+ GtNow *bool `protobuf:"varint,8,opt,name=gt_now,json=gtNow" json:"gt_now,omitempty"`
+ // Within specifies that this field must be within this duration of the
+ // current time. This constraint can be used alone or with the LtNow and
+ // GtNow rules.
+ Within *durationpb.Duration `protobuf:"bytes,9,opt,name=within" json:"within,omitempty"`
+}
+
+func (x *TimestampRules) Reset() {
+ *x = TimestampRules{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_validate_validate_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TimestampRules) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TimestampRules) ProtoMessage() {}
+
+func (x *TimestampRules) ProtoReflect() protoreflect.Message {
+ mi := &file_validate_validate_proto_msgTypes[22]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TimestampRules.ProtoReflect.Descriptor instead.
+func (*TimestampRules) Descriptor() ([]byte, []int) {
+ return file_validate_validate_proto_rawDescGZIP(), []int{22}
+}
+
+func (x *TimestampRules) GetRequired() bool {
+ if x != nil && x.Required != nil {
+ return *x.Required
+ }
+ return false
+}
+
+func (x *TimestampRules) GetConst() *timestamppb.Timestamp {
+ if x != nil {
+ return x.Const
+ }
+ return nil
+}
+
+func (x *TimestampRules) GetLt() *timestamppb.Timestamp {
+ if x != nil {
+ return x.Lt
+ }
+ return nil
+}
+
+func (x *TimestampRules) GetLte() *timestamppb.Timestamp {
+ if x != nil {
+ return x.Lte
+ }
+ return nil
+}
+
+func (x *TimestampRules) GetGt() *timestamppb.Timestamp {
+ if x != nil {
+ return x.Gt
+ }
+ return nil
+}
+
+func (x *TimestampRules) GetGte() *timestamppb.Timestamp {
+ if x != nil {
+ return x.Gte
+ }
+ return nil
+}
+
+func (x *TimestampRules) GetLtNow() bool {
+ if x != nil && x.LtNow != nil {
+ return *x.LtNow
+ }
+ return false
+}
+
+func (x *TimestampRules) GetGtNow() bool {
+ if x != nil && x.GtNow != nil {
+ return *x.GtNow
+ }
+ return false
+}
+
+func (x *TimestampRules) GetWithin() *durationpb.Duration {
+ if x != nil {
+ return x.Within
+ }
+ return nil
+}
+
+var file_validate_validate_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 1071,
+ Name: "validate.disabled",
+ Tag: "varint,1071,opt,name=disabled",
+ Filename: "validate/validate.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 1072,
+ Name: "validate.ignored",
+ Tag: "varint,1072,opt,name=ignored",
+ Filename: "validate/validate.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.OneofOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 1071,
+ Name: "validate.required",
+ Tag: "varint,1071,opt,name=required",
+ Filename: "validate/validate.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.FieldOptions)(nil),
+ ExtensionType: (*FieldRules)(nil),
+ Field: 1071,
+ Name: "validate.rules",
+ Tag: "bytes,1071,opt,name=rules",
+ Filename: "validate/validate.proto",
+ },
+}
+
+// Extension fields to descriptorpb.MessageOptions.
+var (
+ // Disabled nullifies any validation rules for this message, including any
+ // message fields associated with it that do support validation.
+ //
+ // optional bool disabled = 1071;
+ E_Disabled = &file_validate_validate_proto_extTypes[0]
+ // Ignore skips generation of validation methods for this message.
+ //
+ // optional bool ignored = 1072;
+ E_Ignored = &file_validate_validate_proto_extTypes[1]
+)
+
+// Extension fields to descriptorpb.OneofOptions.
+var (
+ // Required ensures that exactly one the field options in a oneof is set;
+ // validation fails if no fields in the oneof are set.
+ //
+ // optional bool required = 1071;
+ E_Required = &file_validate_validate_proto_extTypes[2]
+)
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+ // Rules specify the validations to be performed on this field. By default,
+ // no validation is performed against a field.
+ //
+ // optional validate.FieldRules rules = 1071;
+ E_Rules = &file_validate_validate_proto_extTypes[3]
+)
+
+var File_validate_validate_proto protoreflect.FileDescriptor
+
+var file_validate_validate_proto_rawDesc = []byte{
+ 0x0a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64,
+ 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x76, 0x61, 0x6c, 0x69, 0x64,
+ 0x61, 0x74, 0x65, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc8, 0x08, 0x0a, 0x0a, 0x46, 0x69, 0x65, 0x6c, 0x64,
+ 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x30, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+ 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
+ 0x65, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x07,
+ 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2c, 0x0a, 0x05, 0x66, 0x6c, 0x6f, 0x61, 0x74,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
+ 0x65, 0x2e, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x05,
+ 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
+ 0x2e, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x06,
+ 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x12, 0x2c, 0x0a, 0x05, 0x69, 0x6e, 0x74, 0x33, 0x32, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
+ 0x2e, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x05, 0x69,
+ 0x6e, 0x74, 0x33, 0x32, 0x12, 0x2c, 0x0a, 0x05, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x49,
+ 0x6e, 0x74, 0x36, 0x34, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x05, 0x69, 0x6e, 0x74,
+ 0x36, 0x34, 0x12, 0x2f, 0x0a, 0x06, 0x75, 0x69, 0x6e, 0x74, 0x33, 0x32, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x55, 0x49,
+ 0x6e, 0x74, 0x33, 0x32, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x06, 0x75, 0x69, 0x6e,
+ 0x74, 0x33, 0x32, 0x12, 0x2f, 0x0a, 0x06, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x18, 0x06, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x55,
+ 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x06, 0x75, 0x69,
+ 0x6e, 0x74, 0x36, 0x34, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x69, 0x6e, 0x74, 0x33, 0x32, 0x18, 0x07,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e,
+ 0x53, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x06, 0x73,
+ 0x69, 0x6e, 0x74, 0x33, 0x32, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x18,
+ 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
+ 0x2e, 0x53, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x06,
+ 0x73, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x12, 0x32, 0x0a, 0x07, 0x66, 0x69, 0x78, 0x65, 0x64, 0x33,
+ 0x32, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61,
+ 0x74, 0x65, 0x2e, 0x46, 0x69, 0x78, 0x65, 0x64, 0x33, 0x32, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48,
+ 0x00, 0x52, 0x07, 0x66, 0x69, 0x78, 0x65, 0x64, 0x33, 0x32, 0x12, 0x32, 0x0a, 0x07, 0x66, 0x69,
+ 0x78, 0x65, 0x64, 0x36, 0x34, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x76, 0x61,
+ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x46, 0x69, 0x78, 0x65, 0x64, 0x36, 0x34, 0x52, 0x75,
+ 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x07, 0x66, 0x69, 0x78, 0x65, 0x64, 0x36, 0x34, 0x12, 0x35,
+ 0x0a, 0x08, 0x73, 0x66, 0x69, 0x78, 0x65, 0x64, 0x33, 0x32, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x17, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x46, 0x69, 0x78,
+ 0x65, 0x64, 0x33, 0x32, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x08, 0x73, 0x66, 0x69,
+ 0x78, 0x65, 0x64, 0x33, 0x32, 0x12, 0x35, 0x0a, 0x08, 0x73, 0x66, 0x69, 0x78, 0x65, 0x64, 0x36,
+ 0x34, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61,
+ 0x74, 0x65, 0x2e, 0x53, 0x46, 0x69, 0x78, 0x65, 0x64, 0x36, 0x34, 0x52, 0x75, 0x6c, 0x65, 0x73,
+ 0x48, 0x00, 0x52, 0x08, 0x73, 0x66, 0x69, 0x78, 0x65, 0x64, 0x36, 0x34, 0x12, 0x29, 0x0a, 0x04,
+ 0x62, 0x6f, 0x6f, 0x6c, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48,
+ 0x00, 0x52, 0x04, 0x62, 0x6f, 0x6f, 0x6c, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x74, 0x72, 0x69, 0x6e,
+ 0x67, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61,
+ 0x74, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00,
+ 0x52, 0x06, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x2c, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65,
+ 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61,
+ 0x74, 0x65, 0x2e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52,
+ 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x10,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e,
+ 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x04, 0x65, 0x6e, 0x75,
+ 0x6d, 0x12, 0x35, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x12, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x52,
+ 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x08,
+ 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x26, 0x0a, 0x03, 0x6d, 0x61, 0x70, 0x18,
+ 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
+ 0x2e, 0x4d, 0x61, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x03, 0x6d, 0x61, 0x70,
+ 0x12, 0x26, 0x0a, 0x03, 0x61, 0x6e, 0x79, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e,
+ 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x75, 0x6c, 0x65,
+ 0x73, 0x48, 0x00, 0x52, 0x03, 0x61, 0x6e, 0x79, 0x12, 0x35, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x76, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x75,
+ 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x16, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x09,
+ 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x06, 0x0a, 0x04, 0x74, 0x79, 0x70,
+ 0x65, 0x22, 0xb0, 0x01, 0x0a, 0x0a, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x73,
+ 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52,
+ 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x02, 0x52, 0x02, 0x6c, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x74, 0x65, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x02, 0x52, 0x03, 0x6c, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x67, 0x74, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x02, 0x52, 0x02, 0x67, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x74, 0x65, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x02, 0x52, 0x03, 0x67, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e,
+ 0x18, 0x06, 0x20, 0x03, 0x28, 0x02, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f,
+ 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x02, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49,
+ 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74,
+ 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45,
+ 0x6d, 0x70, 0x74, 0x79, 0x22, 0xb1, 0x01, 0x0a, 0x0b, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52,
+ 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x01, 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x74,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x02, 0x6c, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x74,
+ 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, 0x6c, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02,
+ 0x67, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x02, 0x67, 0x74, 0x12, 0x10, 0x0a, 0x03,
+ 0x67, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, 0x67, 0x74, 0x65, 0x12, 0x0e,
+ 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x01, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15,
+ 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x01, 0x52, 0x05,
+ 0x6e, 0x6f, 0x74, 0x49, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f,
+ 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e,
+ 0x6f, 0x72, 0x65, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xb0, 0x01, 0x0a, 0x0a, 0x49, 0x6e, 0x74,
+ 0x33, 0x32, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x0e, 0x0a,
+ 0x02, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x02, 0x6c, 0x74, 0x12, 0x10, 0x0a,
+ 0x03, 0x6c, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6c, 0x74, 0x65, 0x12,
+ 0x0e, 0x0a, 0x02, 0x67, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x02, 0x67, 0x74, 0x12,
+ 0x10, 0x0a, 0x03, 0x67, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x67, 0x74,
+ 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x05, 0x52, 0x02, 0x69,
+ 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28,
+ 0x05, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f,
+ 0x72, 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b,
+ 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xb0, 0x01, 0x0a, 0x0a,
+ 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f,
+ 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74,
+ 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x6c, 0x74,
+ 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6c,
+ 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x67, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02,
+ 0x67, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52,
+ 0x03, 0x67, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x03,
+ 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x07,
+ 0x20, 0x03, 0x28, 0x03, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69,
+ 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xb1,
+ 0x01, 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14,
+ 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x63,
+ 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d,
+ 0x52, 0x02, 0x6c, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0d, 0x52, 0x03, 0x6c, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x67, 0x74, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0d, 0x52, 0x02, 0x67, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x74, 0x65, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x0d, 0x52, 0x03, 0x67, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x06,
+ 0x20, 0x03, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x5f,
+ 0x69, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, 0x6e, 0x12,
+ 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18,
+ 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x6d, 0x70,
+ 0x74, 0x79, 0x22, 0xb1, 0x01, 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x75, 0x6c,
+ 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x04, 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x74, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x6c, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x74, 0x65, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x6c, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x67, 0x74,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x67, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x74,
+ 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x67, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02,
+ 0x69, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x04, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, 0x0a, 0x06,
+ 0x6e, 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x04, 0x52, 0x05, 0x6e, 0x6f,
+ 0x74, 0x49, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x6d,
+ 0x70, 0x74, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72,
+ 0x65, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xb1, 0x01, 0x0a, 0x0b, 0x53, 0x49, 0x6e, 0x74, 0x33,
+ 0x32, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x11, 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02,
+ 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x11, 0x52, 0x02, 0x6c, 0x74, 0x12, 0x10, 0x0a, 0x03,
+ 0x6c, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x11, 0x52, 0x03, 0x6c, 0x74, 0x65, 0x12, 0x0e,
+ 0x0a, 0x02, 0x67, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x11, 0x52, 0x02, 0x67, 0x74, 0x12, 0x10,
+ 0x0a, 0x03, 0x67, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x11, 0x52, 0x03, 0x67, 0x74, 0x65,
+ 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x11, 0x52, 0x02, 0x69, 0x6e,
+ 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x11,
+ 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72,
+ 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69,
+ 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xb1, 0x01, 0x0a, 0x0b, 0x53,
+ 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f,
+ 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x12, 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74,
+ 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x12, 0x52, 0x02, 0x6c, 0x74,
+ 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x12, 0x52, 0x03, 0x6c,
+ 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x67, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x12, 0x52, 0x02,
+ 0x67, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x12, 0x52,
+ 0x03, 0x67, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x12,
+ 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x07,
+ 0x20, 0x03, 0x28, 0x12, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69,
+ 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xb2,
+ 0x01, 0x0a, 0x0c, 0x46, 0x69, 0x78, 0x65, 0x64, 0x33, 0x32, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12,
+ 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x07, 0x52, 0x05,
+ 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x07, 0x52, 0x02, 0x6c, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x07, 0x52, 0x03, 0x6c, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x67, 0x74, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x07, 0x52, 0x02, 0x67, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x74, 0x65, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x07, 0x52, 0x03, 0x67, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18,
+ 0x06, 0x20, 0x03, 0x28, 0x07, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, 0x74,
+ 0x5f, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x07, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, 0x6e,
+ 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79,
+ 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x6d,
+ 0x70, 0x74, 0x79, 0x22, 0xb2, 0x01, 0x0a, 0x0c, 0x46, 0x69, 0x78, 0x65, 0x64, 0x36, 0x34, 0x52,
+ 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x06, 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x74,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x06, 0x52, 0x02, 0x6c, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x74,
+ 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x06, 0x52, 0x03, 0x6c, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02,
+ 0x67, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x06, 0x52, 0x02, 0x67, 0x74, 0x12, 0x10, 0x0a, 0x03,
+ 0x67, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x06, 0x52, 0x03, 0x67, 0x74, 0x65, 0x12, 0x0e,
+ 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x06, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15,
+ 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x06, 0x52, 0x05,
+ 0x6e, 0x6f, 0x74, 0x49, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f,
+ 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e,
+ 0x6f, 0x72, 0x65, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xb3, 0x01, 0x0a, 0x0d, 0x53, 0x46, 0x69,
+ 0x78, 0x65, 0x64, 0x33, 0x32, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f,
+ 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0f, 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74,
+ 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0f, 0x52, 0x02, 0x6c, 0x74,
+ 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0f, 0x52, 0x03, 0x6c,
+ 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x67, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0f, 0x52, 0x02,
+ 0x67, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0f, 0x52,
+ 0x03, 0x67, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0f,
+ 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x07,
+ 0x20, 0x03, 0x28, 0x0f, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69,
+ 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xb3,
+ 0x01, 0x0a, 0x0d, 0x53, 0x46, 0x69, 0x78, 0x65, 0x64, 0x36, 0x34, 0x52, 0x75, 0x6c, 0x65, 0x73,
+ 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x10, 0x52,
+ 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x10, 0x52, 0x02, 0x6c, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x74, 0x65, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x10, 0x52, 0x03, 0x6c, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x67, 0x74, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x10, 0x52, 0x02, 0x67, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x74, 0x65, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x10, 0x52, 0x03, 0x67, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e,
+ 0x18, 0x06, 0x20, 0x03, 0x28, 0x10, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f,
+ 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x10, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49,
+ 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74,
+ 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45,
+ 0x6d, 0x70, 0x74, 0x79, 0x22, 0x21, 0x0a, 0x09, 0x42, 0x6f, 0x6f, 0x6c, 0x52, 0x75, 0x6c, 0x65,
+ 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x22, 0xd4, 0x05, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69,
+ 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x10, 0x0a,
+ 0x03, 0x6c, 0x65, 0x6e, 0x18, 0x13, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x6c, 0x65, 0x6e, 0x12,
+ 0x17, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04,
+ 0x52, 0x06, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x5f,
+ 0x6c, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6d, 0x61, 0x78, 0x4c, 0x65,
+ 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6c, 0x65, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x14,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6c, 0x65, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1b,
+ 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x04, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d,
+ 0x61, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08,
+ 0x6d, 0x61, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74,
+ 0x65, 0x72, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65,
+ 0x72, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x07, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x75,
+ 0x66, 0x66, 0x69, 0x78, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x75, 0x66, 0x66,
+ 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x09,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x21,
+ 0x0a, 0x0c, 0x6e, 0x6f, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x17,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x6f, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e,
+ 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x02, 0x69,
+ 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x0b, 0x20, 0x03, 0x28,
+ 0x09, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, 0x6e, 0x12, 0x16, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69,
+ 0x6c, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c,
+ 0x12, 0x1c, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01,
+ 0x28, 0x08, 0x48, 0x00, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10,
+ 0x0a, 0x02, 0x69, 0x70, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x02, 0x69, 0x70,
+ 0x12, 0x14, 0x0a, 0x04, 0x69, 0x70, 0x76, 0x34, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00,
+ 0x52, 0x04, 0x69, 0x70, 0x76, 0x34, 0x12, 0x14, 0x0a, 0x04, 0x69, 0x70, 0x76, 0x36, 0x18, 0x10,
+ 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x04, 0x69, 0x70, 0x76, 0x36, 0x12, 0x12, 0x0a, 0x03,
+ 0x75, 0x72, 0x69, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x03, 0x75, 0x72, 0x69,
+ 0x12, 0x19, 0x0a, 0x07, 0x75, 0x72, 0x69, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x12, 0x20, 0x01, 0x28,
+ 0x08, 0x48, 0x00, 0x52, 0x06, 0x75, 0x72, 0x69, 0x52, 0x65, 0x66, 0x12, 0x1a, 0x0a, 0x07, 0x61,
+ 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x07,
+ 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x14, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18,
+ 0x16, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, 0x40, 0x0a,
+ 0x10, 0x77, 0x65, 0x6c, 0x6c, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x72, 0x65, 0x67, 0x65,
+ 0x78, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61,
+ 0x74, 0x65, 0x2e, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x48, 0x00, 0x52,
+ 0x0e, 0x77, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x12,
+ 0x1c, 0x0a, 0x06, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x3a,
+ 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x06, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x12, 0x21, 0x0a,
+ 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x1a, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x6d, 0x70, 0x74, 0x79,
+ 0x42, 0x0c, 0x0a, 0x0a, 0x77, 0x65, 0x6c, 0x6c, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x22, 0xe2,
+ 0x02, 0x0a, 0x0a, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a,
+ 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x63, 0x6f,
+ 0x6e, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x65, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x04,
+ 0x52, 0x03, 0x6c, 0x65, 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x12, 0x17,
+ 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x06, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65,
+ 0x72, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72,
+ 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x0c, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x75, 0x66,
+ 0x66, 0x69, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69,
+ 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x07, 0x20,
+ 0x01, 0x28, 0x0c, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x0e, 0x0a,
+ 0x02, 0x69, 0x6e, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, 0x0a,
+ 0x06, 0x6e, 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x05, 0x6e,
+ 0x6f, 0x74, 0x49, 0x6e, 0x12, 0x10, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08,
+ 0x48, 0x00, 0x52, 0x02, 0x69, 0x70, 0x12, 0x14, 0x0a, 0x04, 0x69, 0x70, 0x76, 0x34, 0x18, 0x0b,
+ 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x04, 0x69, 0x70, 0x76, 0x34, 0x12, 0x14, 0x0a, 0x04,
+ 0x69, 0x70, 0x76, 0x36, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x04, 0x69, 0x70,
+ 0x76, 0x36, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x6d, 0x70,
+ 0x74, 0x79, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65,
+ 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x0c, 0x0a, 0x0a, 0x77, 0x65, 0x6c, 0x6c, 0x5f, 0x6b, 0x6e,
+ 0x6f, 0x77, 0x6e, 0x22, 0x6b, 0x0a, 0x09, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x75, 0x6c, 0x65, 0x73,
+ 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52,
+ 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65,
+ 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x64, 0x65,
+ 0x66, 0x69, 0x6e, 0x65, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18,
+ 0x03, 0x20, 0x03, 0x28, 0x05, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, 0x74,
+ 0x5f, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x03, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, 0x6e,
+ 0x22, 0x3e, 0x0a, 0x0c, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x75, 0x6c, 0x65, 0x73,
+ 0x12, 0x12, 0x0a, 0x04, 0x73, 0x6b, 0x69, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04,
+ 0x73, 0x6b, 0x69, 0x70, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64,
+ 0x22, 0xb0, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x52, 0x75, 0x6c,
+ 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12,
+ 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x16, 0x0a, 0x06,
+ 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x75, 0x6e,
+ 0x69, 0x71, 0x75, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x46,
+ 0x69, 0x65, 0x6c, 0x64, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73,
+ 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x6d,
+ 0x70, 0x74, 0x79, 0x22, 0xdc, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x73,
+ 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x61, 0x69, 0x72, 0x73, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x50, 0x61, 0x69, 0x72, 0x73, 0x12, 0x1b, 0x0a,
+ 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x61, 0x69, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04,
+ 0x52, 0x08, 0x6d, 0x61, 0x78, 0x50, 0x61, 0x69, 0x72, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f,
+ 0x5f, 0x73, 0x70, 0x61, 0x72, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e,
+ 0x6f, 0x53, 0x70, 0x61, 0x72, 0x73, 0x65, 0x12, 0x28, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
+ 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x04, 0x6b, 0x65, 0x79,
+ 0x73, 0x12, 0x2c, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x14, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x46, 0x69, 0x65,
+ 0x6c, 0x64, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12,
+ 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18,
+ 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x6d, 0x70,
+ 0x74, 0x79, 0x22, 0x4d, 0x0a, 0x08, 0x41, 0x6e, 0x79, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x1a,
+ 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e,
+ 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f,
+ 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49,
+ 0x6e, 0x22, 0xe9, 0x02, 0x0a, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x75,
+ 0x6c, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12,
+ 0x2f, 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74,
+ 0x12, 0x29, 0x0a, 0x02, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44,
+ 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x02, 0x6c, 0x74, 0x12, 0x2b, 0x0a, 0x03, 0x6c,
+ 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x6c, 0x74, 0x65, 0x12, 0x29, 0x0a, 0x02, 0x67, 0x74, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x02, 0x67, 0x74, 0x12, 0x2b, 0x0a, 0x03, 0x67, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x67, 0x74, 0x65,
+ 0x12, 0x29, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44,
+ 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x30, 0x0a, 0x06, 0x6e,
+ 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, 0x6e, 0x22, 0xf3, 0x02,
+ 0x0a, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x73,
+ 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x05,
+ 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x2a,
+ 0x0a, 0x02, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
+ 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x02, 0x6c, 0x74, 0x12, 0x2c, 0x0a, 0x03, 0x6c, 0x74,
+ 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74,
+ 0x61, 0x6d, 0x70, 0x52, 0x03, 0x6c, 0x74, 0x65, 0x12, 0x2a, 0x0a, 0x02, 0x67, 0x74, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
+ 0x52, 0x02, 0x67, 0x74, 0x12, 0x2c, 0x0a, 0x03, 0x67, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x03, 0x67,
+ 0x74, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x6c, 0x74, 0x5f, 0x6e, 0x6f, 0x77, 0x18, 0x07, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x05, 0x6c, 0x74, 0x4e, 0x6f, 0x77, 0x12, 0x15, 0x0a, 0x06, 0x67, 0x74, 0x5f,
+ 0x6e, 0x6f, 0x77, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x67, 0x74, 0x4e, 0x6f, 0x77,
+ 0x12, 0x31, 0x0a, 0x06, 0x77, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x77, 0x69, 0x74,
+ 0x68, 0x69, 0x6e, 0x2a, 0x46, 0x0a, 0x0a, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x67, 0x65,
+ 0x78, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x14,
+ 0x0a, 0x10, 0x48, 0x54, 0x54, 0x50, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x5f, 0x4e, 0x41,
+ 0x4d, 0x45, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x48, 0x54, 0x54, 0x50, 0x5f, 0x48, 0x45, 0x41,
+ 0x44, 0x45, 0x52, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x10, 0x02, 0x3a, 0x3c, 0x0a, 0x08, 0x64,
+ 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
+ 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xaf, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x3a, 0x3a, 0x0a, 0x07, 0x69, 0x67, 0x6e,
+ 0x6f, 0x72, 0x65, 0x64, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xb0, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x69, 0x67,
+ 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x3a, 0x3a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65,
+ 0x64, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x18, 0xaf, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65,
+ 0x64, 0x3a, 0x4a, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
+ 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xaf, 0x08, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x14, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x46, 0x69, 0x65, 0x6c,
+ 0x64, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x42, 0x50, 0x0a,
+ 0x1a, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x70,
+ 0x67, 0x76, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5a, 0x32, 0x67, 0x69, 0x74,
+ 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f,
+ 0x78, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x76, 0x61,
+ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
+}
+
+var (
+ file_validate_validate_proto_rawDescOnce sync.Once
+ file_validate_validate_proto_rawDescData = file_validate_validate_proto_rawDesc
+)
+
+func file_validate_validate_proto_rawDescGZIP() []byte {
+ file_validate_validate_proto_rawDescOnce.Do(func() {
+ file_validate_validate_proto_rawDescData = protoimpl.X.CompressGZIP(file_validate_validate_proto_rawDescData)
+ })
+ return file_validate_validate_proto_rawDescData
+}
+
+var file_validate_validate_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_validate_validate_proto_msgTypes = make([]protoimpl.MessageInfo, 23)
+var file_validate_validate_proto_goTypes = []interface{}{
+ (KnownRegex)(0), // 0: validate.KnownRegex
+ (*FieldRules)(nil), // 1: validate.FieldRules
+ (*FloatRules)(nil), // 2: validate.FloatRules
+ (*DoubleRules)(nil), // 3: validate.DoubleRules
+ (*Int32Rules)(nil), // 4: validate.Int32Rules
+ (*Int64Rules)(nil), // 5: validate.Int64Rules
+ (*UInt32Rules)(nil), // 6: validate.UInt32Rules
+ (*UInt64Rules)(nil), // 7: validate.UInt64Rules
+ (*SInt32Rules)(nil), // 8: validate.SInt32Rules
+ (*SInt64Rules)(nil), // 9: validate.SInt64Rules
+ (*Fixed32Rules)(nil), // 10: validate.Fixed32Rules
+ (*Fixed64Rules)(nil), // 11: validate.Fixed64Rules
+ (*SFixed32Rules)(nil), // 12: validate.SFixed32Rules
+ (*SFixed64Rules)(nil), // 13: validate.SFixed64Rules
+ (*BoolRules)(nil), // 14: validate.BoolRules
+ (*StringRules)(nil), // 15: validate.StringRules
+ (*BytesRules)(nil), // 16: validate.BytesRules
+ (*EnumRules)(nil), // 17: validate.EnumRules
+ (*MessageRules)(nil), // 18: validate.MessageRules
+ (*RepeatedRules)(nil), // 19: validate.RepeatedRules
+ (*MapRules)(nil), // 20: validate.MapRules
+ (*AnyRules)(nil), // 21: validate.AnyRules
+ (*DurationRules)(nil), // 22: validate.DurationRules
+ (*TimestampRules)(nil), // 23: validate.TimestampRules
+ (*durationpb.Duration)(nil), // 24: google.protobuf.Duration
+ (*timestamppb.Timestamp)(nil), // 25: google.protobuf.Timestamp
+ (*descriptorpb.MessageOptions)(nil), // 26: google.protobuf.MessageOptions
+ (*descriptorpb.OneofOptions)(nil), // 27: google.protobuf.OneofOptions
+ (*descriptorpb.FieldOptions)(nil), // 28: google.protobuf.FieldOptions
+}
+var file_validate_validate_proto_depIdxs = []int32{
+ 18, // 0: validate.FieldRules.message:type_name -> validate.MessageRules
+ 2, // 1: validate.FieldRules.float:type_name -> validate.FloatRules
+ 3, // 2: validate.FieldRules.double:type_name -> validate.DoubleRules
+ 4, // 3: validate.FieldRules.int32:type_name -> validate.Int32Rules
+ 5, // 4: validate.FieldRules.int64:type_name -> validate.Int64Rules
+ 6, // 5: validate.FieldRules.uint32:type_name -> validate.UInt32Rules
+ 7, // 6: validate.FieldRules.uint64:type_name -> validate.UInt64Rules
+ 8, // 7: validate.FieldRules.sint32:type_name -> validate.SInt32Rules
+ 9, // 8: validate.FieldRules.sint64:type_name -> validate.SInt64Rules
+ 10, // 9: validate.FieldRules.fixed32:type_name -> validate.Fixed32Rules
+ 11, // 10: validate.FieldRules.fixed64:type_name -> validate.Fixed64Rules
+ 12, // 11: validate.FieldRules.sfixed32:type_name -> validate.SFixed32Rules
+ 13, // 12: validate.FieldRules.sfixed64:type_name -> validate.SFixed64Rules
+ 14, // 13: validate.FieldRules.bool:type_name -> validate.BoolRules
+ 15, // 14: validate.FieldRules.string:type_name -> validate.StringRules
+ 16, // 15: validate.FieldRules.bytes:type_name -> validate.BytesRules
+ 17, // 16: validate.FieldRules.enum:type_name -> validate.EnumRules
+ 19, // 17: validate.FieldRules.repeated:type_name -> validate.RepeatedRules
+ 20, // 18: validate.FieldRules.map:type_name -> validate.MapRules
+ 21, // 19: validate.FieldRules.any:type_name -> validate.AnyRules
+ 22, // 20: validate.FieldRules.duration:type_name -> validate.DurationRules
+ 23, // 21: validate.FieldRules.timestamp:type_name -> validate.TimestampRules
+ 0, // 22: validate.StringRules.well_known_regex:type_name -> validate.KnownRegex
+ 1, // 23: validate.RepeatedRules.items:type_name -> validate.FieldRules
+ 1, // 24: validate.MapRules.keys:type_name -> validate.FieldRules
+ 1, // 25: validate.MapRules.values:type_name -> validate.FieldRules
+ 24, // 26: validate.DurationRules.const:type_name -> google.protobuf.Duration
+ 24, // 27: validate.DurationRules.lt:type_name -> google.protobuf.Duration
+ 24, // 28: validate.DurationRules.lte:type_name -> google.protobuf.Duration
+ 24, // 29: validate.DurationRules.gt:type_name -> google.protobuf.Duration
+ 24, // 30: validate.DurationRules.gte:type_name -> google.protobuf.Duration
+ 24, // 31: validate.DurationRules.in:type_name -> google.protobuf.Duration
+ 24, // 32: validate.DurationRules.not_in:type_name -> google.protobuf.Duration
+ 25, // 33: validate.TimestampRules.const:type_name -> google.protobuf.Timestamp
+ 25, // 34: validate.TimestampRules.lt:type_name -> google.protobuf.Timestamp
+ 25, // 35: validate.TimestampRules.lte:type_name -> google.protobuf.Timestamp
+ 25, // 36: validate.TimestampRules.gt:type_name -> google.protobuf.Timestamp
+ 25, // 37: validate.TimestampRules.gte:type_name -> google.protobuf.Timestamp
+ 24, // 38: validate.TimestampRules.within:type_name -> google.protobuf.Duration
+ 26, // 39: validate.disabled:extendee -> google.protobuf.MessageOptions
+ 26, // 40: validate.ignored:extendee -> google.protobuf.MessageOptions
+ 27, // 41: validate.required:extendee -> google.protobuf.OneofOptions
+ 28, // 42: validate.rules:extendee -> google.protobuf.FieldOptions
+ 1, // 43: validate.rules:type_name -> validate.FieldRules
+ 44, // [44:44] is the sub-list for method output_type
+ 44, // [44:44] is the sub-list for method input_type
+ 43, // [43:44] is the sub-list for extension type_name
+ 39, // [39:43] is the sub-list for extension extendee
+ 0, // [0:39] is the sub-list for field type_name
+}
+
+func init() { file_validate_validate_proto_init() }
+func file_validate_validate_proto_init() {
+ if File_validate_validate_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_validate_validate_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FieldRules); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_validate_validate_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FloatRules); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_validate_validate_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DoubleRules); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_validate_validate_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Int32Rules); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_validate_validate_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Int64Rules); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_validate_validate_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UInt32Rules); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_validate_validate_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UInt64Rules); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_validate_validate_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SInt32Rules); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_validate_validate_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SInt64Rules); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_validate_validate_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Fixed32Rules); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_validate_validate_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Fixed64Rules); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_validate_validate_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SFixed32Rules); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_validate_validate_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SFixed64Rules); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_validate_validate_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BoolRules); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_validate_validate_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StringRules); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_validate_validate_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BytesRules); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_validate_validate_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EnumRules); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_validate_validate_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MessageRules); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_validate_validate_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RepeatedRules); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_validate_validate_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MapRules); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_validate_validate_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AnyRules); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_validate_validate_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DurationRules); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_validate_validate_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TimestampRules); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_validate_validate_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*FieldRules_Float)(nil),
+ (*FieldRules_Double)(nil),
+ (*FieldRules_Int32)(nil),
+ (*FieldRules_Int64)(nil),
+ (*FieldRules_Uint32)(nil),
+ (*FieldRules_Uint64)(nil),
+ (*FieldRules_Sint32)(nil),
+ (*FieldRules_Sint64)(nil),
+ (*FieldRules_Fixed32)(nil),
+ (*FieldRules_Fixed64)(nil),
+ (*FieldRules_Sfixed32)(nil),
+ (*FieldRules_Sfixed64)(nil),
+ (*FieldRules_Bool)(nil),
+ (*FieldRules_String_)(nil),
+ (*FieldRules_Bytes)(nil),
+ (*FieldRules_Enum)(nil),
+ (*FieldRules_Repeated)(nil),
+ (*FieldRules_Map)(nil),
+ (*FieldRules_Any)(nil),
+ (*FieldRules_Duration)(nil),
+ (*FieldRules_Timestamp)(nil),
+ }
+ file_validate_validate_proto_msgTypes[14].OneofWrappers = []interface{}{
+ (*StringRules_Email)(nil),
+ (*StringRules_Hostname)(nil),
+ (*StringRules_Ip)(nil),
+ (*StringRules_Ipv4)(nil),
+ (*StringRules_Ipv6)(nil),
+ (*StringRules_Uri)(nil),
+ (*StringRules_UriRef)(nil),
+ (*StringRules_Address)(nil),
+ (*StringRules_Uuid)(nil),
+ (*StringRules_WellKnownRegex)(nil),
+ }
+ file_validate_validate_proto_msgTypes[15].OneofWrappers = []interface{}{
+ (*BytesRules_Ip)(nil),
+ (*BytesRules_Ipv4)(nil),
+ (*BytesRules_Ipv6)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_validate_validate_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 23,
+ NumExtensions: 4,
+ NumServices: 0,
+ },
+ GoTypes: file_validate_validate_proto_goTypes,
+ DependencyIndexes: file_validate_validate_proto_depIdxs,
+ EnumInfos: file_validate_validate_proto_enumTypes,
+ MessageInfos: file_validate_validate_proto_msgTypes,
+ ExtensionInfos: file_validate_validate_proto_extTypes,
+ }.Build()
+ File_validate_validate_proto = out.File
+ file_validate_validate_proto_rawDesc = nil
+ file_validate_validate_proto_goTypes = nil
+ file_validate_validate_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.proto b/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.proto
new file mode 100644
index 000000000..4195ecf9c
--- /dev/null
+++ b/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.proto
@@ -0,0 +1,863 @@
+syntax = "proto2";
+package validate;
+
+option go_package = "github.com/envoyproxy/protoc-gen-validate/validate";
+option java_package = "io.envoyproxy.pgv.validate";
+
+import "google/protobuf/descriptor.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/timestamp.proto";
+
+// Validation rules applied at the message level
+extend google.protobuf.MessageOptions {
+ // Disabled nullifies any validation rules for this message, including any
+ // message fields associated with it that do support validation.
+ optional bool disabled = 1071;
+ // Ignore skips generation of validation methods for this message.
+ optional bool ignored = 1072;
+}
+
+// Validation rules applied at the oneof level
+extend google.protobuf.OneofOptions {
+ // Required ensures that exactly one the field options in a oneof is set;
+ // validation fails if no fields in the oneof are set.
+ optional bool required = 1071;
+}
+
+// Validation rules applied at the field level
+extend google.protobuf.FieldOptions {
+ // Rules specify the validations to be performed on this field. By default,
+ // no validation is performed against a field.
+ optional FieldRules rules = 1071;
+}
+
+// FieldRules encapsulates the rules for each type of field. Depending on the
+// field, the correct set should be used to ensure proper validations.
+message FieldRules {
+ optional MessageRules message = 17;
+ oneof type {
+ // Scalar Field Types
+ FloatRules float = 1;
+ DoubleRules double = 2;
+ Int32Rules int32 = 3;
+ Int64Rules int64 = 4;
+ UInt32Rules uint32 = 5;
+ UInt64Rules uint64 = 6;
+ SInt32Rules sint32 = 7;
+ SInt64Rules sint64 = 8;
+ Fixed32Rules fixed32 = 9;
+ Fixed64Rules fixed64 = 10;
+ SFixed32Rules sfixed32 = 11;
+ SFixed64Rules sfixed64 = 12;
+ BoolRules bool = 13;
+ StringRules string = 14;
+ BytesRules bytes = 15;
+
+ // Complex Field Types
+ EnumRules enum = 16;
+ RepeatedRules repeated = 18;
+ MapRules map = 19;
+
+ // Well-Known Field Types
+ AnyRules any = 20;
+ DurationRules duration = 21;
+ TimestampRules timestamp = 22;
+ }
+}
+
+// FloatRules describes the constraints applied to `float` values
+message FloatRules {
+ // Const specifies that this field must be exactly the specified value
+ optional float const = 1;
+
+ // Lt specifies that this field must be less than the specified value,
+ // exclusive
+ optional float lt = 2;
+
+ // Lte specifies that this field must be less than or equal to the
+ // specified value, inclusive
+ optional float lte = 3;
+
+ // Gt specifies that this field must be greater than the specified value,
+ // exclusive. If the value of Gt is larger than a specified Lt or Lte, the
+ // range is reversed.
+ optional float gt = 4;
+
+ // Gte specifies that this field must be greater than or equal to the
+ // specified value, inclusive. If the value of Gte is larger than a
+ // specified Lt or Lte, the range is reversed.
+ optional float gte = 5;
+
+ // In specifies that this field must be equal to one of the specified
+ // values
+ repeated float in = 6;
+
+ // NotIn specifies that this field cannot be equal to one of the specified
+ // values
+ repeated float not_in = 7;
+
+ // IgnoreEmpty specifies that the validation rules of this field should be
+ // evaluated only if the field is not empty
+ optional bool ignore_empty = 8;
+}
+
+// DoubleRules describes the constraints applied to `double` values
+message DoubleRules {
+ // Const specifies that this field must be exactly the specified value
+ optional double const = 1;
+
+ // Lt specifies that this field must be less than the specified value,
+ // exclusive
+ optional double lt = 2;
+
+ // Lte specifies that this field must be less than or equal to the
+ // specified value, inclusive
+ optional double lte = 3;
+
+ // Gt specifies that this field must be greater than the specified value,
+ // exclusive. If the value of Gt is larger than a specified Lt or Lte, the
+ // range is reversed.
+ optional double gt = 4;
+
+ // Gte specifies that this field must be greater than or equal to the
+ // specified value, inclusive. If the value of Gte is larger than a
+ // specified Lt or Lte, the range is reversed.
+ optional double gte = 5;
+
+ // In specifies that this field must be equal to one of the specified
+ // values
+ repeated double in = 6;
+
+ // NotIn specifies that this field cannot be equal to one of the specified
+ // values
+ repeated double not_in = 7;
+
+ // IgnoreEmpty specifies that the validation rules of this field should be
+ // evaluated only if the field is not empty
+ optional bool ignore_empty = 8;
+}
+
+// Int32Rules describes the constraints applied to `int32` values
+message Int32Rules {
+ // Const specifies that this field must be exactly the specified value
+ optional int32 const = 1;
+
+ // Lt specifies that this field must be less than the specified value,
+ // exclusive
+ optional int32 lt = 2;
+
+ // Lte specifies that this field must be less than or equal to the
+ // specified value, inclusive
+ optional int32 lte = 3;
+
+ // Gt specifies that this field must be greater than the specified value,
+ // exclusive. If the value of Gt is larger than a specified Lt or Lte, the
+ // range is reversed.
+ optional int32 gt = 4;
+
+ // Gte specifies that this field must be greater than or equal to the
+ // specified value, inclusive. If the value of Gte is larger than a
+ // specified Lt or Lte, the range is reversed.
+ optional int32 gte = 5;
+
+ // In specifies that this field must be equal to one of the specified
+ // values
+ repeated int32 in = 6;
+
+ // NotIn specifies that this field cannot be equal to one of the specified
+ // values
+ repeated int32 not_in = 7;
+
+ // IgnoreEmpty specifies that the validation rules of this field should be
+ // evaluated only if the field is not empty
+ optional bool ignore_empty = 8;
+}
+
+// Int64Rules describes the constraints applied to `int64` values
+message Int64Rules {
+ // Const specifies that this field must be exactly the specified value
+ optional int64 const = 1;
+
+ // Lt specifies that this field must be less than the specified value,
+ // exclusive
+ optional int64 lt = 2;
+
+ // Lte specifies that this field must be less than or equal to the
+ // specified value, inclusive
+ optional int64 lte = 3;
+
+ // Gt specifies that this field must be greater than the specified value,
+ // exclusive. If the value of Gt is larger than a specified Lt or Lte, the
+ // range is reversed.
+ optional int64 gt = 4;
+
+ // Gte specifies that this field must be greater than or equal to the
+ // specified value, inclusive. If the value of Gte is larger than a
+ // specified Lt or Lte, the range is reversed.
+ optional int64 gte = 5;
+
+ // In specifies that this field must be equal to one of the specified
+ // values
+ repeated int64 in = 6;
+
+ // NotIn specifies that this field cannot be equal to one of the specified
+ // values
+ repeated int64 not_in = 7;
+
+ // IgnoreEmpty specifies that the validation rules of this field should be
+ // evaluated only if the field is not empty
+ optional bool ignore_empty = 8;
+}
+
+// UInt32Rules describes the constraints applied to `uint32` values
+message UInt32Rules {
+ // Const specifies that this field must be exactly the specified value
+ optional uint32 const = 1;
+
+ // Lt specifies that this field must be less than the specified value,
+ // exclusive
+ optional uint32 lt = 2;
+
+ // Lte specifies that this field must be less than or equal to the
+ // specified value, inclusive
+ optional uint32 lte = 3;
+
+ // Gt specifies that this field must be greater than the specified value,
+ // exclusive. If the value of Gt is larger than a specified Lt or Lte, the
+ // range is reversed.
+ optional uint32 gt = 4;
+
+ // Gte specifies that this field must be greater than or equal to the
+ // specified value, inclusive. If the value of Gte is larger than a
+ // specified Lt or Lte, the range is reversed.
+ optional uint32 gte = 5;
+
+ // In specifies that this field must be equal to one of the specified
+ // values
+ repeated uint32 in = 6;
+
+ // NotIn specifies that this field cannot be equal to one of the specified
+ // values
+ repeated uint32 not_in = 7;
+
+ // IgnoreEmpty specifies that the validation rules of this field should be
+ // evaluated only if the field is not empty
+ optional bool ignore_empty = 8;
+}
+
+// UInt64Rules describes the constraints applied to `uint64` values
+message UInt64Rules {
+ // Const specifies that this field must be exactly the specified value
+ optional uint64 const = 1;
+
+ // Lt specifies that this field must be less than the specified value,
+ // exclusive
+ optional uint64 lt = 2;
+
+ // Lte specifies that this field must be less than or equal to the
+ // specified value, inclusive
+ optional uint64 lte = 3;
+
+ // Gt specifies that this field must be greater than the specified value,
+ // exclusive. If the value of Gt is larger than a specified Lt or Lte, the
+ // range is reversed.
+ optional uint64 gt = 4;
+
+ // Gte specifies that this field must be greater than or equal to the
+ // specified value, inclusive. If the value of Gte is larger than a
+ // specified Lt or Lte, the range is reversed.
+ optional uint64 gte = 5;
+
+ // In specifies that this field must be equal to one of the specified
+ // values
+ repeated uint64 in = 6;
+
+ // NotIn specifies that this field cannot be equal to one of the specified
+ // values
+ repeated uint64 not_in = 7;
+
+ // IgnoreEmpty specifies that the validation rules of this field should be
+ // evaluated only if the field is not empty
+ optional bool ignore_empty = 8;
+}
+
+// SInt32Rules describes the constraints applied to `sint32` values
+message SInt32Rules {
+ // Const specifies that this field must be exactly the specified value
+ optional sint32 const = 1;
+
+ // Lt specifies that this field must be less than the specified value,
+ // exclusive
+ optional sint32 lt = 2;
+
+ // Lte specifies that this field must be less than or equal to the
+ // specified value, inclusive
+ optional sint32 lte = 3;
+
+ // Gt specifies that this field must be greater than the specified value,
+ // exclusive. If the value of Gt is larger than a specified Lt or Lte, the
+ // range is reversed.
+ optional sint32 gt = 4;
+
+ // Gte specifies that this field must be greater than or equal to the
+ // specified value, inclusive. If the value of Gte is larger than a
+ // specified Lt or Lte, the range is reversed.
+ optional sint32 gte = 5;
+
+ // In specifies that this field must be equal to one of the specified
+ // values
+ repeated sint32 in = 6;
+
+ // NotIn specifies that this field cannot be equal to one of the specified
+ // values
+ repeated sint32 not_in = 7;
+
+ // IgnoreEmpty specifies that the validation rules of this field should be
+ // evaluated only if the field is not empty
+ optional bool ignore_empty = 8;
+}
+
+// SInt64Rules describes the constraints applied to `sint64` values
+message SInt64Rules {
+ // Const specifies that this field must be exactly the specified value
+ optional sint64 const = 1;
+
+ // Lt specifies that this field must be less than the specified value,
+ // exclusive
+ optional sint64 lt = 2;
+
+ // Lte specifies that this field must be less than or equal to the
+ // specified value, inclusive
+ optional sint64 lte = 3;
+
+ // Gt specifies that this field must be greater than the specified value,
+ // exclusive. If the value of Gt is larger than a specified Lt or Lte, the
+ // range is reversed.
+ optional sint64 gt = 4;
+
+ // Gte specifies that this field must be greater than or equal to the
+ // specified value, inclusive. If the value of Gte is larger than a
+ // specified Lt or Lte, the range is reversed.
+ optional sint64 gte = 5;
+
+ // In specifies that this field must be equal to one of the specified
+ // values
+ repeated sint64 in = 6;
+
+ // NotIn specifies that this field cannot be equal to one of the specified
+ // values
+ repeated sint64 not_in = 7;
+
+ // IgnoreEmpty specifies that the validation rules of this field should be
+ // evaluated only if the field is not empty
+ optional bool ignore_empty = 8;
+}
+
+// Fixed32Rules describes the constraints applied to `fixed32` values
+message Fixed32Rules {
+ // Const specifies that this field must be exactly the specified value
+ optional fixed32 const = 1;
+
+ // Lt specifies that this field must be less than the specified value,
+ // exclusive
+ optional fixed32 lt = 2;
+
+ // Lte specifies that this field must be less than or equal to the
+ // specified value, inclusive
+ optional fixed32 lte = 3;
+
+ // Gt specifies that this field must be greater than the specified value,
+ // exclusive. If the value of Gt is larger than a specified Lt or Lte, the
+ // range is reversed.
+ optional fixed32 gt = 4;
+
+ // Gte specifies that this field must be greater than or equal to the
+ // specified value, inclusive. If the value of Gte is larger than a
+ // specified Lt or Lte, the range is reversed.
+ optional fixed32 gte = 5;
+
+ // In specifies that this field must be equal to one of the specified
+ // values
+ repeated fixed32 in = 6;
+
+ // NotIn specifies that this field cannot be equal to one of the specified
+ // values
+ repeated fixed32 not_in = 7;
+
+ // IgnoreEmpty specifies that the validation rules of this field should be
+ // evaluated only if the field is not empty
+ optional bool ignore_empty = 8;
+}
+
+// Fixed64Rules describes the constraints applied to `fixed64` values
+message Fixed64Rules {
+ // Const specifies that this field must be exactly the specified value
+ optional fixed64 const = 1;
+
+ // Lt specifies that this field must be less than the specified value,
+ // exclusive
+ optional fixed64 lt = 2;
+
+ // Lte specifies that this field must be less than or equal to the
+ // specified value, inclusive
+ optional fixed64 lte = 3;
+
+ // Gt specifies that this field must be greater than the specified value,
+ // exclusive. If the value of Gt is larger than a specified Lt or Lte, the
+ // range is reversed.
+ optional fixed64 gt = 4;
+
+ // Gte specifies that this field must be greater than or equal to the
+ // specified value, inclusive. If the value of Gte is larger than a
+ // specified Lt or Lte, the range is reversed.
+ optional fixed64 gte = 5;
+
+ // In specifies that this field must be equal to one of the specified
+ // values
+ repeated fixed64 in = 6;
+
+ // NotIn specifies that this field cannot be equal to one of the specified
+ // values
+ repeated fixed64 not_in = 7;
+
+ // IgnoreEmpty specifies that the validation rules of this field should be
+ // evaluated only if the field is not empty
+ optional bool ignore_empty = 8;
+}
+
+// SFixed32Rules describes the constraints applied to `sfixed32` values
+message SFixed32Rules {
+ // Const specifies that this field must be exactly the specified value
+ optional sfixed32 const = 1;
+
+ // Lt specifies that this field must be less than the specified value,
+ // exclusive
+ optional sfixed32 lt = 2;
+
+ // Lte specifies that this field must be less than or equal to the
+ // specified value, inclusive
+ optional sfixed32 lte = 3;
+
+ // Gt specifies that this field must be greater than the specified value,
+ // exclusive. If the value of Gt is larger than a specified Lt or Lte, the
+ // range is reversed.
+ optional sfixed32 gt = 4;
+
+ // Gte specifies that this field must be greater than or equal to the
+ // specified value, inclusive. If the value of Gte is larger than a
+ // specified Lt or Lte, the range is reversed.
+ optional sfixed32 gte = 5;
+
+ // In specifies that this field must be equal to one of the specified
+ // values
+ repeated sfixed32 in = 6;
+
+ // NotIn specifies that this field cannot be equal to one of the specified
+ // values
+ repeated sfixed32 not_in = 7;
+
+ // IgnoreEmpty specifies that the validation rules of this field should be
+ // evaluated only if the field is not empty
+ optional bool ignore_empty = 8;
+}
+
+// SFixed64Rules describes the constraints applied to `sfixed64` values
+message SFixed64Rules {
+ // Const specifies that this field must be exactly the specified value
+ optional sfixed64 const = 1;
+
+ // Lt specifies that this field must be less than the specified value,
+ // exclusive
+ optional sfixed64 lt = 2;
+
+ // Lte specifies that this field must be less than or equal to the
+ // specified value, inclusive
+ optional sfixed64 lte = 3;
+
+ // Gt specifies that this field must be greater than the specified value,
+ // exclusive. If the value of Gt is larger than a specified Lt or Lte, the
+ // range is reversed.
+ optional sfixed64 gt = 4;
+
+ // Gte specifies that this field must be greater than or equal to the
+ // specified value, inclusive. If the value of Gte is larger than a
+ // specified Lt or Lte, the range is reversed.
+ optional sfixed64 gte = 5;
+
+ // In specifies that this field must be equal to one of the specified
+ // values
+ repeated sfixed64 in = 6;
+
+ // NotIn specifies that this field cannot be equal to one of the specified
+ // values
+ repeated sfixed64 not_in = 7;
+
+ // IgnoreEmpty specifies that the validation rules of this field should be
+ // evaluated only if the field is not empty
+ optional bool ignore_empty = 8;
+}
+
+// BoolRules describes the constraints applied to `bool` values
+message BoolRules {
+ // Const specifies that this field must be exactly the specified value
+ optional bool const = 1;
+}
+
+// StringRules describe the constraints applied to `string` values
+message StringRules {
+ // Const specifies that this field must be exactly the specified value
+ optional string const = 1;
+
+ // Len specifies that this field must be the specified number of
+ // characters (Unicode code points). Note that the number of
+ // characters may differ from the number of bytes in the string.
+ optional uint64 len = 19;
+
+ // MinLen specifies that this field must be the specified number of
+ // characters (Unicode code points) at a minimum. Note that the number of
+ // characters may differ from the number of bytes in the string.
+ optional uint64 min_len = 2;
+
+ // MaxLen specifies that this field must be the specified number of
+ // characters (Unicode code points) at a maximum. Note that the number of
+ // characters may differ from the number of bytes in the string.
+ optional uint64 max_len = 3;
+
+ // LenBytes specifies that this field must be exactly the specified
+ // number of bytes
+ optional uint64 len_bytes = 20;
+
+ // MinBytes specifies that this field must be the specified number of bytes
+ // at a minimum
+ optional uint64 min_bytes = 4;
+
+ // MaxBytes specifies that this field must be the specified number of bytes
+ // at a maximum
+ optional uint64 max_bytes = 5;
+
+ // Pattern specifies that this field must match against the specified
+ // regular expression (RE2 syntax). The included expression should elide
+ // any delimiters.
+ optional string pattern = 6;
+
+ // Prefix specifies that this field must have the specified substring at
+ // the beginning of the string.
+ optional string prefix = 7;
+
+ // Suffix specifies that this field must have the specified substring at
+ // the end of the string.
+ optional string suffix = 8;
+
+ // Contains specifies that this field must have the specified substring
+ // anywhere in the string.
+ optional string contains = 9;
+
+ // NotContains specifies that this field cannot have the specified substring
+ // anywhere in the string.
+ optional string not_contains = 23;
+
+ // In specifies that this field must be equal to one of the specified
+ // values
+ repeated string in = 10;
+
+ // NotIn specifies that this field cannot be equal to one of the specified
+ // values
+ repeated string not_in = 11;
+
+ // WellKnown rules provide advanced constraints against common string
+ // patterns
+ oneof well_known {
+ // Email specifies that the field must be a valid email address as
+ // defined by RFC 5322
+ bool email = 12;
+
+ // Hostname specifies that the field must be a valid hostname as
+ // defined by RFC 1034. This constraint does not support
+ // internationalized domain names (IDNs).
+ bool hostname = 13;
+
+ // Ip specifies that the field must be a valid IP (v4 or v6) address.
+ // Valid IPv6 addresses should not include surrounding square brackets.
+ bool ip = 14;
+
+ // Ipv4 specifies that the field must be a valid IPv4 address.
+ bool ipv4 = 15;
+
+ // Ipv6 specifies that the field must be a valid IPv6 address. Valid
+ // IPv6 addresses should not include surrounding square brackets.
+ bool ipv6 = 16;
+
+ // Uri specifies that the field must be a valid, absolute URI as defined
+ // by RFC 3986
+ bool uri = 17;
+
+ // UriRef specifies that the field must be a valid URI as defined by RFC
+ // 3986 and may be relative or absolute.
+ bool uri_ref = 18;
+
+ // Address specifies that the field must be either a valid hostname as
+ // defined by RFC 1034 (which does not support internationalized domain
+ // names or IDNs), or it can be a valid IP (v4 or v6).
+ bool address = 21;
+
+ // Uuid specifies that the field must be a valid UUID as defined by
+ // RFC 4122
+ bool uuid = 22;
+
+ // WellKnownRegex specifies a common well known pattern defined as a regex.
+ KnownRegex well_known_regex = 24;
+ }
+
+ // This applies to regexes HTTP_HEADER_NAME and HTTP_HEADER_VALUE to enable
+ // strict header validation.
+ // By default, this is true, and HTTP header validations are RFC-compliant.
+ // Setting to false will enable a looser validations that only disallows
+ // \r\n\0 characters, which can be used to bypass header matching rules.
+ optional bool strict = 25 [default = true];
+
+ // IgnoreEmpty specifies that the validation rules of this field should be
+ // evaluated only if the field is not empty
+ optional bool ignore_empty = 26;
+}
+
+// WellKnownRegex contain some well-known patterns.
+enum KnownRegex {
+ UNKNOWN = 0;
+
+ // HTTP header name as defined by RFC 7230.
+ HTTP_HEADER_NAME = 1;
+
+ // HTTP header value as defined by RFC 7230.
+ HTTP_HEADER_VALUE = 2;
+}
+
+// BytesRules describe the constraints applied to `bytes` values
+message BytesRules {
+ // Const specifies that this field must be exactly the specified value
+ optional bytes const = 1;
+
+ // Len specifies that this field must be the specified number of bytes
+ optional uint64 len = 13;
+
+ // MinLen specifies that this field must be the specified number of bytes
+ // at a minimum
+ optional uint64 min_len = 2;
+
+ // MaxLen specifies that this field must be the specified number of bytes
+ // at a maximum
+ optional uint64 max_len = 3;
+
+ // Pattern specifies that this field must match against the specified
+ // regular expression (RE2 syntax). The included expression should elide
+ // any delimiters.
+ optional string pattern = 4;
+
+ // Prefix specifies that this field must have the specified bytes at the
+ // beginning of the string.
+ optional bytes prefix = 5;
+
+ // Suffix specifies that this field must have the specified bytes at the
+ // end of the string.
+ optional bytes suffix = 6;
+
+ // Contains specifies that this field must have the specified bytes
+ // anywhere in the string.
+ optional bytes contains = 7;
+
+ // In specifies that this field must be equal to one of the specified
+ // values
+ repeated bytes in = 8;
+
+ // NotIn specifies that this field cannot be equal to one of the specified
+ // values
+ repeated bytes not_in = 9;
+
+ // WellKnown rules provide advanced constraints against common byte
+ // patterns
+ oneof well_known {
+ // Ip specifies that the field must be a valid IP (v4 or v6) address in
+ // byte format
+ bool ip = 10;
+
+ // Ipv4 specifies that the field must be a valid IPv4 address in byte
+ // format
+ bool ipv4 = 11;
+
+ // Ipv6 specifies that the field must be a valid IPv6 address in byte
+ // format
+ bool ipv6 = 12;
+ }
+
+ // IgnoreEmpty specifies that the validation rules of this field should be
+ // evaluated only if the field is not empty
+ optional bool ignore_empty = 14;
+}
+
+// EnumRules describe the constraints applied to enum values
+message EnumRules {
+ // Const specifies that this field must be exactly the specified value
+ optional int32 const = 1;
+
+ // DefinedOnly specifies that this field must be only one of the defined
+ // values for this enum, failing on any undefined value.
+ optional bool defined_only = 2;
+
+ // In specifies that this field must be equal to one of the specified
+ // values
+ repeated int32 in = 3;
+
+ // NotIn specifies that this field cannot be equal to one of the specified
+ // values
+ repeated int32 not_in = 4;
+}
+
+// MessageRules describe the constraints applied to embedded message values.
+// For message-type fields, validation is performed recursively.
+message MessageRules {
+ // Skip specifies that the validation rules of this field should not be
+ // evaluated
+ optional bool skip = 1;
+
+ // Required specifies that this field must be set
+ optional bool required = 2;
+}
+
+// RepeatedRules describe the constraints applied to `repeated` values
+message RepeatedRules {
+ // MinItems specifies that this field must have the specified number of
+ // items at a minimum
+ optional uint64 min_items = 1;
+
+ // MaxItems specifies that this field must have the specified number of
+ // items at a maximum
+ optional uint64 max_items = 2;
+
+ // Unique specifies that all elements in this field must be unique. This
+ // constraint is only applicable to scalar and enum types (messages are not
+ // supported).
+ optional bool unique = 3;
+
+ // Items specifies the constraints to be applied to each item in the field.
+ // Repeated message fields will still execute validation against each item
+ // unless skip is specified here.
+ optional FieldRules items = 4;
+
+ // IgnoreEmpty specifies that the validation rules of this field should be
+ // evaluated only if the field is not empty
+ optional bool ignore_empty = 5;
+}
+
+// MapRules describe the constraints applied to `map` values
+message MapRules {
+ // MinPairs specifies that this field must have the specified number of
+ // KVs at a minimum
+ optional uint64 min_pairs = 1;
+
+ // MaxPairs specifies that this field must have the specified number of
+ // KVs at a maximum
+ optional uint64 max_pairs = 2;
+
+ // NoSparse specifies values in this field cannot be unset. This only
+ // applies to map's with message value types.
+ optional bool no_sparse = 3;
+
+ // Keys specifies the constraints to be applied to each key in the field.
+ optional FieldRules keys = 4;
+
+ // Values specifies the constraints to be applied to the value of each key
+ // in the field. Message values will still have their validations evaluated
+ // unless skip is specified here.
+ optional FieldRules values = 5;
+
+ // IgnoreEmpty specifies that the validation rules of this field should be
+ // evaluated only if the field is not empty
+ optional bool ignore_empty = 6;
+}
+
+// AnyRules describe constraints applied exclusively to the
+// `google.protobuf.Any` well-known type
+message AnyRules {
+ // Required specifies that this field must be set
+ optional bool required = 1;
+
+ // In specifies that this field's `type_url` must be equal to one of the
+ // specified values.
+ repeated string in = 2;
+
+ // NotIn specifies that this field's `type_url` must not be equal to any of
+ // the specified values.
+ repeated string not_in = 3;
+}
+
+// DurationRules describe the constraints applied exclusively to the
+// `google.protobuf.Duration` well-known type
+message DurationRules {
+ // Required specifies that this field must be set
+ optional bool required = 1;
+
+ // Const specifies that this field must be exactly the specified value
+ optional google.protobuf.Duration const = 2;
+
+ // Lt specifies that this field must be less than the specified value,
+ // exclusive
+ optional google.protobuf.Duration lt = 3;
+
+ // Lte specifies that this field must be less than or equal to the
+ // specified value, inclusive
+ optional google.protobuf.Duration lte = 4;
+
+ // Gt specifies that this field must be greater than the specified value,
+ // exclusive
+ optional google.protobuf.Duration gt = 5;
+
+ // Gte specifies that this field must be greater than the specified value,
+ // inclusive
+ optional google.protobuf.Duration gte = 6;
+
+ // In specifies that this field must be equal to one of the specified
+ // values
+ repeated google.protobuf.Duration in = 7;
+
+ // NotIn specifies that this field cannot be equal to one of the specified
+ // values
+ repeated google.protobuf.Duration not_in = 8;
+}
+
+// TimestampRules describe the constraints applied exclusively to the
+// `google.protobuf.Timestamp` well-known type
+message TimestampRules {
+ // Required specifies that this field must be set
+ optional bool required = 1;
+
+ // Const specifies that this field must be exactly the specified value
+ optional google.protobuf.Timestamp const = 2;
+
+ // Lt specifies that this field must be less than the specified value,
+ // exclusive
+ optional google.protobuf.Timestamp lt = 3;
+
+ // Lte specifies that this field must be less than or equal to the
+ // specified value, inclusive
+ optional google.protobuf.Timestamp lte = 4;
+
+ // Gt specifies that this field must be greater than the specified value,
+ // exclusive
+ optional google.protobuf.Timestamp gt = 5;
+
+ // Gte specifies that this field must be greater than the specified value,
+ // inclusive
+ optional google.protobuf.Timestamp gte = 6;
+
+ // LtNow specifies that this must be less than the current time. LtNow
+ // can only be used with the Within rule.
+ optional bool lt_now = 7;
+
+ // GtNow specifies that this must be greater than the current time. GtNow
+ // can only be used with the Within rule.
+ optional bool gt_now = 8;
+
+ // Within specifies that this field must be within this duration of the
+ // current time. This constraint can be used alone or with the LtNow and
+ // GtNow rules.
+ optional google.protobuf.Duration within = 9;
+}
diff --git a/vendor/github.com/form3tech-oss/jwt-go/.gitignore b/vendor/github.com/form3tech-oss/jwt-go/.gitignore
deleted file mode 100644
index c0e81a8d9..000000000
--- a/vendor/github.com/form3tech-oss/jwt-go/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-.DS_Store
-bin
-.idea/
-
-
diff --git a/vendor/github.com/form3tech-oss/jwt-go/.travis.yml b/vendor/github.com/form3tech-oss/jwt-go/.travis.yml
deleted file mode 100644
index 3c7fb7e1a..000000000
--- a/vendor/github.com/form3tech-oss/jwt-go/.travis.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-language: go
-
-script:
- - go vet ./...
- - go test -v ./...
-
-go:
- - 1.12
- - 1.13
- - 1.14
- - 1.15
- - tip
diff --git a/vendor/github.com/form3tech-oss/jwt-go/LICENSE b/vendor/github.com/form3tech-oss/jwt-go/LICENSE
deleted file mode 100644
index df83a9c2f..000000000
--- a/vendor/github.com/form3tech-oss/jwt-go/LICENSE
+++ /dev/null
@@ -1,8 +0,0 @@
-Copyright (c) 2012 Dave Grijalva
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
diff --git a/vendor/github.com/form3tech-oss/jwt-go/MIGRATION_GUIDE.md b/vendor/github.com/form3tech-oss/jwt-go/MIGRATION_GUIDE.md
deleted file mode 100644
index 7fc1f793c..000000000
--- a/vendor/github.com/form3tech-oss/jwt-go/MIGRATION_GUIDE.md
+++ /dev/null
@@ -1,97 +0,0 @@
-## Migration Guide from v2 -> v3
-
-Version 3 adds several new, frequently requested features. To do so, it introduces a few breaking changes. We've worked to keep these as minimal as possible. This guide explains the breaking changes and how you can quickly update your code.
-
-### `Token.Claims` is now an interface type
-
-The most requested feature from the 2.0 verison of this library was the ability to provide a custom type to the JSON parser for claims. This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`. We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`.
-
-`MapClaims` is an alias for `map[string]interface{}` with built in validation behavior. It is the default claims type when using `Parse`. The usage is unchanged except you must type cast the claims property.
-
-The old example for parsing a token looked like this..
-
-```go
- if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
- fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
- }
-```
-
-is now directly mapped to...
-
-```go
- if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
- claims := token.Claims.(jwt.MapClaims)
- fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
- }
-```
-
-`StandardClaims` is designed to be embedded in your custom type. You can supply a custom claims type with the new `ParseWithClaims` function. Here's an example of using a custom claims type.
-
-```go
- type MyCustomClaims struct {
- User string
- *StandardClaims
- }
-
- if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil {
- claims := token.Claims.(*MyCustomClaims)
- fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt)
- }
-```
-
-### `ParseFromRequest` has been moved
-
-To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`. The method signatues have also been augmented to receive a new argument: `Extractor`.
-
-`Extractors` do the work of picking the token string out of a request. The interface is simple and composable.
-
-This simple parsing example:
-
-```go
- if token, err := jwt.ParseFromRequest(tokenString, req, keyLookupFunc); err == nil {
- fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
- }
-```
-
-is directly mapped to:
-
-```go
- if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil {
- claims := token.Claims.(jwt.MapClaims)
- fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
- }
-```
-
-There are several concrete `Extractor` types provided for your convenience:
-
-* `HeaderExtractor` will search a list of headers until one contains content.
-* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content.
-* `MultiExtractor` will try a list of `Extractors` in order until one returns content.
-* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token.
-* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): `Authorization` header and `access_token` argument
-* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed. A simple example is stripping the `Bearer ` text from a header
-
-
-### RSA signing methods no longer accept `[]byte` keys
-
-Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse.
-
-To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`. These are just simple helpers for unpacking PEM encoded PKCS1 and PKCS8 keys. If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types.
-
-```go
- func keyLookupFunc(*Token) (interface{}, error) {
- // Don't forget to validate the alg is what you expect:
- if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok {
- return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
- }
-
- // Look up key
- key, err := lookupPublicKey(token.Header["kid"])
- if err != nil {
- return nil, err
- }
-
- // Unpack key from PEM encoded PKCS8
- return jwt.ParseRSAPublicKeyFromPEM(key)
- }
-```
diff --git a/vendor/github.com/form3tech-oss/jwt-go/README.md b/vendor/github.com/form3tech-oss/jwt-go/README.md
deleted file mode 100644
index d7749077f..000000000
--- a/vendor/github.com/form3tech-oss/jwt-go/README.md
+++ /dev/null
@@ -1,104 +0,0 @@
-# jwt-go
-
-[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go)
-[![GoDoc](https://godoc.org/github.com/dgrijalva/jwt-go?status.svg)](https://godoc.org/github.com/dgrijalva/jwt-go)
-
-A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html)
-
-**NEW VERSION COMING:** There have been a lot of improvements suggested since the version 3.0.0 released in 2016. I'm working now on cutting two different releases: 3.2.0 will contain any non-breaking changes or enhancements. 4.0.0 will follow shortly which will include breaking changes. See the 4.0.0 milestone to get an idea of what's coming. If you have other ideas, or would like to participate in 4.0.0, now's the time. If you depend on this library and don't want to be interrupted, I recommend you use your dependency mangement tool to pin to version 3.
-
-**SECURITY NOTICE:** Some older versions of Go have a security issue in the cryotp/elliptic. Recommendation is to upgrade to at least 1.8.3. See issue #216 for more detail.
-
-**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided.
-
-## What the heck is a JWT?
-
-JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens.
-
-In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in Oauth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects, that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way.
-
-The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used.
-
-The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) for information about reserved keys and the proper way to add your own.
-
-## What's in the box?
-
-This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own.
-
-## Examples
-
-See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage:
-
-* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac)
-* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac)
-* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples)
-
-## Extensions
-
-This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`.
-
-Here's an example of an extension that integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS): https://github.com/someone1/gcp-jwt-go
-
-## Compliance
-
-This library was last reviewed to comply with [RTF 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences:
-
-* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
-
-## Project Status & Versioning
-
-This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason).
-
-This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases).
-
-While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/dgrijalva/jwt-go.v3`. It will do the right thing WRT semantic versioning.
-
-**BREAKING CHANGES:***
-* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.
-
-## Usage Tips
-
-### Signing vs Encryption
-
-A token is simply a JSON object that is signed by its author. this tells you exactly two things about the data:
-
-* The author of the token was in the possession of the signing secret
-* The data has not been modified since it was signed
-
-It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library.
-
-### Choosing a Signing Method
-
-There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric.
-
-Symmetric signing methods, such as HSA, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though this rarely is enough to matter. Symmetric signing methods work the best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation.
-
-Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification.
-
-### Signing Methods and Key Types
-
-Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones:
-
-* The [HMAC signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation
-* The [RSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation
-* The [ECDSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation
-
-### JWT and OAuth
-
-It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication.
-
-Without going too far down the rabbit hole, here's a description of the interaction of these technologies:
-
-* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth.
-* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token.
-* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL.
-
-### Troubleshooting
-
-This library uses descriptive error messages whenever possible. If you are not getting the expected result, have a look at the errors. The most common place people get stuck is providing the correct type of key to the parser. See the above section on signing methods and key types.
-
-## More
-
-Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go).
-
-The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation.
diff --git a/vendor/github.com/form3tech-oss/jwt-go/errors.go b/vendor/github.com/form3tech-oss/jwt-go/errors.go
deleted file mode 100644
index 1c93024aa..000000000
--- a/vendor/github.com/form3tech-oss/jwt-go/errors.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package jwt
-
-import (
- "errors"
-)
-
-// Error constants
-var (
- ErrInvalidKey = errors.New("key is invalid")
- ErrInvalidKeyType = errors.New("key is of invalid type")
- ErrHashUnavailable = errors.New("the requested hash function is unavailable")
-)
-
-// The errors that might occur when parsing and validating a token
-const (
- ValidationErrorMalformed uint32 = 1 << iota // Token is malformed
- ValidationErrorUnverifiable // Token could not be verified because of signing problems
- ValidationErrorSignatureInvalid // Signature validation failed
-
- // Standard Claim validation errors
- ValidationErrorAudience // AUD validation failed
- ValidationErrorExpired // EXP validation failed
- ValidationErrorIssuedAt // IAT validation failed
- ValidationErrorIssuer // ISS validation failed
- ValidationErrorNotValidYet // NBF validation failed
- ValidationErrorId // JTI validation failed
- ValidationErrorClaimsInvalid // Generic claims validation error
-)
-
-// Helper for constructing a ValidationError with a string error message
-func NewValidationError(errorText string, errorFlags uint32) *ValidationError {
- return &ValidationError{
- text: errorText,
- Errors: errorFlags,
- }
-}
-
-// The error from Parse if token is not valid
-type ValidationError struct {
- Inner error // stores the error returned by external dependencies, i.e.: KeyFunc
- Errors uint32 // bitfield. see ValidationError... constants
- text string // errors that do not have a valid error just have text
-}
-
-// Validation error is an error type
-func (e ValidationError) Error() string {
- if e.Inner != nil {
- return e.Inner.Error()
- } else if e.text != "" {
- return e.text
- } else {
- return "token is invalid"
- }
-}
-
-// No errors
-func (e *ValidationError) valid() bool {
- return e.Errors == 0
-}
diff --git a/vendor/github.com/form3tech-oss/jwt-go/map_claims.go b/vendor/github.com/form3tech-oss/jwt-go/map_claims.go
deleted file mode 100644
index 90ab6bea3..000000000
--- a/vendor/github.com/form3tech-oss/jwt-go/map_claims.go
+++ /dev/null
@@ -1,102 +0,0 @@
-package jwt
-
-import (
- "encoding/json"
- "errors"
- // "fmt"
-)
-
-// Claims type that uses the map[string]interface{} for JSON decoding
-// This is the default claims type if you don't supply one
-type MapClaims map[string]interface{}
-
-// Compares the aud claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (m MapClaims) VerifyAudience(cmp string, req bool) bool {
- aud, ok := m["aud"].([]string)
- if !ok {
- strAud, ok := m["aud"].(string)
- if !ok {
- return false
- }
- aud = append(aud, strAud)
- }
-
- return verifyAud(aud, cmp, req)
-}
-
-// Compares the exp claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool {
- switch exp := m["exp"].(type) {
- case float64:
- return verifyExp(int64(exp), cmp, req)
- case json.Number:
- v, _ := exp.Int64()
- return verifyExp(v, cmp, req)
- }
- return req == false
-}
-
-// Compares the iat claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool {
- switch iat := m["iat"].(type) {
- case float64:
- return verifyIat(int64(iat), cmp, req)
- case json.Number:
- v, _ := iat.Int64()
- return verifyIat(v, cmp, req)
- }
- return req == false
-}
-
-// Compares the iss claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (m MapClaims) VerifyIssuer(cmp string, req bool) bool {
- iss, _ := m["iss"].(string)
- return verifyIss(iss, cmp, req)
-}
-
-// Compares the nbf claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool {
- switch nbf := m["nbf"].(type) {
- case float64:
- return verifyNbf(int64(nbf), cmp, req)
- case json.Number:
- v, _ := nbf.Int64()
- return verifyNbf(v, cmp, req)
- }
- return req == false
-}
-
-// Validates time based claims "exp, iat, nbf".
-// There is no accounting for clock skew.
-// As well, if any of the above claims are not in the token, it will still
-// be considered a valid claim.
-func (m MapClaims) Valid() error {
- vErr := new(ValidationError)
- now := TimeFunc().Unix()
-
- if m.VerifyExpiresAt(now, false) == false {
- vErr.Inner = errors.New("Token is expired")
- vErr.Errors |= ValidationErrorExpired
- }
-
- if m.VerifyIssuedAt(now, false) == false {
- vErr.Inner = errors.New("Token used before issued")
- vErr.Errors |= ValidationErrorIssuedAt
- }
-
- if m.VerifyNotBefore(now, false) == false {
- vErr.Inner = errors.New("Token is not valid yet")
- vErr.Errors |= ValidationErrorNotValidYet
- }
-
- if vErr.valid() {
- return nil
- }
-
- return vErr
-}
diff --git a/vendor/github.com/form3tech-oss/jwt-go/parser.go b/vendor/github.com/form3tech-oss/jwt-go/parser.go
deleted file mode 100644
index d6901d9ad..000000000
--- a/vendor/github.com/form3tech-oss/jwt-go/parser.go
+++ /dev/null
@@ -1,148 +0,0 @@
-package jwt
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "strings"
-)
-
-type Parser struct {
- ValidMethods []string // If populated, only these methods will be considered valid
- UseJSONNumber bool // Use JSON Number format in JSON decoder
- SkipClaimsValidation bool // Skip claims validation during token parsing
-}
-
-// Parse, validate, and return a token.
-// keyFunc will receive the parsed token and should return the key for validating.
-// If everything is kosher, err will be nil
-func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
- return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc)
-}
-
-func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
- token, parts, err := p.ParseUnverified(tokenString, claims)
- if err != nil {
- return token, err
- }
-
- // Verify signing method is in the required set
- if p.ValidMethods != nil {
- var signingMethodValid = false
- var alg = token.Method.Alg()
- for _, m := range p.ValidMethods {
- if m == alg {
- signingMethodValid = true
- break
- }
- }
- if !signingMethodValid {
- // signing method is not in the listed set
- return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid)
- }
- }
-
- // Lookup key
- var key interface{}
- if keyFunc == nil {
- // keyFunc was not provided. short circuiting validation
- return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable)
- }
- if key, err = keyFunc(token); err != nil {
- // keyFunc returned an error
- if ve, ok := err.(*ValidationError); ok {
- return token, ve
- }
- return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable}
- }
-
- vErr := &ValidationError{}
-
- // Validate Claims
- if !p.SkipClaimsValidation {
- if err := token.Claims.Valid(); err != nil {
-
- // If the Claims Valid returned an error, check if it is a validation error,
- // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set
- if e, ok := err.(*ValidationError); !ok {
- vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid}
- } else {
- vErr = e
- }
- }
- }
-
- // Perform validation
- token.Signature = parts[2]
- if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil {
- vErr.Inner = err
- vErr.Errors |= ValidationErrorSignatureInvalid
- }
-
- if vErr.valid() {
- token.Valid = true
- return token, nil
- }
-
- return token, vErr
-}
-
-// WARNING: Don't use this method unless you know what you're doing
-//
-// This method parses the token but doesn't validate the signature. It's only
-// ever useful in cases where you know the signature is valid (because it has
-// been checked previously in the stack) and you want to extract values from
-// it.
-func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) {
- parts = strings.Split(tokenString, ".")
- if len(parts) != 3 {
- return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed)
- }
-
- token = &Token{Raw: tokenString}
-
- // parse Header
- var headerBytes []byte
- if headerBytes, err = DecodeSegment(parts[0]); err != nil {
- if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") {
- return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed)
- }
- return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
- }
- if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
- return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
- }
-
- // parse Claims
- var claimBytes []byte
- token.Claims = claims
-
- if claimBytes, err = DecodeSegment(parts[1]); err != nil {
- return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
- }
- dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
- if p.UseJSONNumber {
- dec.UseNumber()
- }
- // JSON Decode. Special case for map type to avoid weird pointer behavior
- if c, ok := token.Claims.(MapClaims); ok {
- err = dec.Decode(&c)
- } else {
- err = dec.Decode(&claims)
- }
- // Handle decode error
- if err != nil {
- return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
- }
-
- // Lookup signature method
- if method, ok := token.Header["alg"].(string); ok {
- if token.Method = GetSigningMethod(method); token.Method == nil {
- return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable)
- }
- } else {
- return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable)
- }
-
- return token, parts, nil
-}
diff --git a/vendor/github.com/form3tech-oss/jwt-go/rsa_utils.go b/vendor/github.com/form3tech-oss/jwt-go/rsa_utils.go
deleted file mode 100644
index 14c78c292..000000000
--- a/vendor/github.com/form3tech-oss/jwt-go/rsa_utils.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package jwt
-
-import (
- "crypto/rsa"
- "crypto/x509"
- "encoding/pem"
- "errors"
-)
-
-var (
- ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be a PEM encoded PKCS1 or PKCS8 key")
- ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key")
- ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key")
-)
-
-// Parse PEM encoded PKCS1 or PKCS8 private key
-func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
- var err error
-
- // Parse PEM block
- var block *pem.Block
- if block, _ = pem.Decode(key); block == nil {
- return nil, ErrKeyMustBePEMEncoded
- }
-
- var parsedKey interface{}
- if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
- if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
- return nil, err
- }
- }
-
- var pkey *rsa.PrivateKey
- var ok bool
- if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
- return nil, ErrNotRSAPrivateKey
- }
-
- return pkey, nil
-}
-
-// Parse PEM encoded PKCS1 or PKCS8 private key protected with password
-func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) {
- var err error
-
- // Parse PEM block
- var block *pem.Block
- if block, _ = pem.Decode(key); block == nil {
- return nil, ErrKeyMustBePEMEncoded
- }
-
- var parsedKey interface{}
-
- var blockDecrypted []byte
- if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil {
- return nil, err
- }
-
- if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil {
- if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil {
- return nil, err
- }
- }
-
- var pkey *rsa.PrivateKey
- var ok bool
- if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
- return nil, ErrNotRSAPrivateKey
- }
-
- return pkey, nil
-}
-
-// Parse PEM encoded PKCS1 or PKCS8 public key
-func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
- var err error
-
- // Parse PEM block
- var block *pem.Block
- if block, _ = pem.Decode(key); block == nil {
- return nil, ErrKeyMustBePEMEncoded
- }
-
- // Parse the key
- var parsedKey interface{}
- if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
- if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
- parsedKey = cert.PublicKey
- } else {
- return nil, err
- }
- }
-
- var pkey *rsa.PublicKey
- var ok bool
- if pkey, ok = parsedKey.(*rsa.PublicKey); !ok {
- return nil, ErrNotRSAPublicKey
- }
-
- return pkey, nil
-}
diff --git a/vendor/github.com/form3tech-oss/jwt-go/token.go b/vendor/github.com/form3tech-oss/jwt-go/token.go
deleted file mode 100644
index d637e0867..000000000
--- a/vendor/github.com/form3tech-oss/jwt-go/token.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package jwt
-
-import (
- "encoding/base64"
- "encoding/json"
- "strings"
- "time"
-)
-
-// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time).
-// You can override it to use another time value. This is useful for testing or if your
-// server uses a different time zone than your tokens.
-var TimeFunc = time.Now
-
-// Parse methods use this callback function to supply
-// the key for verification. The function receives the parsed,
-// but unverified Token. This allows you to use properties in the
-// Header of the token (such as `kid`) to identify which key to use.
-type Keyfunc func(*Token) (interface{}, error)
-
-// A JWT Token. Different fields will be used depending on whether you're
-// creating or parsing/verifying a token.
-type Token struct {
- Raw string // The raw token. Populated when you Parse a token
- Method SigningMethod // The signing method used or to be used
- Header map[string]interface{} // The first segment of the token
- Claims Claims // The second segment of the token
- Signature string // The third segment of the token. Populated when you Parse a token
- Valid bool // Is the token valid? Populated when you Parse/Verify a token
-}
-
-// Create a new Token. Takes a signing method
-func New(method SigningMethod) *Token {
- return NewWithClaims(method, MapClaims{})
-}
-
-func NewWithClaims(method SigningMethod, claims Claims) *Token {
- return &Token{
- Header: map[string]interface{}{
- "typ": "JWT",
- "alg": method.Alg(),
- },
- Claims: claims,
- Method: method,
- }
-}
-
-// Get the complete, signed token
-func (t *Token) SignedString(key interface{}) (string, error) {
- var sig, sstr string
- var err error
- if sstr, err = t.SigningString(); err != nil {
- return "", err
- }
- if sig, err = t.Method.Sign(sstr, key); err != nil {
- return "", err
- }
- return strings.Join([]string{sstr, sig}, "."), nil
-}
-
-// Generate the signing string. This is the
-// most expensive part of the whole deal. Unless you
-// need this for something special, just go straight for
-// the SignedString.
-func (t *Token) SigningString() (string, error) {
- var err error
- parts := make([]string, 2)
- for i, _ := range parts {
- var jsonValue []byte
- if i == 0 {
- if jsonValue, err = json.Marshal(t.Header); err != nil {
- return "", err
- }
- } else {
- if jsonValue, err = json.Marshal(t.Claims); err != nil {
- return "", err
- }
- }
-
- parts[i] = EncodeSegment(jsonValue)
- }
- return strings.Join(parts, "."), nil
-}
-
-// Parse, validate, and return a token.
-// keyFunc will receive the parsed token and should return the key for validating.
-// If everything is kosher, err will be nil
-func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
- return new(Parser).Parse(tokenString, keyFunc)
-}
-
-func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
- return new(Parser).ParseWithClaims(tokenString, claims, keyFunc)
-}
-
-// Encode JWT specific base64url encoding with padding stripped
-func EncodeSegment(seg []byte) string {
- return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=")
-}
-
-// Decode JWT specific base64url encoding with padding stripped
-func DecodeSegment(seg string) ([]byte, error) {
- if l := len(seg) % 4; l > 0 {
- seg += strings.Repeat("=", 4-l)
- }
-
- return base64.URLEncoding.DecodeString(seg)
-}
diff --git a/vendor/github.com/go-kit/log/.gitignore b/vendor/github.com/go-kit/log/.gitignore
new file mode 100644
index 000000000..66fd13c90
--- /dev/null
+++ b/vendor/github.com/go-kit/log/.gitignore
@@ -0,0 +1,15 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Dependency directories (remove the comment below to include it)
+# vendor/
diff --git a/vendor/github.com/go-kit/log/LICENSE b/vendor/github.com/go-kit/log/LICENSE
new file mode 100644
index 000000000..bb5bdb9cb
--- /dev/null
+++ b/vendor/github.com/go-kit/log/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021 Go kit
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/go-kit/log/README.md b/vendor/github.com/go-kit/log/README.md
new file mode 100644
index 000000000..a0931951d
--- /dev/null
+++ b/vendor/github.com/go-kit/log/README.md
@@ -0,0 +1,151 @@
+# package log
+
+`package log` provides a minimal interface for structured logging in services.
+It may be wrapped to encode conventions, enforce type-safety, provide leveled
+logging, and so on. It can be used for both typical application log events,
+and log-structured data streams.
+
+## Structured logging
+
+Structured logging is, basically, conceding to the reality that logs are
+_data_, and warrant some level of schematic rigor. Using a stricter,
+key/value-oriented message format for our logs, containing contextual and
+semantic information, makes it much easier to get insight into the
+operational activity of the systems we build. Consequently, `package log` is
+of the strong belief that "[the benefits of structured logging outweigh the
+minimal effort involved](https://www.thoughtworks.com/radar/techniques/structured-logging)".
+
+Migrating from unstructured to structured logging is probably a lot easier
+than you'd expect.
+
+```go
+// Unstructured
+log.Printf("HTTP server listening on %s", addr)
+
+// Structured
+logger.Log("transport", "HTTP", "addr", addr, "msg", "listening")
+```
+
+## Usage
+
+### Typical application logging
+
+```go
+w := log.NewSyncWriter(os.Stderr)
+logger := log.NewLogfmtLogger(w)
+logger.Log("question", "what is the meaning of life?", "answer", 42)
+
+// Output:
+// question="what is the meaning of life?" answer=42
+```
+
+### Contextual Loggers
+
+```go
+func main() {
+ var logger log.Logger
+ logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
+ logger = log.With(logger, "instance_id", 123)
+
+ logger.Log("msg", "starting")
+ NewWorker(log.With(logger, "component", "worker")).Run()
+ NewSlacker(log.With(logger, "component", "slacker")).Run()
+}
+
+// Output:
+// instance_id=123 msg=starting
+// instance_id=123 component=worker msg=running
+// instance_id=123 component=slacker msg=running
+```
+
+### Interact with stdlib logger
+
+Redirect stdlib logger to Go kit logger.
+
+```go
+import (
+ "os"
+ stdlog "log"
+ kitlog "github.com/go-kit/log"
+)
+
+func main() {
+ logger := kitlog.NewJSONLogger(kitlog.NewSyncWriter(os.Stdout))
+ stdlog.SetOutput(kitlog.NewStdlibAdapter(logger))
+ stdlog.Print("I sure like pie")
+}
+
+// Output:
+// {"msg":"I sure like pie","ts":"2016/01/01 12:34:56"}
+```
+
+Or, if, for legacy reasons, you need to pipe all of your logging through the
+stdlib log package, you can redirect Go kit logger to the stdlib logger.
+
+```go
+logger := kitlog.NewLogfmtLogger(kitlog.StdlibWriter{})
+logger.Log("legacy", true, "msg", "at least it's something")
+
+// Output:
+// 2016/01/01 12:34:56 legacy=true msg="at least it's something"
+```
+
+### Timestamps and callers
+
+```go
+var logger log.Logger
+logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
+logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
+
+logger.Log("msg", "hello")
+
+// Output:
+// ts=2016-01-01T12:34:56Z caller=main.go:15 msg=hello
+```
+
+## Levels
+
+Log levels are supported via the [level package](https://godoc.org/github.com/go-kit/log/level).
+
+## Supported output formats
+
+- [Logfmt](https://brandur.org/logfmt) ([see also](https://blog.codeship.com/logfmt-a-log-format-thats-easy-to-read-and-write))
+- JSON
+
+## Enhancements
+
+`package log` is centered on the one-method Logger interface.
+
+```go
+type Logger interface {
+ Log(keyvals ...interface{}) error
+}
+```
+
+This interface, and its supporting code like is the product of much iteration
+and evaluation. For more details on the evolution of the Logger interface,
+see [The Hunt for a Logger Interface](http://go-talks.appspot.com/github.com/ChrisHines/talks/structured-logging/structured-logging.slide#1),
+a talk by [Chris Hines](https://github.com/ChrisHines).
+Also, please see
+[#63](https://github.com/go-kit/kit/issues/63),
+[#76](https://github.com/go-kit/kit/pull/76),
+[#131](https://github.com/go-kit/kit/issues/131),
+[#157](https://github.com/go-kit/kit/pull/157),
+[#164](https://github.com/go-kit/kit/issues/164), and
+[#252](https://github.com/go-kit/kit/pull/252)
+to review historical conversations about package log and the Logger interface.
+
+Value-add packages and suggestions,
+like improvements to [the leveled logger](https://godoc.org/github.com/go-kit/log/level),
+are of course welcome. Good proposals should
+
+- Be composable with [contextual loggers](https://godoc.org/github.com/go-kit/log#With),
+- Not break the behavior of [log.Caller](https://godoc.org/github.com/go-kit/log#Caller) in any wrapped contextual loggers, and
+- Be friendly to packages that accept only an unadorned log.Logger.
+
+## Benchmarks & comparisons
+
+There are a few Go logging benchmarks and comparisons that include Go kit's package log.
+
+- [imkira/go-loggers-bench](https://github.com/imkira/go-loggers-bench) includes kit/log
+- [uber-common/zap](https://github.com/uber-common/zap), a zero-alloc logging library, includes a comparison with kit/log
diff --git a/vendor/github.com/go-kit/log/doc.go b/vendor/github.com/go-kit/log/doc.go
new file mode 100644
index 000000000..f744382fe
--- /dev/null
+++ b/vendor/github.com/go-kit/log/doc.go
@@ -0,0 +1,116 @@
+// Package log provides a structured logger.
+//
+// Structured logging produces logs easily consumed later by humans or
+// machines. Humans might be interested in debugging errors, or tracing
+// specific requests. Machines might be interested in counting interesting
+// events, or aggregating information for off-line processing. In both cases,
+// it is important that the log messages are structured and actionable.
+// Package log is designed to encourage both of these best practices.
+//
+// Basic Usage
+//
+// The fundamental interface is Logger. Loggers create log events from
+// key/value data. The Logger interface has a single method, Log, which
+// accepts a sequence of alternating key/value pairs, which this package names
+// keyvals.
+//
+// type Logger interface {
+// Log(keyvals ...interface{}) error
+// }
+//
+// Here is an example of a function using a Logger to create log events.
+//
+// func RunTask(task Task, logger log.Logger) string {
+// logger.Log("taskID", task.ID, "event", "starting task")
+// ...
+// logger.Log("taskID", task.ID, "event", "task complete")
+// }
+//
+// The keys in the above example are "taskID" and "event". The values are
+// task.ID, "starting task", and "task complete". Every key is followed
+// immediately by its value.
+//
+// Keys are usually plain strings. Values may be any type that has a sensible
+// encoding in the chosen log format. With structured logging it is a good
+// idea to log simple values without formatting them. This practice allows
+// the chosen logger to encode values in the most appropriate way.
+//
+// Contextual Loggers
+//
+// A contextual logger stores keyvals that it includes in all log events.
+// Building appropriate contextual loggers reduces repetition and aids
+// consistency in the resulting log output. With, WithPrefix, and WithSuffix
+// add context to a logger. We can use With to improve the RunTask example.
+//
+// func RunTask(task Task, logger log.Logger) string {
+// logger = log.With(logger, "taskID", task.ID)
+// logger.Log("event", "starting task")
+// ...
+// taskHelper(task.Cmd, logger)
+// ...
+// logger.Log("event", "task complete")
+// }
+//
+// The improved version emits the same log events as the original for the
+// first and last calls to Log. Passing the contextual logger to taskHelper
+// enables each log event created by taskHelper to include the task.ID even
+// though taskHelper does not have access to that value. Using contextual
+// loggers this way simplifies producing log output that enables tracing the
+// life cycle of individual tasks. (See the Contextual example for the full
+// code of the above snippet.)
+//
+// Dynamic Contextual Values
+//
+// A Valuer function stored in a contextual logger generates a new value each
+// time an event is logged. The Valuer example demonstrates how this feature
+// works.
+//
+// Valuers provide the basis for consistently logging timestamps and source
+// code location. The log package defines several valuers for that purpose.
+// See Timestamp, DefaultTimestamp, DefaultTimestampUTC, Caller, and
+// DefaultCaller. A common logger initialization sequence that ensures all log
+// entries contain a timestamp and source location looks like this:
+//
+// logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
+// logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
+//
+// Concurrent Safety
+//
+// Applications with multiple goroutines want each log event written to the
+// same logger to remain separate from other log events. Package log provides
+// two simple solutions for concurrent safe logging.
+//
+// NewSyncWriter wraps an io.Writer and serializes each call to its Write
+// method. Using a SyncWriter has the benefit that the smallest practical
+// portion of the logging logic is performed within a mutex, but it requires
+// the formatting Logger to make only one call to Write per log event.
+//
+// NewSyncLogger wraps any Logger and serializes each call to its Log method.
+// Using a SyncLogger has the benefit that it guarantees each log event is
+// handled atomically within the wrapped logger, but it typically serializes
+// both the formatting and output logic. Use a SyncLogger if the formatting
+// logger may perform multiple writes per log event.
+//
+// Error Handling
+//
+// This package relies on the practice of wrapping or decorating loggers with
+// other loggers to provide composable pieces of functionality. It also means
+// that Logger.Log must return an error because some
+// implementations—especially those that output log data to an io.Writer—may
+// encounter errors that cannot be handled locally. This in turn means that
+// Loggers that wrap other loggers should return errors from the wrapped
+// logger up the stack.
+//
+// Fortunately, the decorator pattern also provides a way to avoid the
+// necessity to check for errors every time an application calls Logger.Log.
+// An application required to panic whenever its Logger encounters
+// an error could initialize its logger as follows.
+//
+// fmtlogger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
+// logger := log.LoggerFunc(func(keyvals ...interface{}) error {
+// if err := fmtlogger.Log(keyvals...); err != nil {
+// panic(err)
+// }
+// return nil
+// })
+package log
diff --git a/vendor/github.com/go-kit/log/go.mod b/vendor/github.com/go-kit/log/go.mod
new file mode 100644
index 000000000..7c87949ef
--- /dev/null
+++ b/vendor/github.com/go-kit/log/go.mod
@@ -0,0 +1,8 @@
+module github.com/go-kit/log
+
+go 1.16
+
+require (
+ github.com/go-logfmt/logfmt v0.5.0
+ github.com/go-stack/stack v1.8.0
+)
diff --git a/vendor/github.com/go-kit/log/go.sum b/vendor/github.com/go-kit/log/go.sum
new file mode 100644
index 000000000..59a7cf6c2
--- /dev/null
+++ b/vendor/github.com/go-kit/log/go.sum
@@ -0,0 +1,4 @@
+github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
diff --git a/vendor/github.com/go-kit/log/json_logger.go b/vendor/github.com/go-kit/log/json_logger.go
new file mode 100644
index 000000000..0cedbf824
--- /dev/null
+++ b/vendor/github.com/go-kit/log/json_logger.go
@@ -0,0 +1,91 @@
+package log
+
+import (
+ "encoding"
+ "encoding/json"
+ "fmt"
+ "io"
+ "reflect"
+)
+
+type jsonLogger struct {
+ io.Writer
+}
+
+// NewJSONLogger returns a Logger that encodes keyvals to the Writer as a
+// single JSON object. Each log event produces no more than one call to
+// w.Write. The passed Writer must be safe for concurrent use by multiple
+// goroutines if the returned Logger will be used concurrently.
+func NewJSONLogger(w io.Writer) Logger {
+ return &jsonLogger{w}
+}
+
+func (l *jsonLogger) Log(keyvals ...interface{}) error {
+ n := (len(keyvals) + 1) / 2 // +1 to handle case when len is odd
+ m := make(map[string]interface{}, n)
+ for i := 0; i < len(keyvals); i += 2 {
+ k := keyvals[i]
+ var v interface{} = ErrMissingValue
+ if i+1 < len(keyvals) {
+ v = keyvals[i+1]
+ }
+ merge(m, k, v)
+ }
+ enc := json.NewEncoder(l.Writer)
+ enc.SetEscapeHTML(false)
+ return enc.Encode(m)
+}
+
+func merge(dst map[string]interface{}, k, v interface{}) {
+ var key string
+ switch x := k.(type) {
+ case string:
+ key = x
+ case fmt.Stringer:
+ key = safeString(x)
+ default:
+ key = fmt.Sprint(x)
+ }
+
+ // We want json.Marshaler and encoding.TextMarshaller to take priority over
+ // err.Error() and v.String(). But json.Marshall (called later) does that by
+ // default so we force a no-op if it's one of those 2 case.
+ switch x := v.(type) {
+ case json.Marshaler:
+ case encoding.TextMarshaler:
+ case error:
+ v = safeError(x)
+ case fmt.Stringer:
+ v = safeString(x)
+ }
+
+ dst[key] = v
+}
+
+func safeString(str fmt.Stringer) (s string) {
+ defer func() {
+ if panicVal := recover(); panicVal != nil {
+ if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() {
+ s = "NULL"
+ } else {
+ panic(panicVal)
+ }
+ }
+ }()
+ s = str.String()
+ return
+}
+
+func safeError(err error) (s interface{}) {
+ defer func() {
+ if panicVal := recover(); panicVal != nil {
+ if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() {
+ s = nil
+ } else {
+ panic(panicVal)
+ }
+ }
+ }()
+ s = err.Error()
+ return
+}
diff --git a/vendor/github.com/go-kit/log/level/doc.go b/vendor/github.com/go-kit/log/level/doc.go
new file mode 100644
index 000000000..505d307b1
--- /dev/null
+++ b/vendor/github.com/go-kit/log/level/doc.go
@@ -0,0 +1,22 @@
+// Package level implements leveled logging on top of Go kit's log package. To
+// use the level package, create a logger as per normal in your func main, and
+// wrap it with level.NewFilter.
+//
+// var logger log.Logger
+// logger = log.NewLogfmtLogger(os.Stderr)
+// logger = level.NewFilter(logger, level.AllowInfo()) // <--
+// logger = log.With(logger, "ts", log.DefaultTimestampUTC)
+//
+// Then, at the callsites, use one of the level.Debug, Info, Warn, or Error
+// helper methods to emit leveled log events.
+//
+// logger.Log("foo", "bar") // as normal, no level
+// level.Debug(logger).Log("request_id", reqID, "trace_data", trace.Get())
+// if value > 100 {
+// level.Error(logger).Log("value", value)
+// }
+//
+// NewFilter allows precise control over what happens when a log event is
+// emitted without a level key, or if a squelched level is used. Check the
+// Option functions for details.
+package level
diff --git a/vendor/github.com/go-kit/log/level/level.go b/vendor/github.com/go-kit/log/level/level.go
new file mode 100644
index 000000000..c94756c6b
--- /dev/null
+++ b/vendor/github.com/go-kit/log/level/level.go
@@ -0,0 +1,205 @@
+package level
+
+import "github.com/go-kit/log"
+
+// Error returns a logger that includes a Key/ErrorValue pair.
+func Error(logger log.Logger) log.Logger {
+ return log.WithPrefix(logger, Key(), ErrorValue())
+}
+
+// Warn returns a logger that includes a Key/WarnValue pair.
+func Warn(logger log.Logger) log.Logger {
+ return log.WithPrefix(logger, Key(), WarnValue())
+}
+
+// Info returns a logger that includes a Key/InfoValue pair.
+func Info(logger log.Logger) log.Logger {
+ return log.WithPrefix(logger, Key(), InfoValue())
+}
+
+// Debug returns a logger that includes a Key/DebugValue pair.
+func Debug(logger log.Logger) log.Logger {
+ return log.WithPrefix(logger, Key(), DebugValue())
+}
+
+// NewFilter wraps next and implements level filtering. See the commentary on
+// the Option functions for a detailed description of how to configure levels.
+// If no options are provided, all leveled log events created with Debug,
+// Info, Warn or Error helper methods are squelched and non-leveled log
+// events are passed to next unmodified.
+func NewFilter(next log.Logger, options ...Option) log.Logger {
+ l := &logger{
+ next: next,
+ }
+ for _, option := range options {
+ option(l)
+ }
+ return l
+}
+
+type logger struct {
+ next log.Logger
+ allowed level
+ squelchNoLevel bool
+ errNotAllowed error
+ errNoLevel error
+}
+
+func (l *logger) Log(keyvals ...interface{}) error {
+ var hasLevel, levelAllowed bool
+ for i := 1; i < len(keyvals); i += 2 {
+ if v, ok := keyvals[i].(*levelValue); ok {
+ hasLevel = true
+ levelAllowed = l.allowed&v.level != 0
+ break
+ }
+ }
+ if !hasLevel && l.squelchNoLevel {
+ return l.errNoLevel
+ }
+ if hasLevel && !levelAllowed {
+ return l.errNotAllowed
+ }
+ return l.next.Log(keyvals...)
+}
+
+// Option sets a parameter for the leveled logger.
+type Option func(*logger)
+
+// AllowAll is an alias for AllowDebug.
+func AllowAll() Option {
+ return AllowDebug()
+}
+
+// AllowDebug allows error, warn, info and debug level log events to pass.
+func AllowDebug() Option {
+ return allowed(levelError | levelWarn | levelInfo | levelDebug)
+}
+
+// AllowInfo allows error, warn and info level log events to pass.
+func AllowInfo() Option {
+ return allowed(levelError | levelWarn | levelInfo)
+}
+
+// AllowWarn allows error and warn level log events to pass.
+func AllowWarn() Option {
+ return allowed(levelError | levelWarn)
+}
+
+// AllowError allows only error level log events to pass.
+func AllowError() Option {
+ return allowed(levelError)
+}
+
+// AllowNone allows no leveled log events to pass.
+func AllowNone() Option {
+ return allowed(0)
+}
+
+func allowed(allowed level) Option {
+ return func(l *logger) { l.allowed = allowed }
+}
+
+// ErrNotAllowed sets the error to return from Log when it squelches a log
+// event disallowed by the configured Allow[Level] option. By default,
+// ErrNotAllowed is nil; in this case the log event is squelched with no
+// error.
+func ErrNotAllowed(err error) Option {
+ return func(l *logger) { l.errNotAllowed = err }
+}
+
+// SquelchNoLevel instructs Log to squelch log events with no level, so that
+// they don't proceed through to the wrapped logger. If SquelchNoLevel is set
+// to true and a log event is squelched in this way, the error value
+// configured with ErrNoLevel is returned to the caller.
+func SquelchNoLevel(squelch bool) Option {
+ return func(l *logger) { l.squelchNoLevel = squelch }
+}
+
+// ErrNoLevel sets the error to return from Log when it squelches a log event
+// with no level. By default, ErrNoLevel is nil; in this case the log event is
+// squelched with no error.
+func ErrNoLevel(err error) Option {
+ return func(l *logger) { l.errNoLevel = err }
+}
+
+// NewInjector wraps next and returns a logger that adds a Key/level pair to
+// the beginning of log events that don't already contain a level. In effect,
+// this gives a default level to logs without a level.
+func NewInjector(next log.Logger, level Value) log.Logger {
+ return &injector{
+ next: next,
+ level: level,
+ }
+}
+
+type injector struct {
+ next log.Logger
+ level interface{}
+}
+
+func (l *injector) Log(keyvals ...interface{}) error {
+ for i := 1; i < len(keyvals); i += 2 {
+ if _, ok := keyvals[i].(*levelValue); ok {
+ return l.next.Log(keyvals...)
+ }
+ }
+ kvs := make([]interface{}, len(keyvals)+2)
+ kvs[0], kvs[1] = key, l.level
+ copy(kvs[2:], keyvals)
+ return l.next.Log(kvs...)
+}
+
+// Value is the interface that each of the canonical level values implement.
+// It contains unexported methods that prevent types from other packages from
+// implementing it and guaranteeing that NewFilter can distinguish the levels
+// defined in this package from all other values.
+type Value interface {
+ String() string
+ levelVal()
+}
+
+// Key returns the unique key added to log events by the loggers in this
+// package.
+func Key() interface{} { return key }
+
+// ErrorValue returns the unique value added to log events by Error.
+func ErrorValue() Value { return errorValue }
+
+// WarnValue returns the unique value added to log events by Warn.
+func WarnValue() Value { return warnValue }
+
+// InfoValue returns the unique value added to log events by Info.
+func InfoValue() Value { return infoValue }
+
+// DebugValue returns the unique value added to log events by Debug.
+func DebugValue() Value { return debugValue }
+
+var (
+ // key is of type interface{} so that it allocates once during package
+ // initialization and avoids allocating every time the value is added to a
+ // []interface{} later.
+ key interface{} = "level"
+
+ errorValue = &levelValue{level: levelError, name: "error"}
+ warnValue = &levelValue{level: levelWarn, name: "warn"}
+ infoValue = &levelValue{level: levelInfo, name: "info"}
+ debugValue = &levelValue{level: levelDebug, name: "debug"}
+)
+
+type level byte
+
+const (
+ levelDebug level = 1 << iota
+ levelInfo
+ levelWarn
+ levelError
+)
+
+type levelValue struct {
+ name string
+ level
+}
+
+func (v *levelValue) String() string { return v.name }
+func (v *levelValue) levelVal() {}
diff --git a/vendor/github.com/go-kit/log/log.go b/vendor/github.com/go-kit/log/log.go
new file mode 100644
index 000000000..62e11adac
--- /dev/null
+++ b/vendor/github.com/go-kit/log/log.go
@@ -0,0 +1,179 @@
+package log
+
+import "errors"
+
+// Logger is the fundamental interface for all log operations. Log creates a
+// log event from keyvals, a variadic sequence of alternating keys and values.
+// Implementations must be safe for concurrent use by multiple goroutines. In
+// particular, any implementation of Logger that appends to keyvals or
+// modifies or retains any of its elements must make a copy first.
+type Logger interface {
+ Log(keyvals ...interface{}) error
+}
+
+// ErrMissingValue is appended to keyvals slices with odd length to substitute
+// the missing value.
+var ErrMissingValue = errors.New("(MISSING)")
+
+// With returns a new contextual logger with keyvals prepended to those passed
+// to calls to Log. If logger is also a contextual logger created by With,
+// WithPrefix, or WithSuffix, keyvals is appended to the existing context.
+//
+// The returned Logger replaces all value elements (odd indexes) containing a
+// Valuer with their generated value for each call to its Log method.
+func With(logger Logger, keyvals ...interface{}) Logger {
+ if len(keyvals) == 0 {
+ return logger
+ }
+ l := newContext(logger)
+ kvs := append(l.keyvals, keyvals...)
+ if len(kvs)%2 != 0 {
+ kvs = append(kvs, ErrMissingValue)
+ }
+ return &context{
+ logger: l.logger,
+ // Limiting the capacity of the stored keyvals ensures that a new
+ // backing array is created if the slice must grow in Log or With.
+ // Using the extra capacity without copying risks a data race that
+ // would violate the Logger interface contract.
+ keyvals: kvs[:len(kvs):len(kvs)],
+ hasValuer: l.hasValuer || containsValuer(keyvals),
+ sKeyvals: l.sKeyvals,
+ sHasValuer: l.sHasValuer,
+ }
+}
+
+// WithPrefix returns a new contextual logger with keyvals prepended to those
+// passed to calls to Log. If logger is also a contextual logger created by
+// With, WithPrefix, or WithSuffix, keyvals is prepended to the existing context.
+//
+// The returned Logger replaces all value elements (odd indexes) containing a
+// Valuer with their generated value for each call to its Log method.
+func WithPrefix(logger Logger, keyvals ...interface{}) Logger {
+ if len(keyvals) == 0 {
+ return logger
+ }
+ l := newContext(logger)
+ // Limiting the capacity of the stored keyvals ensures that a new
+ // backing array is created if the slice must grow in Log or With.
+ // Using the extra capacity without copying risks a data race that
+ // would violate the Logger interface contract.
+ n := len(l.keyvals) + len(keyvals)
+ if len(keyvals)%2 != 0 {
+ n++
+ }
+ kvs := make([]interface{}, 0, n)
+ kvs = append(kvs, keyvals...)
+ if len(kvs)%2 != 0 {
+ kvs = append(kvs, ErrMissingValue)
+ }
+ kvs = append(kvs, l.keyvals...)
+ return &context{
+ logger: l.logger,
+ keyvals: kvs,
+ hasValuer: l.hasValuer || containsValuer(keyvals),
+ sKeyvals: l.sKeyvals,
+ sHasValuer: l.sHasValuer,
+ }
+}
+
+// WithSuffix returns a new contextual logger with keyvals appended to those
+// passed to calls to Log. If logger is also a contextual logger created by
+// With, WithPrefix, or WithSuffix, keyvals is appended to the existing context.
+//
+// The returned Logger replaces all value elements (odd indexes) containing a
+// Valuer with their generated value for each call to its Log method.
+func WithSuffix(logger Logger, keyvals ...interface{}) Logger {
+ if len(keyvals) == 0 {
+ return logger
+ }
+ l := newContext(logger)
+ // Limiting the capacity of the stored keyvals ensures that a new
+ // backing array is created if the slice must grow in Log or With.
+ // Using the extra capacity without copying risks a data race that
+ // would violate the Logger interface contract.
+ n := len(l.sKeyvals) + len(keyvals)
+ if len(keyvals)%2 != 0 {
+ n++
+ }
+ kvs := make([]interface{}, 0, n)
+ kvs = append(kvs, keyvals...)
+ if len(kvs)%2 != 0 {
+ kvs = append(kvs, ErrMissingValue)
+ }
+ kvs = append(l.sKeyvals, kvs...)
+ return &context{
+ logger: l.logger,
+ keyvals: l.keyvals,
+ hasValuer: l.hasValuer,
+ sKeyvals: kvs,
+ sHasValuer: l.sHasValuer || containsValuer(keyvals),
+ }
+}
+
+// context is the Logger implementation returned by With, WithPrefix, and
+// WithSuffix. It wraps a Logger and holds keyvals that it includes in all
+// log events. Its Log method calls bindValues to generate values for each
+// Valuer in the context keyvals.
+//
+// A context must always have the same number of stack frames between calls to
+// its Log method and the eventual binding of Valuers to their value. This
+// requirement comes from the functional requirement to allow a context to
+// resolve application call site information for a Caller stored in the
+// context. To do this we must be able to predict the number of logging
+// functions on the stack when bindValues is called.
+//
+// Two implementation details provide the needed stack depth consistency.
+//
+// 1. newContext avoids introducing an additional layer when asked to
+// wrap another context.
+// 2. With, WithPrefix, and WithSuffix avoid introducing an additional
+// layer by returning a newly constructed context with a merged keyvals
+// rather than simply wrapping the existing context.
+type context struct {
+ logger Logger
+ keyvals []interface{}
+ sKeyvals []interface{} // suffixes
+ hasValuer bool
+ sHasValuer bool
+}
+
+func newContext(logger Logger) *context {
+ if c, ok := logger.(*context); ok {
+ return c
+ }
+ return &context{logger: logger}
+}
+
+// Log replaces all value elements (odd indexes) containing a Valuer in the
+// stored context with their generated value, appends keyvals, and passes the
+// result to the wrapped Logger.
+func (l *context) Log(keyvals ...interface{}) error {
+ kvs := append(l.keyvals, keyvals...)
+ if len(kvs)%2 != 0 {
+ kvs = append(kvs, ErrMissingValue)
+ }
+ if l.hasValuer {
+ // If no keyvals were appended above then we must copy l.keyvals so
+ // that future log events will reevaluate the stored Valuers.
+ if len(keyvals) == 0 {
+ kvs = append([]interface{}{}, l.keyvals...)
+ }
+ bindValues(kvs[:(len(l.keyvals))])
+ }
+ kvs = append(kvs, l.sKeyvals...)
+ if l.sHasValuer {
+ bindValues(kvs[len(kvs)-len(l.sKeyvals):])
+ }
+ return l.logger.Log(kvs...)
+}
+
+// LoggerFunc is an adapter to allow use of ordinary functions as Loggers. If
+// f is a function with the appropriate signature, LoggerFunc(f) is a Logger
+// object that calls f.
+type LoggerFunc func(...interface{}) error
+
+// Log implements Logger by calling f(keyvals...).
+func (f LoggerFunc) Log(keyvals ...interface{}) error {
+ return f(keyvals...)
+}
diff --git a/vendor/github.com/go-kit/log/logfmt_logger.go b/vendor/github.com/go-kit/log/logfmt_logger.go
new file mode 100644
index 000000000..a00305298
--- /dev/null
+++ b/vendor/github.com/go-kit/log/logfmt_logger.go
@@ -0,0 +1,62 @@
+package log
+
+import (
+ "bytes"
+ "io"
+ "sync"
+
+ "github.com/go-logfmt/logfmt"
+)
+
+type logfmtEncoder struct {
+ *logfmt.Encoder
+ buf bytes.Buffer
+}
+
+func (l *logfmtEncoder) Reset() {
+ l.Encoder.Reset()
+ l.buf.Reset()
+}
+
+var logfmtEncoderPool = sync.Pool{
+ New: func() interface{} {
+ var enc logfmtEncoder
+ enc.Encoder = logfmt.NewEncoder(&enc.buf)
+ return &enc
+ },
+}
+
+type logfmtLogger struct {
+ w io.Writer
+}
+
+// NewLogfmtLogger returns a logger that encodes keyvals to the Writer in
+// logfmt format. Each log event produces no more than one call to w.Write.
+// The passed Writer must be safe for concurrent use by multiple goroutines if
+// the returned Logger will be used concurrently.
+func NewLogfmtLogger(w io.Writer) Logger {
+ return &logfmtLogger{w}
+}
+
+func (l logfmtLogger) Log(keyvals ...interface{}) error {
+ enc := logfmtEncoderPool.Get().(*logfmtEncoder)
+ enc.Reset()
+ defer logfmtEncoderPool.Put(enc)
+
+ if err := enc.EncodeKeyvals(keyvals...); err != nil {
+ return err
+ }
+
+ // Add newline to the end of the buffer
+ if err := enc.EndRecord(); err != nil {
+ return err
+ }
+
+ // The Logger interface requires implementations to be safe for concurrent
+ // use by multiple goroutines. For this implementation that means making
+ // only one call to l.w.Write() for each call to Log.
+ if _, err := l.w.Write(enc.buf.Bytes()); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/go-kit/log/nop_logger.go b/vendor/github.com/go-kit/log/nop_logger.go
new file mode 100644
index 000000000..1047d626c
--- /dev/null
+++ b/vendor/github.com/go-kit/log/nop_logger.go
@@ -0,0 +1,8 @@
+package log
+
+type nopLogger struct{}
+
+// NewNopLogger returns a logger that doesn't do anything.
+func NewNopLogger() Logger { return nopLogger{} }
+
+func (nopLogger) Log(...interface{}) error { return nil }
diff --git a/vendor/github.com/go-kit/log/stdlib.go b/vendor/github.com/go-kit/log/stdlib.go
new file mode 100644
index 000000000..0338edbe2
--- /dev/null
+++ b/vendor/github.com/go-kit/log/stdlib.go
@@ -0,0 +1,151 @@
+package log
+
+import (
+ "bytes"
+ "io"
+ "log"
+ "regexp"
+ "strings"
+)
+
+// StdlibWriter implements io.Writer by invoking the stdlib log.Print. It's
+// designed to be passed to a Go kit logger as the writer, for cases where
+// it's necessary to redirect all Go kit log output to the stdlib logger.
+//
+// If you have any choice in the matter, you shouldn't use this. Prefer to
+// redirect the stdlib log to the Go kit logger via NewStdlibAdapter.
+type StdlibWriter struct{}
+
+// Write implements io.Writer.
+func (w StdlibWriter) Write(p []byte) (int, error) {
+ log.Print(strings.TrimSpace(string(p)))
+ return len(p), nil
+}
+
+// StdlibAdapter wraps a Logger and allows it to be passed to the stdlib
+// logger's SetOutput. It will extract date/timestamps, filenames, and
+// messages, and place them under relevant keys.
+type StdlibAdapter struct {
+ Logger
+ timestampKey string
+ fileKey string
+ messageKey string
+ prefix string
+ joinPrefixToMsg bool
+}
+
+// StdlibAdapterOption sets a parameter for the StdlibAdapter.
+type StdlibAdapterOption func(*StdlibAdapter)
+
+// TimestampKey sets the key for the timestamp field. By default, it's "ts".
+func TimestampKey(key string) StdlibAdapterOption {
+ return func(a *StdlibAdapter) { a.timestampKey = key }
+}
+
+// FileKey sets the key for the file and line field. By default, it's "caller".
+func FileKey(key string) StdlibAdapterOption {
+ return func(a *StdlibAdapter) { a.fileKey = key }
+}
+
+// MessageKey sets the key for the actual log message. By default, it's "msg".
+func MessageKey(key string) StdlibAdapterOption {
+ return func(a *StdlibAdapter) { a.messageKey = key }
+}
+
+// Prefix configures the adapter to parse a prefix from stdlib log events. If
+// you provide a non-empty prefix to the stdlib logger, then you should provide
+// that same prefix to the adapter via this option.
+//
+// By default, the prefix isn't included in the msg key. Set joinPrefixToMsg to
+// true if you want to include the parsed prefix in the msg.
+func Prefix(prefix string, joinPrefixToMsg bool) StdlibAdapterOption {
+ return func(a *StdlibAdapter) { a.prefix = prefix; a.joinPrefixToMsg = joinPrefixToMsg }
+}
+
+// NewStdlibAdapter returns a new StdlibAdapter wrapper around the passed
+// logger. It's designed to be passed to log.SetOutput.
+func NewStdlibAdapter(logger Logger, options ...StdlibAdapterOption) io.Writer {
+ a := StdlibAdapter{
+ Logger: logger,
+ timestampKey: "ts",
+ fileKey: "caller",
+ messageKey: "msg",
+ }
+ for _, option := range options {
+ option(&a)
+ }
+ return a
+}
+
+func (a StdlibAdapter) Write(p []byte) (int, error) {
+ p = a.handlePrefix(p)
+
+ result := subexps(p)
+ keyvals := []interface{}{}
+ var timestamp string
+ if date, ok := result["date"]; ok && date != "" {
+ timestamp = date
+ }
+ if time, ok := result["time"]; ok && time != "" {
+ if timestamp != "" {
+ timestamp += " "
+ }
+ timestamp += time
+ }
+ if timestamp != "" {
+ keyvals = append(keyvals, a.timestampKey, timestamp)
+ }
+ if file, ok := result["file"]; ok && file != "" {
+ keyvals = append(keyvals, a.fileKey, file)
+ }
+ if msg, ok := result["msg"]; ok {
+ msg = a.handleMessagePrefix(msg)
+ keyvals = append(keyvals, a.messageKey, msg)
+ }
+ if err := a.Logger.Log(keyvals...); err != nil {
+ return 0, err
+ }
+ return len(p), nil
+}
+
+func (a StdlibAdapter) handlePrefix(p []byte) []byte {
+ if a.prefix != "" {
+ p = bytes.TrimPrefix(p, []byte(a.prefix))
+ }
+ return p
+}
+
+func (a StdlibAdapter) handleMessagePrefix(msg string) string {
+ if a.prefix == "" {
+ return msg
+ }
+
+ msg = strings.TrimPrefix(msg, a.prefix)
+ if a.joinPrefixToMsg {
+ msg = a.prefix + msg
+ }
+ return msg
+}
+
+const (
+	logRegexpDate = `(?P<date>[0-9]{4}/[0-9]{2}/[0-9]{2})?[ ]?`
+	logRegexpTime = `(?P<time>[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?)?[ ]?`
+	logRegexpFile = `(?P<file>.+?:[0-9]+)?`
+	logRegexpMsg  = `(: )?(?P<msg>.*)`
+)
+
+var logRegexp = regexp.MustCompile(logRegexpDate + logRegexpTime + logRegexpFile + logRegexpMsg)
+
+func subexps(line []byte) map[string]string {
+	result := logRegexp.FindSubmatch(line)
+	if result == nil {
+		return map[string]string{}
+	}
+	m := map[string]string{}
+	for i, name := range logRegexp.SubexpNames() {
+		if name != "" && i <= len(result) {
+			m[name] = string(result[i])
+		}
+	}
+	return m
+}
Resty Communication Channels
+
+
+
+## News
+
+ * v2.1.0 [released](https://github.com/go-resty/resty/releases/tag/v2.1.0) and tagged on Oct 10, 2019.
+ * v2.0.0 [released](https://github.com/go-resty/resty/releases/tag/v2.0.0) and tagged on Jul 16, 2019.
+ * v1.12.0 [released](https://github.com/go-resty/resty/releases/tag/v1.12.0) and tagged on Feb 27, 2019.
+ * v1.0 released and tagged on Sep 25, 2017. - Resty's first version was released on Sep 15, 2015, then it grew gradually into a very handy and helpful library. It's been two years since the first release. I'm very thankful to Resty users and its [contributors](https://github.com/go-resty/resty/graphs/contributors).
+
+## Features
+
+ * GET, POST, PUT, DELETE, HEAD, PATCH, OPTIONS, etc.
+ * Simple and chainable methods for settings and request
+ * [Request](https://godoc.org/github.com/go-resty/resty#Request) Body can be `string`, `[]byte`, `struct`, `map`, `slice` and `io.Reader` too
+ * Auto detects `Content-Type`
+ * Bufferless processing for `io.Reader`
+ * Request Body can be read multiple times via `Request.RawRequest.GetBody()`
+ * [Response](https://godoc.org/github.com/go-resty/resty#Response) object gives you more possibility
+ * Access as `[]byte` array - `response.Body()` OR Access as `string` - `response.String()`
+ * Know your `response.Time()` and when we `response.ReceivedAt()`
+ * Automatic marshal and unmarshal for `JSON` and `XML` content type
+ * Default is `JSON`, if you supply `struct/map` without header `Content-Type`
+ * For auto-unmarshal, refer to -
+ - Success scenario [Request.SetResult()](https://godoc.org/github.com/go-resty/resty#Request.SetResult) and [Response.Result()](https://godoc.org/github.com/go-resty/resty#Response.Result).
+ - Error scenario [Request.SetError()](https://godoc.org/github.com/go-resty/resty#Request.SetError) and [Response.Error()](https://godoc.org/github.com/go-resty/resty#Response.Error).
+ - Supports [RFC7807](https://tools.ietf.org/html/rfc7807) - `application/problem+json` & `application/problem+xml`
+ * Easy to upload one or more file(s) via `multipart/form-data`
+ * Auto detects file content type
+ * Request URL [Path Params (aka URI Params)](https://godoc.org/github.com/go-resty/resty#Request.SetPathParams)
+ * Backoff Retry Mechanism with retry condition function [reference](retry_test.go)
+ * Resty client HTTP & REST [Request](https://godoc.org/github.com/go-resty/resty#Client.OnBeforeRequest) and [Response](https://godoc.org/github.com/go-resty/resty#Client.OnAfterResponse) middlewares
+ * `Request.SetContext` supported
+ * Authorization option of `BasicAuth` and `Bearer` token
+ * Set request `ContentLength` value for all request or particular request
+ * Custom [Root Certificates](https://godoc.org/github.com/go-resty/resty#Client.SetRootCertificate) and Client [Certificates](https://godoc.org/github.com/go-resty/resty#Client.SetCertificates)
+ * Download/Save HTTP response directly into File, like `curl -o` flag. See [SetOutputDirectory](https://godoc.org/github.com/go-resty/resty#Client.SetOutputDirectory) & [SetOutput](https://godoc.org/github.com/go-resty/resty#Request.SetOutput).
+ * Cookies for your request and CookieJar support
+ * SRV Record based request instead of Host URL
+ * Client settings like `Timeout`, `RedirectPolicy`, `Proxy`, `TLSClientConfig`, `Transport`, etc.
+ * Optionally allows GET request with payload, see [SetAllowGetMethodPayload](https://godoc.org/github.com/go-resty/resty#Client.SetAllowGetMethodPayload)
+ * Supports registering external JSON library into resty, see [how to use](https://github.com/go-resty/resty/issues/76#issuecomment-314015250)
+ * Exposes Response reader without reading response (no auto-unmarshaling) if need be, see [how to use](https://github.com/go-resty/resty/issues/87#issuecomment-322100604)
+ * Option to specify expected `Content-Type` when response `Content-Type` header missing. Refer to [#92](https://github.com/go-resty/resty/issues/92)
+ * Resty design
+ * Have client level settings & options and also override at Request level if you want to
+ * Request and Response middlewares
+ * Create Multiple clients if you want to `resty.New()`
+ * Supports `http.RoundTripper` implementation, see [SetTransport](https://godoc.org/github.com/go-resty/resty#Client.SetTransport)
+ * goroutine concurrent safe
+ * Resty Client trace, see [Client.EnableTrace](https://godoc.org/github.com/go-resty/resty#Client.EnableTrace) and [Request.EnableTrace](https://godoc.org/github.com/go-resty/resty#Request.EnableTrace)
+ * Debug mode - clean and informative logging presentation
+ * Gzip - Go does it automatically also resty has fallback handling too
+ * Works fine with `HTTP/2` and `HTTP/1.1`
+ * [Bazel support](#bazel-support)
+ * Easily mock Resty for testing, [for e.g.](#mocking-http-requests-using-httpmock-library)
+ * Well tested client library
+
+### Included Batteries
+
+ * Redirect Policies - see [how to use](#redirect-policy)
+ * NoRedirectPolicy
+ * FlexibleRedirectPolicy
+ * DomainCheckRedirectPolicy
+ * etc. [more info](redirect.go)
+ * Retry Mechanism [how to use](#retries)
+ * Backoff Retry
+ * Conditional Retry
+ * SRV Record based request instead of Host URL [how to use](resty_test.go#L1412)
+ * etc. (upcoming - share your ideas [here](https://github.com/go-resty/resty/issues)).
+
+
+#### Supported Go Versions
+
+Initially Resty started supporting `go modules` since `v1.10.0` release.
+
+Starting Resty v2 and higher versions, it fully embraces [go modules](https://github.com/golang/go/wiki/Modules) package release. It requires a Go version capable of understanding `/vN` suffixed imports:
+
+- 1.9.7+
+- 1.10.3+
+- 1.11+
+
+
+## It might be beneficial for your project :smile:
+
+Resty author also published following projects for Go Community.
+
+ * [aah framework](https://aahframework.org) - A secure, flexible, rapid Go web framework.
+ * [THUMBAI](https://thumbai.app) - Go Mod Repository, Go Vanity Service and Simple Proxy Server.
+ * [go-model](https://github.com/jeevatkm/go-model) - Robust & Easy to use model mapper and utility methods for Go `struct`.
+
+
+## Installation
+
+```bash
+# Go Modules
+require github.com/go-resty/resty/v2 v2.1.0
+```
+
+## Usage
+
+The following samples will assist you to become as comfortable as possible with resty library.
+
+```go
+// Import resty into your code and refer it as `resty`.
+import "github.com/go-resty/resty/v2"
+```
+
+#### Simple GET
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+resp, err := client.R().
+ EnableTrace().
+ Get("https://httpbin.org/get")
+
+// Explore response object
+fmt.Println("Response Info:")
+fmt.Println("Error :", err)
+fmt.Println("Status Code:", resp.StatusCode())
+fmt.Println("Status :", resp.Status())
+fmt.Println("Time :", resp.Time())
+fmt.Println("Received At:", resp.ReceivedAt())
+fmt.Println("Body :\n", resp)
+fmt.Println()
+
+// Explore trace info
+fmt.Println("Request Trace Info:")
+ti := resp.Request.TraceInfo()
+fmt.Println("DNSLookup :", ti.DNSLookup)
+fmt.Println("ConnTime :", ti.ConnTime)
+fmt.Println("TLSHandshake :", ti.TLSHandshake)
+fmt.Println("ServerTime :", ti.ServerTime)
+fmt.Println("ResponseTime :", ti.ResponseTime)
+fmt.Println("TotalTime :", ti.TotalTime)
+fmt.Println("IsConnReused :", ti.IsConnReused)
+fmt.Println("IsConnWasIdle:", ti.IsConnWasIdle)
+fmt.Println("ConnIdleTime :", ti.ConnIdleTime)
+
+/* Output
+Response Info:
+Error :
+Status Code: 200
+Status : 200 OK
+Time : 465.301137ms
+Received At: 2019-06-16 01:52:33.772456 -0800 PST m=+0.466672260
+Body :
+ {
+ "args": {},
+ "headers": {
+ "Accept-Encoding": "gzip",
+ "Host": "httpbin.org",
+ "User-Agent": "go-resty/2.0.0 (https://github.com/go-resty/resty)"
+ },
+ "origin": "0.0.0.0",
+ "url": "https://httpbin.org/get"
+}
+
+Request Trace Info:
+DNSLookup : 2.21124ms
+ConnTime : 393.875795ms
+TLSHandshake : 319.313546ms
+ServerTime : 71.109256ms
+ResponseTime : 94.466µs
+TotalTime : 465.301137ms
+IsConnReused : false
+IsConnWasIdle: false
+ConnIdleTime : 0s
+*/
+```
+
+#### Enhanced GET
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+resp, err := client.R().
+ SetQueryParams(map[string]string{
+ "page_no": "1",
+ "limit": "20",
+ "sort":"name",
+ "order": "asc",
+ "random":strconv.FormatInt(time.Now().Unix(), 10),
+ }).
+ SetHeader("Accept", "application/json").
+ SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F").
+ Get("/search_result")
+
+
+// Sample of using Request.SetQueryString method
+resp, err := client.R().
+ SetQueryString("productId=232&template=fresh-sample&cat=resty&source=google&kw=buy a lot more").
+ SetHeader("Accept", "application/json").
+ SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F").
+ Get("/show_product")
+```
+
+#### Various POST method combinations
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// POST JSON string
+// No need to set content type, if you have client level setting
+resp, err := client.R().
+ SetHeader("Content-Type", "application/json").
+ SetBody(`{"username":"testuser", "password":"testpass"}`).
+ SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}).
+ Post("https://myapp.com/login")
+
+// POST []byte array
+// No need to set content type, if you have client level setting
+resp, err := client.R().
+ SetHeader("Content-Type", "application/json").
+ SetBody([]byte(`{"username":"testuser", "password":"testpass"}`)).
+ SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}).
+ Post("https://myapp.com/login")
+
+// POST Struct, default is JSON content type. No need to set one
+resp, err := client.R().
+ SetBody(User{Username: "testuser", Password: "testpass"}).
+ SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}).
+ SetError(&AuthError{}). // or SetError(AuthError{}).
+ Post("https://myapp.com/login")
+
+// POST Map, default is JSON content type. No need to set one
+resp, err := client.R().
+ SetBody(map[string]interface{}{"username": "testuser", "password": "testpass"}).
+ SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}).
+ SetError(&AuthError{}). // or SetError(AuthError{}).
+ Post("https://myapp.com/login")
+
+// POST of raw bytes for file upload. For example: upload file to Dropbox
+fileBytes, _ := ioutil.ReadFile("/Users/jeeva/mydocument.pdf")
+
+// See we are not setting content-type header, since go-resty automatically detects Content-Type for you
+resp, err := client.R().
+ SetBody(fileBytes).
+ SetContentLength(true). // Dropbox expects this value
+ SetAuthToken("").
+ SetError(&DropboxError{}). // or SetError(DropboxError{}).
+ Post("https://content.dropboxapi.com/1/files_put/auto/resty/mydocument.pdf") // for upload Dropbox supports PUT too
+
+// Note: resty detects Content-Type for request body/payload if content type header is not set.
+// * For struct and map data type defaults to 'application/json'
+// * Fallback is plain text content type
+```
+
+#### Sample PUT
+
+You can use various combinations of `PUT` method call like demonstrated for `POST`.
+
+```go
+// Note: This is one sample of PUT method usage, refer POST for more combination
+
+// Create a Resty Client
+client := resty.New()
+
+// Request goes as JSON content type
+// No need to set auth token, error, if you have client level settings
+resp, err := client.R().
+ SetBody(Article{
+ Title: "go-resty",
+ Content: "This is my article content, oh ya!",
+ Author: "Jeevanandam M",
+ Tags: []string{"article", "sample", "resty"},
+ }).
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ SetError(&Error{}). // or SetError(Error{}).
+ Put("https://myapp.com/article/1234")
+```
+
+#### Sample PATCH
+
+You can use various combinations of `PATCH` method call like demonstrated for `POST`.
+
+```go
+// Note: This is one sample of PUT method usage, refer POST for more combination
+
+// Create a Resty Client
+client := resty.New()
+
+// Request goes as JSON content type
+// No need to set auth token, error, if you have client level settings
+resp, err := client.R().
+ SetBody(Article{
+ Tags: []string{"new tag1", "new tag2"},
+ }).
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ SetError(&Error{}). // or SetError(Error{}).
+ Patch("https://myapp.com/articles/1234")
+```
+
+#### Sample DELETE, HEAD, OPTIONS
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// DELETE an article
+// No need to set auth token, error, if you have client level settings
+resp, err := client.R().
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ SetError(&Error{}). // or SetError(Error{}).
+ Delete("https://myapp.com/articles/1234")
+
+// DELETE articles with payload/body as a JSON string
+// No need to set auth token, error, if you have client level settings
+resp, err := client.R().
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ SetError(&Error{}). // or SetError(Error{}).
+ SetHeader("Content-Type", "application/json").
+ SetBody(`{article_ids: [1002, 1006, 1007, 87683, 45432] }`).
+ Delete("https://myapp.com/articles")
+
+// HEAD of resource
+// No need to set auth token, if you have client level settings
+resp, err := client.R().
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ Head("https://myapp.com/videos/hi-res-video")
+
+// OPTIONS of resource
+// No need to set auth token, if you have client level settings
+resp, err := client.R().
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ Options("https://myapp.com/servers/nyc-dc-01")
+```
+
+### Multipart File(s) upload
+
+#### Using io.Reader
+
+```go
+profileImgBytes, _ := ioutil.ReadFile("/Users/jeeva/test-img.png")
+notesBytes, _ := ioutil.ReadFile("/Users/jeeva/text-file.txt")
+
+// Create a Resty Client
+client := resty.New()
+
+resp, err := client.R().
+ SetFileReader("profile_img", "test-img.png", bytes.NewReader(profileImgBytes)).
+ SetFileReader("notes", "text-file.txt", bytes.NewReader(notesBytes)).
+ SetFormData(map[string]string{
+ "first_name": "Jeevanandam",
+ "last_name": "M",
+ }).
+ Post("http://myapp.com/upload")
+```
+
+#### Using File directly from Path
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Single file scenario
+resp, err := client.R().
+ SetFile("profile_img", "/Users/jeeva/test-img.png").
+ Post("http://myapp.com/upload")
+
+// Multiple files scenario
+resp, err := client.R().
+ SetFiles(map[string]string{
+ "profile_img": "/Users/jeeva/test-img.png",
+ "notes": "/Users/jeeva/text-file.txt",
+ }).
+ Post("http://myapp.com/upload")
+
+// Multipart of form fields and files
+resp, err := client.R().
+ SetFiles(map[string]string{
+ "profile_img": "/Users/jeeva/test-img.png",
+ "notes": "/Users/jeeva/text-file.txt",
+ }).
+ SetFormData(map[string]string{
+ "first_name": "Jeevanandam",
+ "last_name": "M",
+ "zip_code": "00001",
+ "city": "my city",
+ "access_token": "C6A79608-782F-4ED0-A11D-BD82FAD829CD",
+ }).
+ Post("http://myapp.com/profile")
+```
+
+#### Sample Form submission
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// just mentioning about POST as an example with simple flow
+// User Login
+resp, err := client.R().
+ SetFormData(map[string]string{
+ "username": "jeeva",
+ "password": "mypass",
+ }).
+ Post("http://myapp.com/login")
+
+// Followed by profile update
+resp, err := client.R().
+ SetFormData(map[string]string{
+ "first_name": "Jeevanandam",
+ "last_name": "M",
+ "zip_code": "00001",
+ "city": "new city update",
+ }).
+ Post("http://myapp.com/profile")
+
+// Multi value form data
+criteria := url.Values{
+ "search_criteria": []string{"book", "glass", "pencil"},
+}
+resp, err := client.R().
+ SetFormDataFromValues(criteria).
+ Post("http://myapp.com/search")
+```
+
+#### Save HTTP Response into File
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Setting output directory path. If the directory does not exist, resty creates one!
+// This is optional if you're planning on using an absolute path in
+// `Request.SetOutput`; both can be used together.
+client.SetOutputDirectory("/Users/jeeva/Downloads")
+
+// HTTP response gets saved into file, similar to curl -o flag
+_, err := client.R().
+ SetOutput("plugin/ReplyWithHeader-v5.1-beta.zip").
+ Get("http://bit.ly/1LouEKr")
+
+// OR using absolute path
+// Note: the output directory path is not used for an absolute path
+_, err := client.R().
+ SetOutput("/MyDownloads/plugin/ReplyWithHeader-v5.1-beta.zip").
+ Get("http://bit.ly/1LouEKr")
+```
+
+#### Request URL Path Params
+
+Resty provides easy to use dynamic request URL path params. Params can be set at client and request level. Client level params value can be overridden at request level.
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+client.R().SetPathParams(map[string]string{
+ "userId": "sample@sample.com",
+ "subAccountId": "100002",
+}).
+Get("/v1/users/{userId}/{subAccountId}/details")
+
+// Result:
+// Composed URL - /v1/users/sample@sample.com/100002/details
+```
+
+#### Request and Response Middleware
+
+Resty provides the ability to define middleware to manipulate the Request and Response. It is more flexible than the callback approach.
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Registering Request Middleware
+client.OnBeforeRequest(func(c *resty.Client, req *resty.Request) error {
+ // Now you have access to Client and current Request object
+ // manipulate it as per your need
+
+ return nil // if its success otherwise return error
+ })
+
+// Registering Response Middleware
+client.OnAfterResponse(func(c *resty.Client, resp *resty.Response) error {
+ // Now you have access to Client and current Response object
+ // manipulate it as per your need
+
+ return nil // if its success otherwise return error
+ })
+```
+
+#### Redirect Policy
+
+Resty provides a few ready-to-use redirect policies, and it also supports multiple policies together.
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Assign Client Redirect Policy. Create one as per you need
+client.SetRedirectPolicy(resty.FlexibleRedirectPolicy(15))
+
+// Wanna multiple policies such as redirect count, domain name check, etc
+client.SetRedirectPolicy(resty.FlexibleRedirectPolicy(20),
+ resty.DomainCheckRedirectPolicy("host1.com", "host2.org", "host3.net"))
+```
+
+##### Custom Redirect Policy
+
+Implement [RedirectPolicy](redirect.go#L20) interface and register it with resty client. Have a look [redirect.go](redirect.go) for more information.
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Using raw func into resty.SetRedirectPolicy
+client.SetRedirectPolicy(resty.RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error {
+ // Implement your logic here
+
+ // return nil for continue redirect otherwise return error to stop/prevent redirect
+ return nil
+}))
+
+//---------------------------------------------------
+
+// Using struct create more flexible redirect policy
+type CustomRedirectPolicy struct {
+ // variables goes here
+}
+
+func (c *CustomRedirectPolicy) Apply(req *http.Request, via []*http.Request) error {
+ // Implement your logic here
+
+ // return nil for continue redirect otherwise return error to stop/prevent redirect
+ return nil
+}
+
+// Registering in resty
+client.SetRedirectPolicy(CustomRedirectPolicy{/* initialize variables */})
+```
+
+#### Custom Root Certificates and Client Certificates
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Custom Root certificates, just supply .pem file.
+// you can add one or more root certificates, they get appended
+client.SetRootCertificate("/path/to/root/pemFile1.pem")
+client.SetRootCertificate("/path/to/root/pemFile2.pem")
+// ... and so on!
+
+// Adding Client Certificates, you add one or more certificates
+// Sample for creating certificate object
+// Parsing public/private key pair from a pair of files. The files must contain PEM encoded data.
+cert1, err := tls.LoadX509KeyPair("certs/client.pem", "certs/client.key")
+if err != nil {
+ log.Fatalf("ERROR client certificate: %s", err)
+}
+// ...
+
+// You add one or more certificates
+client.SetCertificates(cert1, cert2, cert3)
+```
+
+#### Proxy Settings - Client as well as at Request Level
+
+Default `Go` supports Proxy via environment variable `HTTP_PROXY`. Resty provides support via `SetProxy` & `RemoveProxy`.
+Choose as per your need.
+
+**Client Level Proxy** settings applied to all the request
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Setting a Proxy URL and Port
+client.SetProxy("http://proxyserver:8888")
+
+// Want to remove proxy setting
+client.RemoveProxy()
+```
+
+#### Retries
+
+Resty uses [backoff](http://www.awsarchitectureblog.com/2015/03/backoff.html)
+to increase retry intervals after each attempt.
+
+Usage example:
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Retries are configured per client
+client.
+ // Set retry count to non zero to enable retries
+ SetRetryCount(3).
+ // You can override initial retry wait time.
+ // Default is 100 milliseconds.
+ SetRetryWaitTime(5 * time.Second).
+ // MaxWaitTime can be overridden as well.
+ // Default is 2 seconds.
+ SetRetryMaxWaitTime(20 * time.Second).
+ // SetRetryAfter sets callback to calculate wait time between retries.
+ // Default (nil) implies exponential backoff with jitter
+ SetRetryAfter(func(client *Client, resp *Response) (time.Duration, error) {
+ return 0, errors.New("quota exceeded")
+ })
+```
+
+Above setup will result in resty retrying requests that returned a non-nil error up to
+3 times, with the delay increased after each attempt.
+
+You can optionally provide client with custom retry conditions:
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+client.AddRetryCondition(
+ // RetryConditionFunc type is for retry condition function
+ // input: non-nil Response OR request execution error
+ func(r *resty.Response, err error) bool {
+ return r.StatusCode() == http.StatusTooManyRequests
+ },
+)
+```
+
+Above example will make resty retry requests ended with `429 Too Many Requests`
+status code.
+
+Multiple retry conditions can be added.
+
+It is also possible to use `resty.Backoff(...)` to get arbitrary retry scenarios
+implemented. [Reference](retry_test.go).
+
+#### Allow GET request with Payload
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Allow GET request with Payload. This is disabled by default.
+client.SetAllowGetMethodPayload(true)
+```
+
+#### Wanna Multiple Clients
+
+```go
+// Here you go!
+// Client 1
+client1 := resty.New()
+client1.R().Get("http://httpbin.org")
+// ...
+
+// Client 2
+client2 := resty.New()
+client2.R().Head("http://httpbin.org")
+// ...
+
+// Bend it as per your need!!!
+```
+
+#### Remaining Client Settings & its Options
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Unique settings at Client level
+//--------------------------------
+// Enable debug mode
+client.SetDebug(true)
+
+// Assign Client TLSClientConfig
+// One can set custom root-certificate. Refer: http://golang.org/pkg/crypto/tls/#example_Dial
+client.SetTLSClientConfig(&tls.Config{ RootCAs: roots })
+
+// or One can disable security check (https)
+client.SetTLSClientConfig(&tls.Config{ InsecureSkipVerify: true })
+
+// Set client timeout as per your need
+client.SetTimeout(1 * time.Minute)
+
+
+// You can override all below settings and options at request level if you want to
+//--------------------------------------------------------------------------------
+// Host URL for all request. So you can use relative URL in the request
+client.SetHostURL("http://httpbin.org")
+
+// Headers for all request
+client.SetHeader("Accept", "application/json")
+client.SetHeaders(map[string]string{
+ "Content-Type": "application/json",
+ "User-Agent": "My custom User Agent String",
+ })
+
+// Cookies for all request
+client.SetCookie(&http.Cookie{
+ Name:"go-resty",
+ Value:"This is cookie value",
+ Path: "/",
+ Domain: "sample.com",
+ MaxAge: 36000,
+ HttpOnly: true,
+ Secure: false,
+ })
+client.SetCookies(cookies)
+
+// URL query parameters for all request
+client.SetQueryParam("user_id", "00001")
+client.SetQueryParams(map[string]string{ // sample of those who use this manner
+ "api_key": "api-key-here",
+  "api_secret": "api-secret",
+ })
+client.R().SetQueryString("productId=232&template=fresh-sample&cat=resty&source=google&kw=buy a lot more")
+
+// Form data for all request. Typically used with POST and PUT
+client.SetFormData(map[string]string{
+ "access_token": "BC594900-518B-4F7E-AC75-BD37F019E08F",
+ })
+
+// Basic Auth for all request
+client.SetBasicAuth("myuser", "mypass")
+
+// Bearer Auth Token for all request
+client.SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F")
+
+// Enabling Content length value for all request
+client.SetContentLength(true)
+
+// Registering global Error object structure for JSON/XML request
+client.SetError(&Error{}) // or resty.SetError(Error{})
+```
+
+#### Unix Socket
+
+```go
+unixSocket := "/var/run/my_socket.sock"
+
+// Create a Go's http.Transport so we can set it in resty.
+transport := http.Transport{
+ Dial: func(_, _ string) (net.Conn, error) {
+ return net.Dial("unix", unixSocket)
+ },
+}
+
+// Create a Resty Client
+client := resty.New()
+
+// Set the previous transport that we created, set the scheme of the communication to the
+// socket and set the unixSocket as the HostURL.
+client.SetTransport(&transport).SetScheme("http").SetHostURL(unixSocket)
+
+// No need to write the host's URL on the request, just the path.
+client.R().Get("/index.html")
+```
+
+#### Bazel support
+
+Resty can be built, tested and depended upon via [Bazel](https://bazel.build).
+For example, to run all tests:
+
+```shell
+bazel test :go_default_test
+```
+
+#### Mocking http requests using [httpmock](https://github.com/jarcoal/httpmock) library
+
+In order to mock the http requests when testing your application you
+could use the `httpmock` library.
+
+When using the default resty client, you should pass the client to the library as follow:
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Get the underlying HTTP Client and set it to Mock
+httpmock.ActivateNonDefault(client.GetClient())
+```
+
+More detailed example of mocking resty http requests using ginkgo could be found [here](https://github.com/jarcoal/httpmock#ginkgo--resty-example).
+
+## Versioning
+
+Resty releases versions according to [Semantic Versioning](http://semver.org)
+
+ * Resty v2 does not use `gopkg.in` service for library versioning.
+ * Resty fully adapted to `go mod` capabilities since `v1.10.0` release.
+ * Resty v1 series was using `gopkg.in` to provide versioning. `gopkg.in/resty.vX` points to appropriate tagged versions; `X` denotes version series number and it's a stable release for production use. For e.g. `gopkg.in/resty.v0`.
+ * Development takes place at the master branch. Although the code in master should always compile and test successfully, it might break API's. I aim to maintain backwards compatibility, but sometimes API's and behavior might be changed to fix a bug.
+
+## Contribution
+
+I would welcome your contribution! If you find any improvement or issue you want to fix, feel free to send a pull request, I like pull requests that include test cases for fix/enhancement. I have done my best to bring pretty good code coverage. Feel free to write tests.
+
+BTW, I'd like to know what you think about `Resty`. Kindly open an issue or send me an email; it'd mean a lot to me.
+
+## Creator
+
+[Jeevanandam M.](https://github.com/jeevatkm) (jeeva@myjeeva.com)
+
+## Contributors
+
+Have a look on [Contributors](https://github.com/go-resty/resty/graphs/contributors) page.
+
+## License
+
+Resty released under MIT license, refer [LICENSE](LICENSE) file.
diff --git a/vendor/github.com/go-resty/resty/v2/WORKSPACE b/vendor/github.com/go-resty/resty/v2/WORKSPACE
new file mode 100644
index 000000000..5459d6321
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/WORKSPACE
@@ -0,0 +1,27 @@
+workspace(name = "resty")
+
+git_repository(
+ name = "io_bazel_rules_go",
+ remote = "https://github.com/bazelbuild/rules_go.git",
+ tag = "0.13.0",
+)
+
+git_repository(
+ name = "bazel_gazelle",
+ remote = "https://github.com/bazelbuild/bazel-gazelle.git",
+ tag = "0.13.0",
+)
+
+load(
+ "@io_bazel_rules_go//go:def.bzl",
+ "go_rules_dependencies",
+ "go_register_toolchains",
+)
+
+go_rules_dependencies()
+
+go_register_toolchains()
+
+load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies")
+
+gazelle_dependencies()
diff --git a/vendor/github.com/go-resty/resty/v2/client.go b/vendor/github.com/go-resty/resty/v2/client.go
new file mode 100644
index 000000000..5a6e63317
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/client.go
@@ -0,0 +1,946 @@
+// Copyright (c) 2015-2019 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "bytes"
+ "compress/gzip"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "net"
+ "net/http"
+ "net/url"
+ "reflect"
+ "regexp"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+)
+
+const (
+ // MethodGet HTTP method
+ MethodGet = "GET"
+
+ // MethodPost HTTP method
+ MethodPost = "POST"
+
+ // MethodPut HTTP method
+ MethodPut = "PUT"
+
+ // MethodDelete HTTP method
+ MethodDelete = "DELETE"
+
+ // MethodPatch HTTP method
+ MethodPatch = "PATCH"
+
+ // MethodHead HTTP method
+ MethodHead = "HEAD"
+
+ // MethodOptions HTTP method
+ MethodOptions = "OPTIONS"
+)
+
+var (
+ hdrUserAgentKey = http.CanonicalHeaderKey("User-Agent")
+ hdrAcceptKey = http.CanonicalHeaderKey("Accept")
+ hdrContentTypeKey = http.CanonicalHeaderKey("Content-Type")
+ hdrContentLengthKey = http.CanonicalHeaderKey("Content-Length")
+ hdrContentEncodingKey = http.CanonicalHeaderKey("Content-Encoding")
+ hdrAuthorizationKey = http.CanonicalHeaderKey("Authorization")
+
+ plainTextType = "text/plain; charset=utf-8"
+ jsonContentType = "application/json"
+ formContentType = "application/x-www-form-urlencoded"
+
+ jsonCheck = regexp.MustCompile(`(?i:(application|text)/(json|.*\+json|json\-.*)(;|$))`)
+ xmlCheck = regexp.MustCompile(`(?i:(application|text)/(xml|.*\+xml)(;|$))`)
+
+ hdrUserAgentValue = "go-resty/" + Version + " (https://github.com/go-resty/resty)"
+ bufPool = &sync.Pool{New: func() interface{} { return &bytes.Buffer{} }}
+)
+
+// Client struct is used to create Resty client with client level settings,
+// these settings are applicable to all the request raised from the client.
+//
+// Resty also provides options to override most of the client settings
+// at the request level.
+type Client struct {
+ HostURL string
+ QueryParam url.Values
+ FormData url.Values
+ Header http.Header
+ UserInfo *User
+ Token string
+ Cookies []*http.Cookie
+ Error reflect.Type
+ Debug bool
+ DisableWarn bool
+ AllowGetMethodPayload bool
+ RetryCount int
+ RetryWaitTime time.Duration
+ RetryMaxWaitTime time.Duration
+ RetryConditions []RetryConditionFunc
+ RetryAfter RetryAfterFunc
+ JSONMarshal func(v interface{}) ([]byte, error)
+ JSONUnmarshal func(data []byte, v interface{}) error
+
+ jsonEscapeHTML bool
+ setContentLength bool
+ closeConnection bool
+ notParseResponse bool
+ trace bool
+ debugBodySizeLimit int64
+ outputDirectory string
+ scheme string
+ pathParams map[string]string
+ log Logger
+ httpClient *http.Client
+ proxyURL *url.URL
+ beforeRequest []func(*Client, *Request) error
+ udBeforeRequest []func(*Client, *Request) error
+ preReqHook func(*Client, *http.Request) error
+ afterResponse []func(*Client, *Response) error
+ requestLog func(*RequestLog) error
+ responseLog func(*ResponseLog) error
+}
+
+// User type is to hold an username and password information
+type User struct {
+ Username, Password string
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Client methods
+//___________________________________
+
+// SetHostURL method is to set Host URL in the client instance. It will be used with request
+// raised from this client with relative URL
+// // Setting HTTP address
+// client.SetHostURL("http://myjeeva.com")
+//
+// // Setting HTTPS address
+// client.SetHostURL("https://myjeeva.com")
+func (c *Client) SetHostURL(url string) *Client {
+ c.HostURL = strings.TrimRight(url, "/")
+ return c
+}
+
+// SetHeader method sets a single header field and its value in the client instance.
+// These headers will be applied to all requests raised from this client instance.
+// Also it can be overridden at request level header options.
+//
+// See `Request.SetHeader` or `Request.SetHeaders`.
+//
+// For Example: To set `Content-Type` and `Accept` as `application/json`
+//
+// client.
+// SetHeader("Content-Type", "application/json").
+// SetHeader("Accept", "application/json")
+func (c *Client) SetHeader(header, value string) *Client {
+ c.Header.Set(header, value)
+ return c
+}
+
+// SetHeaders method sets multiple headers field and its values at one go in the client instance.
+// These headers will be applied to all requests raised from this client instance. Also it can be
+// overridden at request level headers options.
+//
+// See `Request.SetHeaders` or `Request.SetHeader`.
+//
+// For Example: To set `Content-Type` and `Accept` as `application/json`
+//
+// client.SetHeaders(map[string]string{
+// "Content-Type": "application/json",
+// "Accept": "application/json",
+// })
+func (c *Client) SetHeaders(headers map[string]string) *Client {
+ for h, v := range headers {
+ c.Header.Set(h, v)
+ }
+ return c
+}
+
+// SetCookieJar method sets custom http.CookieJar in the resty client. Its way to override default.
+//
+// For Example: sometimes we don't want to save cookies while contacting the API, so we can remove
+// the default CookieJar in the resty client.
+//
+// client.SetCookieJar(nil)
+func (c *Client) SetCookieJar(jar http.CookieJar) *Client {
+ c.httpClient.Jar = jar
+ return c
+}
+
+// SetCookie method appends a single cookie in the client instance.
+// These cookies will be added to all the request raised from this client instance.
+// client.SetCookie(&http.Cookie{
+// Name:"go-resty",
+// Value:"This is cookie value",
+// })
+func (c *Client) SetCookie(hc *http.Cookie) *Client {
+ c.Cookies = append(c.Cookies, hc)
+ return c
+}
+
+// SetCookies method sets an array of cookies in the client instance.
+// These cookies will be added to all the request raised from this client instance.
+// cookies := []*http.Cookie{
+// &http.Cookie{
+// Name:"go-resty-1",
+// Value:"This is cookie 1 value",
+// },
+// &http.Cookie{
+// Name:"go-resty-2",
+// Value:"This is cookie 2 value",
+// },
+// }
+//
+// // Setting a cookies into resty
+// client.SetCookies(cookies)
+func (c *Client) SetCookies(cs []*http.Cookie) *Client {
+ c.Cookies = append(c.Cookies, cs...)
+ return c
+}
+
+// SetQueryParam method sets single parameter and its value in the client instance.
+// It will be formed as query string for the request.
+//
+// For Example: `search=kitchen%20papers&size=large`
+// in the URL after `?` mark. These query params will be added to all the request raised from
+// this client instance. Also it can be overridden at request level Query Param options.
+//
+// See `Request.SetQueryParam` or `Request.SetQueryParams`.
+// client.
+// SetQueryParam("search", "kitchen papers").
+// SetQueryParam("size", "large")
+func (c *Client) SetQueryParam(param, value string) *Client {
+ c.QueryParam.Set(param, value)
+ return c
+}
+
+// SetQueryParams method sets multiple parameters and their values at one go in the client instance.
+// It will be formed as query string for the request.
+//
+// For Example: `search=kitchen%20papers&size=large`
+// in the URL after `?` mark. These query params will be added to all the request raised from this
+// client instance. Also it can be overridden at request level Query Param options.
+//
+// See `Request.SetQueryParams` or `Request.SetQueryParam`.
+// client.SetQueryParams(map[string]string{
+// "search": "kitchen papers",
+// "size": "large",
+// })
+func (c *Client) SetQueryParams(params map[string]string) *Client {
+ for p, v := range params {
+ c.SetQueryParam(p, v)
+ }
+ return c
+}
+
+// SetFormData method sets Form parameters and their values in the client instance.
+// It's applicable only to HTTP methods `POST` and `PUT`, and the request content type would be set as
+// `application/x-www-form-urlencoded`. These form data will be added to all the requests raised from
+// this client instance. Also it can be overridden at request level form data.
+//
+// See `Request.SetFormData`.
+// client.SetFormData(map[string]string{
+// "access_token": "BC594900-518B-4F7E-AC75-BD37F019E08F",
+// "user_id": "3455454545",
+// })
+func (c *Client) SetFormData(data map[string]string) *Client {
+ for k, v := range data {
+ c.FormData.Set(k, v)
+ }
+ return c
+}
+
+// SetBasicAuth method sets the basic authentication header in the HTTP request. For Example:
+// Authorization: Basic
+//
+// For Example: To set the header for username "go-resty" and password "welcome"
+// client.SetBasicAuth("go-resty", "welcome")
+//
+// This basic auth information gets added to all the requests raised from this client instance.
+// Also it can be overridden or set one at the request level is supported.
+//
+// See `Request.SetBasicAuth`.
+func (c *Client) SetBasicAuth(username, password string) *Client {
+ c.UserInfo = &User{Username: username, Password: password}
+ return c
+}
+
+// SetAuthToken method sets bearer auth token header in the HTTP request. For Example:
+// Authorization: Bearer
+//
+// For Example: To set auth token BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F
+//
+// client.SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F")
+//
+// This bearer auth token gets added to all the requests raised from this client instance.
+// Also it can be overridden or set one at the request level is supported.
+//
+// See `Request.SetAuthToken`.
+func (c *Client) SetAuthToken(token string) *Client {
+ c.Token = token
+ return c
+}
+
+// R method creates a new request instance, its used for Get, Post, Put, Delete, Patch, Head, Options, etc.
+func (c *Client) R() *Request {
+ r := &Request{
+ QueryParam: url.Values{},
+ FormData: url.Values{},
+ Header: http.Header{},
+ Cookies: make([]*http.Cookie, 0),
+
+ client: c,
+ multipartFiles: []*File{},
+ multipartFields: []*MultipartField{},
+ pathParams: map[string]string{},
+ jsonEscapeHTML: true,
+ }
+ return r
+}
+
+// NewRequest is an alias for method `R()`. Creates a new request instance, its used for
+// Get, Post, Put, Delete, Patch, Head, Options, etc.
+func (c *Client) NewRequest() *Request {
+ return c.R()
+}
+
+// OnBeforeRequest method appends request middleware into the before request chain.
+// It gets applied after the default Resty request middlewares and before the request
+// is sent from Resty to the host server.
+// client.OnBeforeRequest(func(c *resty.Client, r *resty.Request) error {
+// // Now you have access to Client and Request instance
+// // manipulate it as per your need
+//
+// return nil // if its success otherwise return error
+// })
+func (c *Client) OnBeforeRequest(m func(*Client, *Request) error) *Client {
+ c.udBeforeRequest = append(c.udBeforeRequest, m)
+ return c
+}
+
+// OnAfterResponse method appends response middleware into the after response chain.
+// Once we receive response from host server, default Resty response middleware
+// gets applied and then user assigned response middlewares applied.
+// client.OnAfterResponse(func(c *resty.Client, r *resty.Response) error {
+// // Now you have access to Client and Response instance
+// // manipulate it as per your need
+//
+// return nil // if its success otherwise return error
+// })
+func (c *Client) OnAfterResponse(m func(*Client, *Response) error) *Client {
+ c.afterResponse = append(c.afterResponse, m)
+ return c
+}
+
+// SetPreRequestHook method sets the given pre-request function into resty client.
+// It is called right before the request is fired.
+//
+// Note: Only one pre-request hook can be registered. Use `client.OnBeforeRequest` for multiple.
+func (c *Client) SetPreRequestHook(h func(*Client, *http.Request) error) *Client {
+ if c.preReqHook != nil {
+ c.log.Warnf("Overwriting an existing pre-request hook: %s", functionName(h))
+ }
+ c.preReqHook = h
+ return c
+}
+
+// SetDebug method enables the debug mode on Resty client. Client logs details of every request and response.
+// For `Request` it logs information such as HTTP verb, Relative URL path, Host, Headers, Body if it has one.
+// For `Response` it logs information such as Status, Response Time, Headers, Body if it has one.
+// client.SetDebug(true)
+func (c *Client) SetDebug(d bool) *Client {
+ c.Debug = d
+ return c
+}
+
+// SetDebugBodyLimit sets the maximum size for which the response body will be logged in debug mode.
+// client.SetDebugBodyLimit(1000000)
+func (c *Client) SetDebugBodyLimit(sl int64) *Client {
+ c.debugBodySizeLimit = sl
+ return c
+}
+
+// OnRequestLog method used to set request log callback into Resty. Registered callback gets
+// called before the resty actually logs the information.
+func (c *Client) OnRequestLog(rl func(*RequestLog) error) *Client {
+ if c.requestLog != nil {
+ c.log.Warnf("Overwriting an existing on-request-log callback from=%s to=%s",
+ functionName(c.requestLog), functionName(rl))
+ }
+ c.requestLog = rl
+ return c
+}
+
+// OnResponseLog method used to set response log callback into Resty. Registered callback gets
+// called before the resty actually logs the information.
+func (c *Client) OnResponseLog(rl func(*ResponseLog) error) *Client {
+ if c.responseLog != nil {
+ c.log.Warnf("Overwriting an existing on-response-log callback from=%s to=%s",
+ functionName(c.responseLog), functionName(rl))
+ }
+ c.responseLog = rl
+ return c
+}
+
+// SetDisableWarn method disables the warning message on Resty client.
+//
+// For Example: Resty warns the user when BasicAuth used on non-TLS mode.
+// client.SetDisableWarn(true)
+func (c *Client) SetDisableWarn(d bool) *Client {
+ c.DisableWarn = d
+ return c
+}
+
+// SetAllowGetMethodPayload method allows the GET method with payload on Resty client.
+//
+// For Example: Resty allows the user sends request with a payload on HTTP GET method.
+// client.SetAllowGetMethodPayload(true)
+func (c *Client) SetAllowGetMethodPayload(a bool) *Client {
+ c.AllowGetMethodPayload = a
+ return c
+}
+
+// SetLogger method sets given writer for logging Resty request and response details.
+//
+// Compliant to interface `resty.Logger`.
+func (c *Client) SetLogger(l Logger) *Client {
+ c.log = l
+ return c
+}
+
+// SetContentLength method enables the HTTP header `Content-Length` value for every request.
+// By default Resty won't set `Content-Length`.
+// client.SetContentLength(true)
+//
+// Also you have an option to enable for particular request. See `Request.SetContentLength`
+func (c *Client) SetContentLength(l bool) *Client {
+ c.setContentLength = l
+ return c
+}
+
+// SetTimeout method sets timeout for request raised from client.
+// client.SetTimeout(time.Duration(1 * time.Minute))
+func (c *Client) SetTimeout(timeout time.Duration) *Client {
+ c.httpClient.Timeout = timeout
+ return c
+}
+
+// SetError method is to register the global or client common `Error` object into Resty.
+// It is used for automatic unmarshalling if response status code is greater than 399 and
+// content type either JSON or XML. Can be pointer or non-pointer.
+// client.SetError(&Error{})
+// // OR
+// client.SetError(Error{})
+func (c *Client) SetError(err interface{}) *Client {
+ c.Error = typeOf(err)
+ return c
+}
+
+// SetRedirectPolicy method sets the client redirect policy. Resty provides ready to use
+// redirect policies. Wanna create one for yourself refer to `redirect.go`.
+//
+// client.SetRedirectPolicy(FlexibleRedirectPolicy(20))
+//
+// // Need multiple redirect policies together
+// client.SetRedirectPolicy(FlexibleRedirectPolicy(20), DomainCheckRedirectPolicy("host1.com", "host2.net"))
+func (c *Client) SetRedirectPolicy(policies ...interface{}) *Client {
+ for _, p := range policies {
+ if _, ok := p.(RedirectPolicy); !ok {
+ c.log.Errorf("%v does not implement resty.RedirectPolicy (missing Apply method)",
+ functionName(p))
+ }
+ }
+
+ c.httpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error {
+ for _, p := range policies {
+ if err := p.(RedirectPolicy).Apply(req, via); err != nil {
+ return err
+ }
+ }
+ return nil // looks good, go ahead
+ }
+
+ return c
+}
+
+// SetRetryCount method enables retry on the Resty client and allows you
+// to set the number of retries. Resty uses a backoff mechanism between attempts.
+func (c *Client) SetRetryCount(count int) *Client {
+	c.RetryCount = count
+	return c
+}
+
+// SetRetryWaitTime method sets the default wait time to sleep before retrying
+// the request.
+//
+// Default is 100 milliseconds.
+func (c *Client) SetRetryWaitTime(waitTime time.Duration) *Client {
+	c.RetryWaitTime = waitTime
+	return c
+}
+
+// SetRetryMaxWaitTime method sets the maximum wait time to sleep before retrying
+// the request.
+//
+// Default is 2 seconds.
+func (c *Client) SetRetryMaxWaitTime(maxWaitTime time.Duration) *Client {
+	c.RetryMaxWaitTime = maxWaitTime
+	return c
+}
+
+// SetRetryAfter sets the callback used to calculate the wait time between retries.
+// Default (nil) implies exponential backoff with jitter.
+func (c *Client) SetRetryAfter(callback RetryAfterFunc) *Client {
+	c.RetryAfter = callback
+	return c
+}
+
+// AddRetryCondition method adds a retry condition function to the list of functions
+// that are checked to determine whether the request should be retried. The request will
+// be retried if any of the functions returns true and the error is nil.
+func (c *Client) AddRetryCondition(condition RetryConditionFunc) *Client {
+	c.RetryConditions = append(c.RetryConditions, condition)
+	return c
+}
+
+// SetTLSClientConfig method sets the TLSClientConfig for the underlying client transport.
+//
+// For Example:
+//		// One can set a custom root certificate. Refer: http://golang.org/pkg/crypto/tls/#example_Dial
+//		client.SetTLSClientConfig(&tls.Config{ RootCAs: roots })
+//
+//		// or one can disable the security check (https)
+//		client.SetTLSClientConfig(&tls.Config{ InsecureSkipVerify: true })
+//
+// Note: This method overwrites the existing `TLSClientConfig`. When the current
+// transport is not an *http.Transport, it logs an error and is a no-op.
+func (c *Client) SetTLSClientConfig(config *tls.Config) *Client {
+	transport, err := c.transport()
+	if err != nil {
+		c.log.Errorf("%v", err)
+		return c
+	}
+	transport.TLSClientConfig = config
+	return c
+}
+
+// SetProxy method sets the proxy URL and port for the Resty client.
+//		client.SetProxy("http://proxyserver:8888")
+//
+// OR, without this `SetProxy` method, you could also set the proxy via environment variable.
+//
+// Refer to godoc `http.ProxyFromEnvironment`.
+func (c *Client) SetProxy(proxyURL string) *Client {
+	transport, err := c.transport()
+	if err != nil {
+		c.log.Errorf("%v", err)
+		return c
+	}
+
+	pURL, err := url.Parse(proxyURL)
+	if err != nil {
+		c.log.Errorf("%v", err)
+		return c
+	}
+
+	c.proxyURL = pURL
+	transport.Proxy = http.ProxyURL(c.proxyURL)
+	return c
+}
+
+// RemoveProxy method removes the proxy configuration from the Resty client.
+//		client.RemoveProxy()
+func (c *Client) RemoveProxy() *Client {
+	transport, err := c.transport()
+	if err != nil {
+		c.log.Errorf("%v", err)
+		return c
+	}
+	c.proxyURL = nil
+	transport.Proxy = nil
+	return c
+}
+
+// SetCertificates method helps to conveniently set client certificates into Resty.
+// Certificates are appended to (not replacing) any already configured.
+func (c *Client) SetCertificates(certs ...tls.Certificate) *Client {
+	config, err := c.tlsConfig()
+	if err != nil {
+		c.log.Errorf("%v", err)
+		return c
+	}
+	config.Certificates = append(config.Certificates, certs...)
+	return c
+}
+
+// SetRootCertificate method helps to add one or more root certificates into the Resty client.
+//		client.SetRootCertificate("/path/to/root/pemFile.pem")
+func (c *Client) SetRootCertificate(pemFilePath string) *Client {
+	rootPemData, err := ioutil.ReadFile(pemFilePath)
+	if err != nil {
+		c.log.Errorf("%v", err)
+		return c
+	}
+
+	config, err := c.tlsConfig()
+	if err != nil {
+		c.log.Errorf("%v", err)
+		return c
+	}
+	if config.RootCAs == nil {
+		config.RootCAs = x509.NewCertPool()
+	}
+
+	// NOTE(review): AppendCertsFromPEM's bool result is ignored; a malformed
+	// PEM file is silently dropped here.
+	config.RootCAs.AppendCertsFromPEM(rootPemData)
+	return c
+}
+
+// SetOutputDirectory method sets the output directory for saving HTTP responses into files.
+// If the output directory does not exist then Resty creates one. This setting is optional;
+// if you're planning to use an absolute path in `Request.SetOutput`, they can be used together.
+//		client.SetOutputDirectory("/save/http/response/here")
+func (c *Client) SetOutputDirectory(dirPath string) *Client {
+	c.outputDirectory = dirPath
+	return c
+}
+
+// SetTransport method sets a custom `*http.Transport` or any `http.RoundTripper`
+// compatible interface implementation in the Resty client.
+//
+// Note:
+//
+// - If the transport is not of type `*http.Transport` then you may not be able to
+// take advantage of some of the Resty client settings.
+//
+// - It overwrites the Resty client transport instance and its configuration.
+//
+//		transport := &http.Transport{
+//			// something like proxying to httptest.Server, etc...
+//			Proxy: func(req *http.Request) (*url.URL, error) {
+//				return url.Parse(server.URL)
+//			},
+//		}
+//
+//		client.SetTransport(transport)
+func (c *Client) SetTransport(transport http.RoundTripper) *Client {
+	if transport != nil {
+		c.httpClient.Transport = transport
+	}
+	return c
+}
+
+// SetScheme method sets a custom scheme in the Resty client; it's a way to override the default.
+// An empty string is ignored.
+//		client.SetScheme("http")
+func (c *Client) SetScheme(scheme string) *Client {
+	if !IsStringEmpty(scheme) {
+		c.scheme = scheme
+	}
+	return c
+}
+
+// SetCloseConnection method sets the variable `Close` in the http request struct with the given
+// value. More info: https://golang.org/src/net/http/request.go
+func (c *Client) SetCloseConnection(close bool) *Client {
+	c.closeConnection = close
+	return c
+}
+
+// SetDoNotParseResponse method instructs Resty not to parse the response body automatically
+// (true disables parsing). Resty exposes the raw response body as `io.ReadCloser`. Also do
+// not forget to close the body, otherwise you might get into connection leaks and lose
+// connection reuse.
+//
+// Note: response middlewares are not applicable if you use this option. Basically you have
+// taken over the control of response parsing from Resty.
+func (c *Client) SetDoNotParseResponse(parse bool) *Client {
+	c.notParseResponse = parse
+	return c
+}
+
+// SetPathParams method sets multiple URL path key-value pairs at one go in the
+// Resty client instance.
+//		client.SetPathParams(map[string]string{
+//			"userId": "sample@sample.com",
+//			"subAccountId": "100002",
+//		})
+//
+// Result:
+//		URL - /v1/users/{userId}/{subAccountId}/details
+//		Composed URL - /v1/users/sample@sample.com/100002/details
+// It replaces the value of the key while composing the request URL. It can also be
+// overridden at request level via Path Params options, see `Request.SetPathParams`.
+func (c *Client) SetPathParams(params map[string]string) *Client {
+	for p, v := range params {
+		c.pathParams[p] = v
+	}
+	return c
+}
+
+// SetJSONEscapeHTML method enables/disables HTML escaping on JSON marshal.
+//
+// Note: This option is only applicable to the standard JSON marshaller.
+func (c *Client) SetJSONEscapeHTML(b bool) *Client {
+	c.jsonEscapeHTML = b
+	return c
+}
+
+// EnableTrace method enables the Resty client trace for requests fired from
+// the client using `httptrace.ClientTrace` and provides insights.
+//
+//		client := resty.New().EnableTrace()
+//
+//		resp, err := client.R().Get("https://httpbin.org/get")
+//		fmt.Println("Error:", err)
+//		fmt.Println("Trace Info:", resp.Request.TraceInfo())
+//
+// `Request.EnableTrace` is also available to get trace info for a single request.
+//
+// Since v2.0.0
+func (c *Client) EnableTrace() *Client {
+	c.trace = true
+	return c
+}
+
+// DisableTrace method disables the Resty client trace. Refer to `Client.EnableTrace`.
+//
+// Since v2.0.0
+func (c *Client) DisableTrace() *Client {
+	c.trace = false
+	return c
+}
+
+// IsProxySet method returns true if a proxy is set on the resty client, otherwise
+// false. By default the proxy is taken from the environment, refer to `http.ProxyFromEnvironment`.
+func (c *Client) IsProxySet() bool {
+	return c.proxyURL != nil
+}
+
+// GetClient method returns the current `http.Client` used by the resty client.
+func (c *Client) GetClient() *http.Client {
+	return c.httpClient
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Client Unexported methods
+//_______________________________________________________________________
+
+// execute method executes the given `Request` object: it runs the request
+// middleware chain, fires the HTTP call and assembles the `Response`.
+func (c *Client) execute(req *Request) (*Response, error) {
+	defer releaseBuffer(req.bodyBuf)
+	// Apply Request middleware
+	var err error
+
+	// user-defined before-request hooks run first, so they can
+	// modify the *resty.Request object before resty's own middleware
+	for _, f := range c.udBeforeRequest {
+		if err = f(c, req); err != nil {
+			return nil, err
+		}
+	}
+
+	// resty middlewares
+	for _, f := range c.beforeRequest {
+		if err = f(c, req); err != nil {
+			return nil, err
+		}
+	}
+
+	// "Host" cannot be overridden via http.Header; copy it onto the raw request.
+	if hostHeader := req.Header.Get("Host"); hostHeader != "" {
+		req.RawRequest.Host = hostHeader
+	}
+
+	// call pre-request hook if defined
+	if c.preReqHook != nil {
+		if err = c.preReqHook(c, req.RawRequest); err != nil {
+			return nil, err
+		}
+	}
+
+	if err = requestLogger(c, req); err != nil {
+		return nil, err
+	}
+
+	req.Time = time.Now()
+	resp, err := c.httpClient.Do(req.RawRequest)
+	endTime := time.Now()
+
+	if c.trace || req.trace {
+		req.clientTrace.endTime = endTime
+	}
+
+	response := &Response{
+		Request:     req,
+		RawResponse: resp,
+		receivedAt:  endTime,
+	}
+
+	// On transport error, or when auto-parsing is disabled, hand back the raw response.
+	if err != nil || req.notParseResponse || c.notParseResponse {
+		return response, err
+	}
+
+	if !req.isSaveResponse {
+		defer closeq(resp.Body)
+		body := resp.Body
+
+		// GitHub #142 & #187: transparently decompress gzip bodies
+		if strings.EqualFold(resp.Header.Get(hdrContentEncodingKey), "gzip") && resp.ContentLength != 0 {
+			if _, ok := body.(*gzip.Reader); !ok {
+				body, err = gzip.NewReader(body)
+				if err != nil {
+					return response, err
+				}
+				defer closeq(body)
+			}
+		}
+
+		if response.body, err = ioutil.ReadAll(body); err != nil {
+			return response, err
+		}
+
+		response.size = int64(len(response.body))
+	}
+
+	// Apply Response middleware
+	for _, f := range c.afterResponse {
+		if err = f(c, response); err != nil {
+			break
+		}
+	}
+
+	return response, err
+}
+
+// tlsConfig returns the TLS client config from the underlying transport,
+// creating an empty one first if it does not yet exist.
+func (c *Client) tlsConfig() (*tls.Config, error) {
+	transport, err := c.transport()
+	if err != nil {
+		return nil, err
+	}
+	if transport.TLSClientConfig == nil {
+		transport.TLSClientConfig = &tls.Config{}
+	}
+	return transport.TLSClientConfig, nil
+}
+
+// transport method returns the `*http.Transport` currently in use, or an error
+// when the configured transport is not an `*http.Transport`.
+func (c *Client) transport() (*http.Transport, error) {
+	if transport, ok := c.httpClient.Transport.(*http.Transport); ok {
+		return transport, nil
+	}
+	return nil, errors.New("current transport is not an *http.Transport instance")
+}
+
+// outputLogTo is an internal helper that redirects the default logger's output.
+// NOTE(review): the type assertion panics if a custom Logger was set via SetLogger.
+func (c *Client) outputLogTo(w io.Writer) *Client {
+	c.log.(*logger).l.SetOutput(w)
+	return c
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// File struct and its methods
+//_______________________________________________________________________
+
+// File struct represents file information for a multipart request.
+type File struct {
+	Name      string
+	ParamName string
+	io.Reader
+}
+
+// String returns the string representation of the current file details.
+func (f *File) String() string {
+	return fmt.Sprintf("ParamName: %v; FileName: %v", f.ParamName, f.Name)
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// MultipartField struct
+//_______________________________________________________________________
+
+// MultipartField struct represents a custom data part for a multipart request.
+type MultipartField struct {
+	Param       string
+	FileName    string
+	ContentType string
+	io.Reader
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Unexported package methods
+//_______________________________________________________________________
+
+// createClient wraps the given *http.Client into a resty Client with library
+// defaults: empty param/header containers, retry wait times, the standard JSON
+// codec, the default logger and the request/response middleware chains.
+func createClient(hc *http.Client) *Client {
+	if hc.Transport == nil {
+		hc.Transport = createTransport(nil)
+	}
+
+	c := &Client{ // not setting lang default values
+		QueryParam:         url.Values{},
+		FormData:           url.Values{},
+		Header:             http.Header{},
+		Cookies:            make([]*http.Cookie, 0),
+		RetryWaitTime:      defaultWaitTime,
+		RetryMaxWaitTime:   defaultMaxWaitTime,
+		JSONMarshal:        json.Marshal,
+		JSONUnmarshal:      json.Unmarshal,
+		jsonEscapeHTML:     true,
+		httpClient:         hc,
+		debugBodySizeLimit: math.MaxInt32,
+		pathParams:         make(map[string]string),
+	}
+
+	// Logger
+	c.SetLogger(createLogger())
+
+	// default before request middlewares (order matters)
+	c.beforeRequest = []func(*Client, *Request) error{
+		parseRequestURL,
+		parseRequestHeader,
+		parseRequestBody,
+		createHTTPRequest,
+		addCredentials,
+	}
+
+	// user defined request middlewares
+	c.udBeforeRequest = []func(*Client, *Request) error{}
+
+	// default after response middlewares (order matters)
+	c.afterResponse = []func(*Client, *Response) error{
+		responseLogger,
+		parseResponseBody,
+		saveResponseIntoFile,
+	}
+
+	return c
+}
+
+// createTransport returns an *http.Transport with sane defaults; when
+// localAddr is non-nil it pins the dialer's local address.
+func createTransport(localAddr net.Addr) *http.Transport {
+	dialer := &net.Dialer{
+		Timeout:   30 * time.Second,
+		KeepAlive: 30 * time.Second,
+		DualStack: true, // NOTE(review): deprecated in modern Go; RFC 6555 fallback is on by default — kept for upstream fidelity
+	}
+	if localAddr != nil {
+		dialer.LocalAddr = localAddr
+	}
+	return &http.Transport{
+		Proxy:                 http.ProxyFromEnvironment,
+		DialContext:           dialer.DialContext,
+		MaxIdleConns:          100,
+		IdleConnTimeout:       90 * time.Second,
+		TLSHandshakeTimeout:   10 * time.Second,
+		ExpectContinueTimeout: 1 * time.Second,
+		MaxIdleConnsPerHost:   runtime.GOMAXPROCS(0) + 1,
+	}
+}
diff --git a/vendor/github.com/go-resty/resty/v2/go.mod b/vendor/github.com/go-resty/resty/v2/go.mod
new file mode 100644
index 000000000..b2b881ac4
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/go.mod
@@ -0,0 +1,3 @@
+module github.com/go-resty/resty/v2
+
+require golang.org/x/net v0.0.0-20190628185345-da137c7871d7
diff --git a/vendor/github.com/go-resty/resty/v2/middleware.go b/vendor/github.com/go-resty/resty/v2/middleware.go
new file mode 100644
index 000000000..de3debd41
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/middleware.go
@@ -0,0 +1,515 @@
+// Copyright (c) 2015-2019 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "bytes"
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "mime/multipart"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "time"
+)
+
+const debugRequestLogKey = "__restyDebugRequestLog"
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Request Middleware(s)
+//_______________________________________________________________________
+
+// parseRequestURL resolves the final request URL: path params are substituted
+// (request level first, then client level), a relative URL is joined with
+// `Client.HostURL`, and query params from both levels are merged with
+// request-level values winning per key.
+func parseRequestURL(c *Client, r *Request) error {
+	// GitHub #103 Path Params
+	if len(r.pathParams) > 0 {
+		for p, v := range r.pathParams {
+			r.URL = strings.Replace(r.URL, "{"+p+"}", url.PathEscape(v), -1)
+		}
+	}
+	if len(c.pathParams) > 0 {
+		for p, v := range c.pathParams {
+			r.URL = strings.Replace(r.URL, "{"+p+"}", url.PathEscape(v), -1)
+		}
+	}
+
+	// Parsing request URL
+	reqURL, err := url.Parse(r.URL)
+	if err != nil {
+		return err
+	}
+
+	// If Request.URL is a relative path then prepend c.HostURL to
+	// the request URL, otherwise Request.URL will be used as-is
+	if !reqURL.IsAbs() {
+		r.URL = reqURL.String()
+		if len(r.URL) > 0 && r.URL[0] != '/' {
+			r.URL = "/" + r.URL
+		}
+
+		reqURL, err = url.Parse(c.HostURL + r.URL)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Adding Query Param
+	query := make(url.Values)
+	for k, v := range c.QueryParam {
+		for _, iv := range v {
+			query.Add(k, iv)
+		}
+	}
+
+	for k, v := range r.QueryParam {
+		// remove query param from client level by key
+		// since the override happens for that key in the request
+		query.Del(k)
+
+		for _, iv := range v {
+			query.Add(k, iv)
+		}
+	}
+
+	// GitHub #123 Preserve query string order partially.
+	// Since it's not feasible in `SetQuery*` resty methods, because
+	// the standard package `url.Encode(...)` sorts the query params
+	// alphabetically
+	if len(query) > 0 {
+		if IsStringEmpty(reqURL.RawQuery) {
+			reqURL.RawQuery = query.Encode()
+		} else {
+			reqURL.RawQuery = reqURL.RawQuery + "&" + query.Encode()
+		}
+	}
+
+	r.URL = reqURL.String()
+
+	return nil
+}
+
+// parseRequestHeader merges client-level and request-level headers into the
+// request (request-level values replace client-level ones per key) and fills
+// in the default User-Agent and Accept headers.
+func parseRequestHeader(c *Client, r *Request) error {
+	hdr := make(http.Header)
+	for k := range c.Header {
+		hdr[k] = append(hdr[k], c.Header[k]...)
+	}
+
+	for k := range r.Header {
+		hdr.Del(k)
+		hdr[k] = append(hdr[k], r.Header[k]...)
+	}
+
+	if IsStringEmpty(hdr.Get(hdrUserAgentKey)) {
+		hdr.Set(hdrUserAgentKey, hdrUserAgentValue)
+	}
+
+	// Default Accept to the Content-Type when it is JSON or XML.
+	ct := hdr.Get(hdrContentTypeKey)
+	if IsStringEmpty(hdr.Get(hdrAcceptKey)) && !IsStringEmpty(ct) &&
+		(IsJSONType(ct) || IsXMLType(ct)) {
+		hdr.Set(hdrAcceptKey, hdr.Get(hdrContentTypeKey))
+	}
+
+	r.Header = hdr
+
+	return nil
+}
+
+// parseRequestBody prepares the outgoing body in priority order: multipart,
+// form data, then a raw body; afterwards it optionally sets Content-Length.
+func parseRequestBody(c *Client, r *Request) (err error) {
+	if isPayloadSupported(r.Method, c.AllowGetMethodPayload) {
+		// Handling Multipart
+		if r.isMultiPart && !(r.Method == MethodPatch) {
+			if err = handleMultipart(c, r); err != nil {
+				return
+			}
+
+			goto CL
+		}
+
+		// Handling Form Data
+		if len(c.FormData) > 0 || len(r.FormData) > 0 {
+			handleFormData(c, r)
+
+			goto CL
+		}
+
+		// Handling Request body
+		if r.Body != nil {
+			handleContentType(c, r)
+
+			if err = handleRequestBody(c, r); err != nil {
+				return
+			}
+		}
+	}
+
+CL:
+	// by default resty won't set content length, you can if you want to :)
+	if (c.setContentLength || r.setContentLength) && r.bodyBuf != nil {
+		r.Header.Set(hdrContentLengthKey, fmt.Sprintf("%d", r.bodyBuf.Len()))
+	}
+
+	return
+}
+
+// createHTTPRequest builds the underlying *http.Request from the resty
+// Request: body, close flag, headers, cookies, scheme fallback, trace
+// context and the GetBody func used for redirects/retries.
+func createHTTPRequest(c *Client, r *Request) (err error) {
+	if r.bodyBuf == nil {
+		if reader, ok := r.Body.(io.Reader); ok {
+			r.RawRequest, err = http.NewRequest(r.Method, r.URL, reader)
+		} else {
+			r.RawRequest, err = http.NewRequest(r.Method, r.URL, nil)
+		}
+	} else {
+		r.RawRequest, err = http.NewRequest(r.Method, r.URL, r.bodyBuf)
+	}
+
+	if err != nil {
+		return
+	}
+
+	// Assign close connection option
+	r.RawRequest.Close = c.closeConnection
+
+	// Add headers into http request
+	r.RawRequest.Header = r.Header
+
+	// Add cookies from client instance into http request
+	for _, cookie := range c.Cookies {
+		r.RawRequest.AddCookie(cookie)
+	}
+
+	// Add cookies from request instance into http request
+	for _, cookie := range r.Cookies {
+		r.RawRequest.AddCookie(cookie)
+	}
+
+	// it's for the non-http scheme option
+	if r.RawRequest.URL != nil && r.RawRequest.URL.Scheme == "" {
+		r.RawRequest.URL.Scheme = c.scheme
+		r.RawRequest.URL.Host = r.URL
+	}
+
+	// Enable trace
+	if c.trace || r.trace {
+		r.clientTrace = &clientTrace{}
+		r.ctx = r.clientTrace.createContext(r.Context())
+	}
+
+	// Use context if it was specified
+	if r.ctx != nil {
+		r.RawRequest = r.RawRequest.WithContext(r.ctx)
+	}
+
+	// assign the get body func for the underlying raw request instance
+	r.RawRequest.GetBody = func() (io.ReadCloser, error) {
+		// If r.bodyBuf is present, return the copy
+		if r.bodyBuf != nil {
+			return ioutil.NopCloser(bytes.NewReader(r.bodyBuf.Bytes())), nil
+		}
+
+		// Maybe body is `io.Reader`.
+		// Note: Resty users have to watch out for large body sizes of `io.Reader`
+		if r.RawRequest.Body != nil {
+			b, err := ioutil.ReadAll(r.RawRequest.Body)
+			if err != nil {
+				return nil, err
+			}
+
+			// Restore the Body
+			closeq(r.RawRequest.Body)
+			r.RawRequest.Body = ioutil.NopCloser(bytes.NewBuffer(b))
+
+			// Return the Body bytes
+			return ioutil.NopCloser(bytes.NewBuffer(b)), nil
+		}
+
+		return nil, nil
+	}
+
+	return
+}
+
+// addCredentials applies Basic auth and Bearer token auth to the raw request;
+// request-level settings take precedence over client-level ones.
+func addCredentials(c *Client, r *Request) error {
+	var isBasicAuth bool
+	// Basic Auth
+	if r.UserInfo != nil { // takes precedence
+		r.RawRequest.SetBasicAuth(r.UserInfo.Username, r.UserInfo.Password)
+		isBasicAuth = true
+	} else if c.UserInfo != nil {
+		r.RawRequest.SetBasicAuth(c.UserInfo.Username, c.UserInfo.Password)
+		isBasicAuth = true
+	}
+
+	// Warn when credentials travel over a non-TLS connection (see SetDisableWarn)
+	if !c.DisableWarn {
+		if isBasicAuth && !strings.HasPrefix(r.URL, "https") {
+			c.log.Warnf("Using Basic Auth in HTTP mode is not secure, use HTTPS")
+		}
+	}
+
+	// Token Auth
+	if !IsStringEmpty(r.Token) { // takes precedence
+		r.RawRequest.Header.Set(hdrAuthorizationKey, "Bearer "+r.Token)
+	} else if !IsStringEmpty(c.Token) {
+		r.RawRequest.Header.Set(hdrAuthorizationKey, "Bearer "+c.Token)
+	}
+
+	return nil
+}
+
+// requestLogger composes the debug log for the outgoing request (when Debug is
+// enabled), invokes the user request-log callback if any, and stashes the text
+// in the request values map so responseLogger can complete and emit it.
+func requestLogger(c *Client, r *Request) error {
+	if c.Debug {
+		rr := r.RawRequest
+		rl := &RequestLog{Header: copyHeaders(rr.Header), Body: r.fmtBodyString()}
+		if c.requestLog != nil {
+			if err := c.requestLog(rl); err != nil {
+				return err
+			}
+		}
+		// fmt.Sprintf("COOKIES:\n%s\n", composeCookies(c.GetClient().Jar, *rr.URL)) +
+
+		reqLog := "\n==============================================================================\n" +
+			"~~~ REQUEST ~~~\n" +
+			fmt.Sprintf("%s %s %s\n", r.Method, rr.URL.RequestURI(), rr.Proto) +
+			fmt.Sprintf("HOST : %s\n", rr.URL.Host) +
+			fmt.Sprintf("HEADERS:\n%s\n", composeHeaders(c, r, rl.Header)) +
+			fmt.Sprintf("BODY :\n%v\n", rl.Body) +
+			"------------------------------------------------------------------------------\n"
+
+		r.initValuesMap()
+		r.values[debugRequestLogKey] = reqLog
+	}
+
+	return nil
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Response Middleware(s)
+//_______________________________________________________________________
+
+// responseLogger is a response middleware that completes and emits the debug
+// log for the request/response pair when `Client.Debug` is enabled. It invokes
+// the user response-log callback (if any) before composing the output.
+func responseLogger(c *Client, res *Response) error {
+	if c.Debug {
+		rl := &ResponseLog{Header: copyHeaders(res.Header()), Body: res.fmtBodyString(c.debugBodySizeLimit)}
+		if c.responseLog != nil {
+			if err := c.responseLog(rl); err != nil {
+				return err
+			}
+		}
+
+		debugLog := res.Request.values[debugRequestLogKey].(string)
+		debugLog += "~~~ RESPONSE ~~~\n" +
+			fmt.Sprintf("STATUS : %s\n", res.Status()) +
+			fmt.Sprintf("RECEIVED AT : %v\n", res.ReceivedAt().Format(time.RFC3339Nano)) +
+			fmt.Sprintf("TIME DURATION: %v\n", res.Time()) +
+			"HEADERS :\n" +
+			composeHeaders(c, res.Request, rl.Header) + "\n"
+		if res.Request.isSaveResponse {
+			// S1039: no format verbs needed — use the string directly.
+			debugLog += "BODY :\n***** RESPONSE WRITTEN INTO FILE *****\n"
+		} else {
+			debugLog += fmt.Sprintf("BODY :\n%v\n", rl.Body)
+		}
+		debugLog += "==============================================================================\n"
+
+		// Pass the assembled log as an argument rather than the format string,
+		// so '%' characters in response bodies are not interpreted as verbs.
+		c.log.Debugf("%s", debugLog)
+	}
+
+	return nil
+}
+
+// parseResponseBody unmarshals a JSON or XML response body into
+// Request.Result (2xx) or Request.Error (status > 399). 204 No Content
+// responses are skipped entirely.
+func parseResponseBody(c *Client, res *Response) (err error) {
+	if res.StatusCode() == http.StatusNoContent {
+		return
+	}
+	// Handles only JSON or XML content type
+	ct := firstNonEmpty(res.Header().Get(hdrContentTypeKey), res.Request.fallbackContentType)
+	if IsJSONType(ct) || IsXMLType(ct) {
+		// HTTP status code > 199 and < 300, considered as Result
+		if res.IsSuccess() {
+			res.Request.Error = nil
+			if res.Request.Result != nil {
+				err = Unmarshalc(c, ct, res.body, res.Request.Result)
+				return
+			}
+		}
+
+		// HTTP status code > 399, considered as Error
+		if res.IsError() {
+			// global error interface registered via Client.SetError
+			if res.Request.Error == nil && c.Error != nil {
+				res.Request.Error = reflect.New(c.Error).Interface()
+			}
+
+			if res.Request.Error != nil {
+				err = Unmarshalc(c, ct, res.body, res.Request.Error)
+			}
+		}
+	}
+
+	return
+}
+
+// handleMultipart writes client/request form data, "@"-prefixed file fields,
+// io.Reader files and custom multipart fields into a multipart body buffer,
+// then sets the multipart Content-Type on the request.
+func handleMultipart(c *Client, r *Request) (err error) {
+	r.bodyBuf = acquireBuffer()
+	w := multipart.NewWriter(r.bodyBuf)
+
+	for k, v := range c.FormData {
+		for _, iv := range v {
+			if err = w.WriteField(k, iv); err != nil {
+				return err
+			}
+		}
+	}
+
+	for k, v := range r.FormData {
+		for _, iv := range v {
+			if strings.HasPrefix(k, "@") { // file
+				err = addFile(w, k[1:], iv)
+				if err != nil {
+					return
+				}
+			} else { // form value
+				if err = w.WriteField(k, iv); err != nil {
+					return err
+				}
+			}
+		}
+	}
+
+	// #21 - adding io.Reader support
+	if len(r.multipartFiles) > 0 {
+		for _, f := range r.multipartFiles {
+			err = addFileReader(w, f)
+			if err != nil {
+				return
+			}
+		}
+	}
+
+	// GitHub #130 adding multipart field support with content type
+	if len(r.multipartFields) > 0 {
+		for _, mf := range r.multipartFields {
+			if err = addMultipartFormField(w, mf); err != nil {
+				return
+			}
+		}
+	}
+
+	r.Header.Set(hdrContentTypeKey, w.FormDataContentType())
+	err = w.Close()
+
+	return
+}
+
+// handleFormData merges client-level and request-level form data (request
+// level wins per key), URL-encodes it into the body buffer and sets the
+// form Content-Type.
+func handleFormData(c *Client, r *Request) {
+	formData := url.Values{}
+
+	for k, v := range c.FormData {
+		for _, iv := range v {
+			formData.Add(k, iv)
+		}
+	}
+
+	for k, v := range r.FormData {
+		// remove form data field from client level by key
+		// since the override happens for that key in the request
+		formData.Del(k)
+
+		for _, iv := range v {
+			formData.Add(k, iv)
+		}
+	}
+
+	r.bodyBuf = bytes.NewBuffer([]byte(formData.Encode()))
+	r.Header.Set(hdrContentTypeKey, formContentType)
+	r.isFormData = true
+}
+
+// handleContentType sets a detected Content-Type header on the request
+// when the caller has not provided one.
+func handleContentType(c *Client, r *Request) {
+	contentType := r.Header.Get(hdrContentTypeKey)
+	if IsStringEmpty(contentType) {
+		contentType = DetectContentType(r.Body)
+		r.Header.Set(hdrContentTypeKey, contentType)
+	}
+}
+
+// handleRequestBody converts Request.Body (io.Reader, []byte, string, or a
+// JSON/XML-marshallable value) into the body buffer. A bare io.Reader is
+// streamed without buffering unless Content-Length is requested.
+func handleRequestBody(c *Client, r *Request) (err error) {
+	var bodyBytes []byte
+	contentType := r.Header.Get(hdrContentTypeKey)
+	kind := kindOf(r.Body)
+	r.bodyBuf = nil
+
+	if reader, ok := r.Body.(io.Reader); ok {
+		if c.setContentLength || r.setContentLength { // keep backward compatibility
+			r.bodyBuf = acquireBuffer()
+			_, err = r.bodyBuf.ReadFrom(reader)
+			r.Body = nil
+		} else {
+			// Otherwise bufferless processing for `io.Reader`, sounds good.
+			return
+		}
+	} else if b, ok := r.Body.([]byte); ok {
+		bodyBytes = b
+	} else if s, ok := r.Body.(string); ok {
+		bodyBytes = []byte(s)
+	} else if IsJSONType(contentType) &&
+		(kind == reflect.Struct || kind == reflect.Map || kind == reflect.Slice) {
+		bodyBytes, err = jsonMarshal(c, r, r.Body)
+	} else if IsXMLType(contentType) && (kind == reflect.Struct) {
+		bodyBytes, err = xml.Marshal(r.Body)
+	}
+
+	if bodyBytes == nil && r.bodyBuf == nil {
+		err = errors.New("unsupported 'Body' type/value")
+	}
+
+	// if any errors occurred during body bytes handling, return it
+	if err != nil {
+		return
+	}
+
+	// []byte into Buffer
+	if bodyBytes != nil && r.bodyBuf == nil {
+		r.bodyBuf = acquireBuffer()
+		_, _ = r.bodyBuf.Write(bodyBytes)
+	}
+
+	return
+}
+
+// saveResponseIntoFile is a response middleware that streams the response
+// body to the file configured via `Request.SetOutput`, optionally prefixed
+// with the client's output directory, creating directories as needed.
+func saveResponseIntoFile(c *Client, res *Response) error {
+	if res.Request.isSaveResponse {
+		file := ""
+
+		if len(c.outputDirectory) > 0 && !filepath.IsAbs(res.Request.outputFile) {
+			file += c.outputDirectory + string(filepath.Separator)
+		}
+
+		file = filepath.Clean(file + res.Request.outputFile)
+		if err := createDirectory(filepath.Dir(file)); err != nil {
+			return err
+		}
+
+		outFile, err := os.Create(file)
+		if err != nil {
+			return err
+		}
+		defer closeq(outFile)
+
+		// io.Copy reads a maximum of 32kb at a time, perfect for large file downloads too
+		defer closeq(res.RawResponse.Body)
+
+		written, err := io.Copy(outFile, res.RawResponse.Body)
+		if err != nil {
+			return err
+		}
+
+		res.size = written
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/go-resty/resty/v2/redirect.go b/vendor/github.com/go-resty/resty/v2/redirect.go
new file mode 100644
index 000000000..afbe13e80
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/redirect.go
@@ -0,0 +1,101 @@
+// Copyright (c) 2015-2019 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "net/http"
+ "strings"
+)
+
+type (
+	// RedirectPolicy to regulate the redirects in the resty client.
+	// Objects implementing the RedirectPolicy interface can be registered as a policy.
+	//
+	// The Apply function should return nil to continue the redirect journey; otherwise
+	// return an error to stop the redirect.
+	RedirectPolicy interface {
+		Apply(req *http.Request, via []*http.Request) error
+	}
+
+	// The RedirectPolicyFunc type is an adapter to allow the use of ordinary functions as RedirectPolicy.
+	// If f is a function with the appropriate signature, RedirectPolicyFunc(f) is a RedirectPolicy object that calls f.
+	RedirectPolicyFunc func(*http.Request, []*http.Request) error
+)
+
+// Apply calls f(req, via), satisfying the RedirectPolicy interface.
+func (f RedirectPolicyFunc) Apply(req *http.Request, via []*http.Request) error {
+	return f(req, via)
+}
+
+// NoRedirectPolicy is used to disable redirects in the HTTP client.
+//		resty.SetRedirectPolicy(NoRedirectPolicy())
+func NoRedirectPolicy() RedirectPolicy {
+	return RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error {
+		return errors.New("auto redirect is disabled")
+	})
+}
+
+// FlexibleRedirectPolicy is a convenient method to create a redirect policy
+// limited to the given number of redirects for the HTTP client.
+//		resty.SetRedirectPolicy(FlexibleRedirectPolicy(20))
+func FlexibleRedirectPolicy(noOfRedirect int) RedirectPolicy {
+	return RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error {
+		if len(via) >= noOfRedirect {
+			return fmt.Errorf("stopped after %d redirects", noOfRedirect)
+		}
+		// via[0] is the original request; forward its headers when the host matches
+		checkHostAndAddHeaders(req, via[0])
+		return nil
+	})
+}
+
+// DomainCheckRedirectPolicy is a convenient method to define a domain-name redirect rule
+// in the resty client. Redirects are allowed only for the hosts mentioned in the policy.
+//		resty.SetRedirectPolicy(DomainCheckRedirectPolicy("host1.com", "host2.org", "host3.net"))
+func DomainCheckRedirectPolicy(hostnames ...string) RedirectPolicy {
+	hosts := make(map[string]bool)
+	for _, h := range hostnames {
+		hosts[strings.ToLower(h)] = true
+	}
+
+	fn := RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error {
+		if ok := hosts[getHostname(req.URL.Host)]; !ok {
+			return errors.New("redirect is not allowed as per DomainCheckRedirectPolicy")
+		}
+
+		return nil
+	})
+
+	return fn
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Package Unexported methods
+//_______________________________________________________________________
+
+// getHostname lowercases the host and strips any ":port" suffix.
+// NOTE(review): a host beginning with ":" (Index == 0) is not split here.
+func getHostname(host string) (hostname string) {
+	if strings.Index(host, ":") > 0 {
+		host, _, _ = net.SplitHostPort(host)
+	}
+	hostname = strings.ToLower(host)
+	return
+}
+
+// By default Go will not forward request headers on redirect.
+// After going through various discussion comments from the thread
+// https://github.com/golang/go/issues/4800
+// Resty will add all the headers during a redirect for the same host.
+func checkHostAndAddHeaders(cur *http.Request, pre *http.Request) {
+	curHostname := getHostname(cur.URL.Host)
+	preHostname := getHostname(pre.URL.Host)
+	if strings.EqualFold(curHostname, preHostname) {
+		for key, val := range pre.Header {
+			cur.Header[key] = val
+		}
+	} else { // only the library User-Agent header is added
+		cur.Header.Set(hdrUserAgentKey, hdrUserAgentValue)
+	}
+}
diff --git a/vendor/github.com/go-resty/resty/v2/request.go b/vendor/github.com/go-resty/resty/v2/request.go
new file mode 100644
index 000000000..c5fb7d358
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/request.go
@@ -0,0 +1,724 @@
+// Copyright (c) 2015-2019 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "bytes"
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "reflect"
+ "strings"
+ "time"
+)
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Request struct and methods
+//_______________________________________________________________________
+
+// Request struct is used to compose and fire individual request from
+// resty client. Request provides an options to override client level
+// settings and also an options for the request composition.
+type Request struct {
+ URL string
+ Method string
+ Token string
+ QueryParam url.Values
+ FormData url.Values
+ Header http.Header
+ Time time.Time
+ Body interface{}
+ Result interface{}
+ Error interface{}
+ RawRequest *http.Request
+ SRV *SRVRecord
+ UserInfo *User
+ Cookies []*http.Cookie
+
+ isMultiPart bool
+ isFormData bool
+ setContentLength bool
+ isSaveResponse bool
+ notParseResponse bool
+ jsonEscapeHTML bool
+ trace bool
+ outputFile string
+ fallbackContentType string
+ ctx context.Context
+ pathParams map[string]string
+ values map[string]interface{}
+ client *Client
+ bodyBuf *bytes.Buffer
+ clientTrace *clientTrace
+ multipartFiles []*File
+ multipartFields []*MultipartField
+}
+
+// Context method returns the Context if its already set in request
+// otherwise it creates new one using `context.Background()`.
+func (r *Request) Context() context.Context {
+ if r.ctx == nil {
+ return context.Background()
+ }
+ return r.ctx
+}
+
+// SetContext method sets the context.Context for current Request. It allows
+// to interrupt the request execution if ctx.Done() channel is closed.
+// See https://blog.golang.org/context article and the "context" package
+// documentation.
+func (r *Request) SetContext(ctx context.Context) *Request {
+ r.ctx = ctx
+ return r
+}
+
+// SetHeader method is to set a single header field and its value in the current request.
+//
+// For Example: To set `Content-Type` and `Accept` as `application/json`.
+// client.R().
+// SetHeader("Content-Type", "application/json").
+// SetHeader("Accept", "application/json")
+//
+// Also you can override header value, which was set at client instance level.
+func (r *Request) SetHeader(header, value string) *Request {
+ r.Header.Set(header, value)
+ return r
+}
+
+// SetHeaders method sets multiple headers field and its values at one go in the current request.
+//
+// For Example: To set `Content-Type` and `Accept` as `application/json`
+//
+// client.R().
+// SetHeaders(map[string]string{
+// "Content-Type": "application/json",
+// "Accept": "application/json",
+// })
+// Also you can override header value, which was set at client instance level.
+func (r *Request) SetHeaders(headers map[string]string) *Request {
+ for h, v := range headers {
+ r.SetHeader(h, v)
+ }
+ return r
+}
+
+// SetQueryParam method sets single parameter and its value in the current request.
+// It will be formed as query string for the request.
+//
+// For Example: `search=kitchen%20papers&size=large` in the URL after `?` mark.
+// client.R().
+// SetQueryParam("search", "kitchen papers").
+// SetQueryParam("size", "large")
+// Also you can override query params value, which was set at client instance level.
+func (r *Request) SetQueryParam(param, value string) *Request {
+ r.QueryParam.Set(param, value)
+ return r
+}
+
+// SetQueryParams method sets multiple parameters and its values at one go in the current request.
+// It will be formed as query string for the request.
+//
+// For Example: `search=kitchen%20papers&size=large` in the URL after `?` mark.
+// client.R().
+// SetQueryParams(map[string]string{
+// "search": "kitchen papers",
+// "size": "large",
+// })
+// Also you can override query params value, which was set at client instance level.
+func (r *Request) SetQueryParams(params map[string]string) *Request {
+ for p, v := range params {
+ r.SetQueryParam(p, v)
+ }
+ return r
+}
+
+// SetQueryParamsFromValues method appends multiple parameters with multi-value
+// (`url.Values`) at one go in the current request. It will be formed as
+// query string for the request.
+//
+// For Example: `status=pending&status=approved&status=open` in the URL after `?` mark.
+// client.R().
+// SetQueryParamsFromValues(url.Values{
+// "status": []string{"pending", "approved", "open"},
+// })
+// Also you can override query params value, which was set at client instance level.
+func (r *Request) SetQueryParamsFromValues(params url.Values) *Request {
+ for p, v := range params {
+ for _, pv := range v {
+ r.QueryParam.Add(p, pv)
+ }
+ }
+ return r
+}
+
+// SetQueryString method provides ability to use string as an input to set URL query string for the request.
+//
+// Using String as an input
+// client.R().
+// SetQueryString("productId=232&template=fresh-sample&cat=resty&source=google&kw=buy a lot more")
+func (r *Request) SetQueryString(query string) *Request {
+ params, err := url.ParseQuery(strings.TrimSpace(query))
+ if err == nil {
+ for p, v := range params {
+ for _, pv := range v {
+ r.QueryParam.Add(p, pv)
+ }
+ }
+ } else {
+ r.client.log.Errorf("%v", err)
+ }
+ return r
+}
+
+// SetFormData method sets Form parameters and their values in the current request.
+// It's applicable only to the HTTP methods `POST` and `PUT`, and the request content type is set to
+// `application/x-www-form-urlencoded`.
+// client.R().
+// SetFormData(map[string]string{
+// "access_token": "BC594900-518B-4F7E-AC75-BD37F019E08F",
+// "user_id": "3455454545",
+// })
+// Also you can override form data value, which was set at client instance level.
+func (r *Request) SetFormData(data map[string]string) *Request {
+ for k, v := range data {
+ r.FormData.Set(k, v)
+ }
+ return r
+}
+
+// SetFormDataFromValues method appends multiple form parameters with multi-value
+// (`url.Values`) at one go in the current request.
+// client.R().
+// SetFormDataFromValues(url.Values{
+// "search_criteria": []string{"book", "glass", "pencil"},
+// })
+// Also you can override form data value, which was set at client instance level.
+func (r *Request) SetFormDataFromValues(data url.Values) *Request {
+ for k, v := range data {
+ for _, kv := range v {
+ r.FormData.Add(k, kv)
+ }
+ }
+ return r
+}
+
+// SetBody method sets the request body for the request. It supports a wide range of use cases,
+// making it quite handy and powerful. Supported request body data types are `string`,
+// `[]byte`, `struct`, `map`, `slice` and `io.Reader`. The body value can be a pointer or non-pointer.
+// Automatic marshalling for JSON and XML content type, if it is `struct`, `map`, or `slice`.
+//
+// Note: `io.Reader` is processed as bufferless mode while sending request.
+//
+// For Example: Struct as a body input, based on content type, it will be marshalled.
+// client.R().
+// SetBody(User{
+// Username: "jeeva@myjeeva.com",
+// Password: "welcome2resty",
+// })
+//
+// Map as a body input, based on content type, it will be marshalled.
+// client.R().
+// SetBody(map[string]interface{}{
+// "username": "jeeva@myjeeva.com",
+// "password": "welcome2resty",
+// "address": &Address{
+// Address1: "1111 This is my street",
+// Address2: "Apt 201",
+// City: "My City",
+// State: "My State",
+// ZipCode: 00000,
+// },
+// })
+//
+// String as a body input. Suitable for any need as a string input.
+// client.R().
+// SetBody(`{
+// "username": "jeeva@getrightcare.com",
+// "password": "admin"
+// }`)
+//
+// []byte as a body input. Suitable for raw request such as file upload, serialize & deserialize, etc.
+// client.R().
+// SetBody([]byte("This is my raw request, sent as-is"))
+func (r *Request) SetBody(body interface{}) *Request {
+ r.Body = body
+ return r
+}
+
+// SetResult method is to register the response `Result` object for automatic unmarshalling for the request,
+// if response status code is between 200 and 299 and content type either JSON or XML.
+//
+// Note: Result object can be pointer or non-pointer.
+// client.R().SetResult(&AuthToken{})
+// // OR
+// client.R().SetResult(AuthToken{})
+//
+// Accessing a result value from response instance.
+// response.Result().(*AuthToken)
+func (r *Request) SetResult(res interface{}) *Request {
+ r.Result = getPointer(res)
+ return r
+}
+
+// SetError method is to register the request `Error` object for automatic unmarshalling for the request,
+// if response status code is greater than 399 and content type either JSON or XML.
+//
+// Note: Error object can be pointer or non-pointer.
+// client.R().SetError(&AuthError{})
+// // OR
+// client.R().SetError(AuthError{})
+//
+// Accessing a error value from response instance.
+// response.Error().(*AuthError)
+func (r *Request) SetError(err interface{}) *Request {
+ r.Error = getPointer(err)
+ return r
+}
+
+// SetFile method is to set single file field name and its path for multipart upload.
+// client.R().
+// SetFile("my_file", "/Users/jeeva/Gas Bill - Sep.pdf")
+func (r *Request) SetFile(param, filePath string) *Request {
+ r.isMultiPart = true
+ r.FormData.Set("@"+param, filePath)
+ return r
+}
+
+// SetFiles method is to set multiple file field name and its path for multipart upload.
+// client.R().
+// SetFiles(map[string]string{
+// "my_file1": "/Users/jeeva/Gas Bill - Sep.pdf",
+// "my_file2": "/Users/jeeva/Electricity Bill - Sep.pdf",
+// "my_file3": "/Users/jeeva/Water Bill - Sep.pdf",
+// })
+func (r *Request) SetFiles(files map[string]string) *Request {
+ r.isMultiPart = true
+ for f, fp := range files {
+ r.FormData.Set("@"+f, fp)
+ }
+ return r
+}
+
+// SetFileReader method is to set single file using io.Reader for multipart upload.
+// client.R().
+// SetFileReader("profile_img", "my-profile-img.png", bytes.NewReader(profileImgBytes)).
+// SetFileReader("notes", "user-notes.txt", bytes.NewReader(notesBytes))
+func (r *Request) SetFileReader(param, fileName string, reader io.Reader) *Request {
+ r.isMultiPart = true
+ r.multipartFiles = append(r.multipartFiles, &File{
+ Name: fileName,
+ ParamName: param,
+ Reader: reader,
+ })
+ return r
+}
+
+// SetMultipartField method is to set custom data using io.Reader for multipart upload.
+func (r *Request) SetMultipartField(param, fileName, contentType string, reader io.Reader) *Request {
+ r.isMultiPart = true
+ r.multipartFields = append(r.multipartFields, &MultipartField{
+ Param: param,
+ FileName: fileName,
+ ContentType: contentType,
+ Reader: reader,
+ })
+ return r
+}
+
+// SetMultipartFields method is to set multiple data fields using io.Reader for multipart upload.
+//
+// For Example:
+// client.R().SetMultipartFields(
+// &resty.MultipartField{
+// Param: "uploadManifest1",
+// FileName: "upload-file-1.json",
+// ContentType: "application/json",
+// Reader: strings.NewReader(`{"input": {"name": "Uploaded document 1", "_filename" : ["file1.txt"]}}`),
+// },
+// &resty.MultipartField{
+// Param: "uploadManifest2",
+// FileName: "upload-file-2.json",
+// ContentType: "application/json",
+// Reader: strings.NewReader(`{"input": {"name": "Uploaded document 2", "_filename" : ["file2.txt"]}}`),
+// })
+//
+// If you have slice already, then simply call-
+// client.R().SetMultipartFields(fields...)
+func (r *Request) SetMultipartFields(fields ...*MultipartField) *Request {
+ r.isMultiPart = true
+ r.multipartFields = append(r.multipartFields, fields...)
+ return r
+}
+
+// SetContentLength method sets the HTTP header `Content-Length` value for current request.
+// By default Resty won't set `Content-Length`. Also you have an option to enable for every
+// request.
+//
+// See `Client.SetContentLength`
+// client.R().SetContentLength(true)
+func (r *Request) SetContentLength(l bool) *Request {
+ r.setContentLength = true
+ return r
+}
+
+// SetBasicAuth method sets the basic authentication header in the current HTTP request.
+//
+// For Example:
+// Authorization: Basic
+//
+// To set the header for username "go-resty" and password "welcome"
+// client.R().SetBasicAuth("go-resty", "welcome")
+//
+// This method overrides the credentials set by method `Client.SetBasicAuth`.
+func (r *Request) SetBasicAuth(username, password string) *Request {
+ r.UserInfo = &User{Username: username, Password: password}
+ return r
+}
+
+// SetAuthToken method sets bearer auth token header in the current HTTP request. Header example:
+// Authorization: Bearer
+//
+// For Example: To set auth token BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F
+//
+// client.R().SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F")
+//
+// This method overrides the Auth token set by method `Client.SetAuthToken`.
+func (r *Request) SetAuthToken(token string) *Request {
+ r.Token = token
+ return r
+}
+
+// SetOutput method sets the output file for current HTTP request. Current HTTP response will be
+// saved into given file. It is similar to `curl -o` flag. Absolute path or relative path can be used.
+// If it is a relative path, then the output file goes under the output directory, as mentioned
+// in the `Client.SetOutputDirectory`.
+// client.R().
+// SetOutput("/Users/jeeva/Downloads/ReplyWithHeader-v5.1-beta.zip").
+// Get("http://bit.ly/1LouEKr")
+//
+// Note: In this scenario `Response.Body` might be nil.
+func (r *Request) SetOutput(file string) *Request {
+ r.outputFile = file
+ r.isSaveResponse = true
+ return r
+}
+
+// SetSRV method sets the details to query the service SRV record and execute the
+// request.
+// client.R().
+// SetSRV(SRVRecord{"web", "testservice.com"}).
+// Get("/get")
+func (r *Request) SetSRV(srv *SRVRecord) *Request {
+ r.SRV = srv
+ return r
+}
+
+// SetDoNotParseResponse method instructs `Resty` not to parse the response body automatically.
+// Resty exposes the raw response body as `io.ReadCloser`. Also do not forget to close the body,
+// otherwise you might get into connection leaks, no connection reuse.
+//
+// Note: Response middlewares are not applicable, if you use this option. Basically you have
+// taken over the control of response parsing from `Resty`.
+func (r *Request) SetDoNotParseResponse(parse bool) *Request {
+ r.notParseResponse = parse
+ return r
+}
+
+// SetPathParams method sets multiple URL path key-value pairs at one go in the
+// Resty current request instance.
+// client.R().SetPathParams(map[string]string{
+// "userId": "sample@sample.com",
+// "subAccountId": "100002",
+// })
+//
+// Result:
+// URL - /v1/users/{userId}/{subAccountId}/details
+// Composed URL - /v1/users/sample@sample.com/100002/details
+// It replaces the value of the key while composing the request URL. Also you can
+// override a Path Params value, which was set at the client instance level.
+func (r *Request) SetPathParams(params map[string]string) *Request {
+ for p, v := range params {
+ r.pathParams[p] = v
+ }
+ return r
+}
+
+// ExpectContentType method allows to provide fallback `Content-Type` for automatic unmarshalling
+// when `Content-Type` response header is unavailable.
+func (r *Request) ExpectContentType(contentType string) *Request {
+ r.fallbackContentType = contentType
+ return r
+}
+
+// SetJSONEscapeHTML method is to enable/disable the HTML escape on JSON marshal.
+//
+// Note: This option only applicable to standard JSON Marshaller.
+func (r *Request) SetJSONEscapeHTML(b bool) *Request {
+ r.jsonEscapeHTML = b
+ return r
+}
+
+// SetCookie method appends a single cookie in the current request instance.
+// client.R().SetCookie(&http.Cookie{
+// Name:"go-resty",
+// Value:"This is cookie value",
+// })
+//
+// Note: Method appends the Cookie value into existing Cookie if already existing.
+//
+// Since v2.1.0
+func (r *Request) SetCookie(hc *http.Cookie) *Request {
+ r.Cookies = append(r.Cookies, hc)
+ return r
+}
+
+// SetCookies method sets an array of cookies in the current request instance.
+// cookies := []*http.Cookie{
+// &http.Cookie{
+// Name:"go-resty-1",
+// Value:"This is cookie 1 value",
+// },
+// &http.Cookie{
+// Name:"go-resty-2",
+// Value:"This is cookie 2 value",
+// },
+// }
+//
+// // Setting a cookies into resty's current request
+// client.R().SetCookies(cookies)
+//
+// Note: Method appends the Cookie value into existing Cookie if already existing.
+//
+// Since v2.1.0
+func (r *Request) SetCookies(rs []*http.Cookie) *Request {
+ r.Cookies = append(r.Cookies, rs...)
+ return r
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// HTTP request tracing
+//_______________________________________________________________________
+
+// EnableTrace method enables trace for the current request
+// using `httptrace.ClientTrace` and provides insights.
+//
+// client := resty.New()
+//
+// resp, err := client.R().EnableTrace().Get("https://httpbin.org/get")
+// fmt.Println("Error:", err)
+// fmt.Println("Trace Info:", resp.Request.TraceInfo())
+//
+// See `Client.EnableTrace` available too to get trace info for all requests.
+//
+// Since v2.0.0
+func (r *Request) EnableTrace() *Request {
+ r.trace = true
+ return r
+}
+
+// TraceInfo method returns the trace info for the request.
+//
+// Since v2.0.0
+func (r *Request) TraceInfo() TraceInfo {
+ ct := r.clientTrace
+ return TraceInfo{
+ DNSLookup: ct.dnsDone.Sub(ct.dnsStart),
+ ConnTime: ct.gotConn.Sub(ct.getConn),
+ TLSHandshake: ct.tlsHandshakeDone.Sub(ct.tlsHandshakeStart),
+ ServerTime: ct.gotFirstResponseByte.Sub(ct.wroteRequest),
+ ResponseTime: ct.endTime.Sub(ct.gotFirstResponseByte),
+ TotalTime: ct.endTime.Sub(ct.getConn),
+ IsConnReused: ct.gotConnInfo.Reused,
+ IsConnWasIdle: ct.gotConnInfo.WasIdle,
+ ConnIdleTime: ct.gotConnInfo.IdleTime,
+ }
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// HTTP verb method starts here
+//_______________________________________________________________________
+
+// Get method does GET HTTP request. It's defined in section 4.3.1 of RFC7231.
+func (r *Request) Get(url string) (*Response, error) {
+ return r.Execute(MethodGet, url)
+}
+
+// Head method does HEAD HTTP request. It's defined in section 4.3.2 of RFC7231.
+func (r *Request) Head(url string) (*Response, error) {
+ return r.Execute(MethodHead, url)
+}
+
+// Post method does POST HTTP request. It's defined in section 4.3.3 of RFC7231.
+func (r *Request) Post(url string) (*Response, error) {
+ return r.Execute(MethodPost, url)
+}
+
+// Put method does PUT HTTP request. It's defined in section 4.3.4 of RFC7231.
+func (r *Request) Put(url string) (*Response, error) {
+ return r.Execute(MethodPut, url)
+}
+
+// Delete method does DELETE HTTP request. It's defined in section 4.3.5 of RFC7231.
+func (r *Request) Delete(url string) (*Response, error) {
+ return r.Execute(MethodDelete, url)
+}
+
+// Options method does OPTIONS HTTP request. It's defined in section 4.3.7 of RFC7231.
+func (r *Request) Options(url string) (*Response, error) {
+ return r.Execute(MethodOptions, url)
+}
+
+// Patch method does PATCH HTTP request. It's defined in section 2 of RFC5789.
+func (r *Request) Patch(url string) (*Response, error) {
+ return r.Execute(MethodPatch, url)
+}
+
+// Execute method performs the HTTP request with given HTTP method and URL
+// for current `Request`.
+// resp, err := client.R().Execute(resty.GET, "http://httpbin.org/get")
+func (r *Request) Execute(method, url string) (*Response, error) {
+ var addrs []*net.SRV
+ var err error
+
+ if r.isMultiPart && !(method == MethodPost || method == MethodPut || method == MethodPatch) {
+ return nil, fmt.Errorf("multipart content is not allowed in HTTP verb [%v]", method)
+ }
+
+ if r.SRV != nil {
+ _, addrs, err = net.LookupSRV(r.SRV.Service, "tcp", r.SRV.Domain)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ r.Method = method
+ r.URL = r.selectAddr(addrs, url, 0)
+
+ if r.client.RetryCount == 0 {
+ return r.client.execute(r)
+ }
+
+ var resp *Response
+ attempt := 0
+ err = Backoff(
+ func() (*Response, error) {
+ attempt++
+
+ r.URL = r.selectAddr(addrs, url, attempt)
+
+ resp, err = r.client.execute(r)
+ if err != nil {
+ r.client.log.Errorf("%v, Attempt %v", err, attempt)
+ }
+
+ return resp, err
+ },
+ Retries(r.client.RetryCount),
+ WaitTime(r.client.RetryWaitTime),
+ MaxWaitTime(r.client.RetryMaxWaitTime),
+ RetryConditions(r.client.RetryConditions),
+ )
+
+ return resp, err
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// SRVRecord struct
+//_______________________________________________________________________
+
+// SRVRecord struct holds the data to query the SRV record for the
+// following service.
+type SRVRecord struct {
+ Service string
+ Domain string
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Request Unexported methods
+//_______________________________________________________________________
+
+func (r *Request) fmtBodyString() (body string) {
+ body = "***** NO CONTENT *****"
+ if isPayloadSupported(r.Method, r.client.AllowGetMethodPayload) {
+ if _, ok := r.Body.(io.Reader); ok {
+ body = "***** BODY IS io.Reader *****"
+ return
+ }
+
+ // multipart or form-data
+ if r.isMultiPart || r.isFormData {
+ body = r.bodyBuf.String()
+ return
+ }
+
+ // request body data
+ if r.Body == nil {
+ return
+ }
+ var prtBodyBytes []byte
+ var err error
+
+ contentType := r.Header.Get(hdrContentTypeKey)
+ kind := kindOf(r.Body)
+ if canJSONMarshal(contentType, kind) {
+ prtBodyBytes, err = json.MarshalIndent(&r.Body, "", " ")
+ } else if IsXMLType(contentType) && (kind == reflect.Struct) {
+ prtBodyBytes, err = xml.MarshalIndent(&r.Body, "", " ")
+ } else if b, ok := r.Body.(string); ok {
+ if IsJSONType(contentType) {
+ bodyBytes := []byte(b)
+ out := acquireBuffer()
+ defer releaseBuffer(out)
+ if err = json.Indent(out, bodyBytes, "", " "); err == nil {
+ prtBodyBytes = out.Bytes()
+ }
+ } else {
+ body = b
+ return
+ }
+ } else if b, ok := r.Body.([]byte); ok {
+ body = base64.StdEncoding.EncodeToString(b)
+ }
+
+ if prtBodyBytes != nil && err == nil {
+ body = string(prtBodyBytes)
+ }
+ }
+
+ return
+}
+
+func (r *Request) selectAddr(addrs []*net.SRV, path string, attempt int) string {
+ if addrs == nil {
+ return path
+ }
+
+ idx := attempt % len(addrs)
+ domain := strings.TrimRight(addrs[idx].Target, ".")
+ path = strings.TrimLeft(path, "/")
+
+ return fmt.Sprintf("%s://%s:%d/%s", r.client.scheme, domain, addrs[idx].Port, path)
+}
+
+func (r *Request) initValuesMap() {
+ if r.values == nil {
+ r.values = make(map[string]interface{})
+ }
+}
+
+var noescapeJSONMarshal = func(v interface{}) ([]byte, error) {
+ buf := acquireBuffer()
+ defer releaseBuffer(buf)
+ encoder := json.NewEncoder(buf)
+ encoder.SetEscapeHTML(false)
+ err := encoder.Encode(v)
+ return buf.Bytes(), err
+}
diff --git a/vendor/github.com/go-resty/resty/v2/response.go b/vendor/github.com/go-resty/resty/v2/response.go
new file mode 100644
index 000000000..673aeeba7
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/response.go
@@ -0,0 +1,160 @@
+// Copyright (c) 2015-2019 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+ "time"
+)
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Response struct and methods
+//_______________________________________________________________________
+
+// Response struct holds response values of executed request.
+type Response struct {
+ Request *Request
+ RawResponse *http.Response
+
+ body []byte
+ size int64
+ receivedAt time.Time
+}
+
+// Body method returns HTTP response as []byte array for the executed request.
+//
+// Note: `Response.Body` might be nil, if `Request.SetOutput` is used.
+func (r *Response) Body() []byte {
+ if r.RawResponse == nil {
+ return []byte{}
+ }
+ return r.body
+}
+
+// Status method returns the HTTP status string for the executed request.
+// Example: 200 OK
+func (r *Response) Status() string {
+ if r.RawResponse == nil {
+ return ""
+ }
+ return r.RawResponse.Status
+}
+
+// StatusCode method returns the HTTP status code for the executed request.
+// Example: 200
+func (r *Response) StatusCode() int {
+ if r.RawResponse == nil {
+ return 0
+ }
+ return r.RawResponse.StatusCode
+}
+
+// Result method returns the response value as an object if it has one
+func (r *Response) Result() interface{} {
+ return r.Request.Result
+}
+
+// Error method returns the error object if it has one
+func (r *Response) Error() interface{} {
+ return r.Request.Error
+}
+
+// Header method returns the response headers
+func (r *Response) Header() http.Header {
+ if r.RawResponse == nil {
+ return http.Header{}
+ }
+ return r.RawResponse.Header
+}
+
+// Cookies method to access all the response cookies
+func (r *Response) Cookies() []*http.Cookie {
+ if r.RawResponse == nil {
+ return make([]*http.Cookie, 0)
+ }
+ return r.RawResponse.Cookies()
+}
+
+// String method returns the body of the server response as String.
+func (r *Response) String() string {
+ if r.body == nil {
+ return ""
+ }
+ return strings.TrimSpace(string(r.body))
+}
+
+// Time method returns the duration between the request being sent and the response being received.
+//
+// See `Response.ReceivedAt` to know when the client received the response and see `Response.Request.Time` to know
+// when the client sent the request.
+func (r *Response) Time() time.Duration {
+ if r.Request.clientTrace != nil {
+ return r.receivedAt.Sub(r.Request.clientTrace.getConn)
+ }
+ return r.receivedAt.Sub(r.Request.Time)
+}
+
+// ReceivedAt method returns when the response was received from the server for the request.
+func (r *Response) ReceivedAt() time.Time {
+ return r.receivedAt
+}
+
+// Size method returns the HTTP response size in bytes. Yes, you could rely on the HTTP `Content-Length` header,
+// however it isn't reliable for chunked transfer or compressed responses. Resty calculates the response size
+// at the client end, so you get the actual size of the HTTP response.
+func (r *Response) Size() int64 {
+ return r.size
+}
+
+// RawBody method exposes the HTTP raw response body. Use this method in-conjunction with `SetDoNotParseResponse`
+// option otherwise you get an error as `read err: http: read on closed response body`.
+//
+// Do not forget to close the body, otherwise you might get into connection leaks, no connection reuse.
+// Basically you have taken over the control of response parsing from `Resty`.
+func (r *Response) RawBody() io.ReadCloser {
+ if r.RawResponse == nil {
+ return nil
+ }
+ return r.RawResponse.Body
+}
+
+// IsSuccess method returns true if HTTP status `code >= 200 and <= 299` otherwise false.
+func (r *Response) IsSuccess() bool {
+ return r.StatusCode() > 199 && r.StatusCode() < 300
+}
+
+// IsError method returns true if HTTP status `code >= 400` otherwise false.
+func (r *Response) IsError() bool {
+ return r.StatusCode() > 399
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Response Unexported methods
+//_______________________________________________________________________
+
+func (r *Response) fmtBodyString(sl int64) string {
+ if r.body != nil {
+ if int64(len(r.body)) > sl {
+ return fmt.Sprintf("***** RESPONSE TOO LARGE (size - %d) *****", len(r.body))
+ }
+ ct := r.Header().Get(hdrContentTypeKey)
+ if IsJSONType(ct) {
+ out := acquireBuffer()
+ defer releaseBuffer(out)
+ err := json.Indent(out, r.body, "", " ")
+ if err != nil {
+ return fmt.Sprintf("*** Error: Unable to format response body - \"%s\" ***\n\nLog Body as-is:\n%s", err, r.String())
+ }
+ return out.String()
+ }
+ return r.String()
+ }
+
+ return "***** NO CONTENT *****"
+}
diff --git a/vendor/github.com/go-resty/resty/v2/resty.go b/vendor/github.com/go-resty/resty/v2/resty.go
new file mode 100644
index 000000000..df2489a53
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/resty.go
@@ -0,0 +1,40 @@
+// Copyright (c) 2015-2019 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+// Package resty provides Simple HTTP and REST client library for Go.
+package resty
+
+import (
+ "net"
+ "net/http"
+ "net/http/cookiejar"
+
+ "golang.org/x/net/publicsuffix"
+)
+
+// Version # of resty
+const Version = "2.1.0"
+
+// New method creates a new Resty client.
+func New() *Client {
+ cookieJar, _ := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
+ return createClient(&http.Client{
+ Jar: cookieJar,
+ })
+}
+
+// NewWithClient method creates a new Resty client with given `http.Client`.
+func NewWithClient(hc *http.Client) *Client {
+ return createClient(hc)
+}
+
+// NewWithLocalAddr method creates a new Resty client with given Local Address
+// to dial from.
+func NewWithLocalAddr(localAddr net.Addr) *Client {
+ cookieJar, _ := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
+ return createClient(&http.Client{
+ Jar: cookieJar,
+ Transport: createTransport(localAddr),
+ })
+}
diff --git a/vendor/github.com/go-resty/resty/v2/retry.go b/vendor/github.com/go-resty/resty/v2/retry.go
new file mode 100644
index 000000000..5f34cb6f2
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/retry.go
@@ -0,0 +1,180 @@
+// Copyright (c) 2015-2019 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "context"
+ "math"
+ "math/rand"
+ "time"
+)
+
+const (
+ defaultMaxRetries = 3
+ defaultWaitTime = time.Duration(100) * time.Millisecond
+ defaultMaxWaitTime = time.Duration(2000) * time.Millisecond
+)
+
+type (
+ // Option is to create convenient retry options like wait time, max retries, etc.
+ Option func(*Options)
+
+ // RetryConditionFunc type is for retry condition function
+ // input: non-nil Response OR request execution error
+ RetryConditionFunc func(*Response, error) bool
+
+ // RetryAfterFunc returns time to wait before retry
+ // For example, it can parse HTTP Retry-After header
+ // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+ // Non-nil error is returned if it is found that request is not retryable
+ // (0, nil) is a special result means 'use default algorithm'
+ RetryAfterFunc func(*Client, *Response) (time.Duration, error)
+
+ // Options struct is used to hold retry settings.
+ Options struct {
+ maxRetries int
+ waitTime time.Duration
+ maxWaitTime time.Duration
+ retryConditions []RetryConditionFunc
+ }
+)
+
+// Retries sets the max number of retries
+func Retries(value int) Option {
+ return func(o *Options) {
+ o.maxRetries = value
+ }
+}
+
+// WaitTime sets the default wait time to sleep between requests
+func WaitTime(value time.Duration) Option {
+ return func(o *Options) {
+ o.waitTime = value
+ }
+}
+
+// MaxWaitTime sets the max wait time to sleep between requests
+func MaxWaitTime(value time.Duration) Option {
+ return func(o *Options) {
+ o.maxWaitTime = value
+ }
+}
+
+// RetryConditions sets the conditions that will be checked for retry.
+func RetryConditions(conditions []RetryConditionFunc) Option {
+ return func(o *Options) {
+ o.retryConditions = conditions
+ }
+}
+
+// Backoff retries with increasing timeout duration up until X amount of retries
+// (Default is 3 attempts, Override with option Retries(n))
+func Backoff(operation func() (*Response, error), options ...Option) error {
+ // Defaults
+ opts := Options{
+ maxRetries: defaultMaxRetries,
+ waitTime: defaultWaitTime,
+ maxWaitTime: defaultMaxWaitTime,
+ retryConditions: []RetryConditionFunc{},
+ }
+
+ for _, o := range options {
+ o(&opts)
+ }
+
+ var (
+ resp *Response
+ err error
+ )
+
+ for attempt := 0; attempt < opts.maxRetries; attempt++ {
+ resp, err = operation()
+ ctx := context.Background()
+ if resp != nil && resp.Request.ctx != nil {
+ ctx = resp.Request.ctx
+ }
+ if ctx.Err() != nil {
+ return err
+ }
+
+ needsRetry := err != nil // retry on operation errors by default
+
+ for _, condition := range opts.retryConditions {
+ needsRetry = condition(resp, err)
+ if needsRetry {
+ break
+ }
+ }
+
+ if !needsRetry {
+ return err
+ }
+
+ waitTime, err2 := sleepDuration(resp, opts.waitTime, opts.maxWaitTime, attempt)
+ if err2 != nil {
+ if err == nil {
+ err = err2
+ }
+ return err
+ }
+
+ select {
+ case <-time.After(waitTime):
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+
+ return err
+}
+
+func sleepDuration(resp *Response, min, max time.Duration, attempt int) (time.Duration, error) {
+ const maxInt = 1<<31 - 1 // max int for arch 386
+
+ if max < 0 {
+ max = maxInt
+ }
+
+ if resp == nil {
+ goto defaultCase
+ }
+
+ // 1. Check for custom callback
+ if retryAfterFunc := resp.Request.client.RetryAfter; retryAfterFunc != nil {
+ result, err := retryAfterFunc(resp.Request.client, resp)
+ if err != nil {
+ return 0, err // i.e. 'API quota exceeded'
+ }
+ if result == 0 {
+ goto defaultCase
+ }
+ if result < 0 || max < result {
+ result = max
+ }
+ if result < min {
+ result = min
+ }
+ return result, nil
+ }
+
+ // 2. Return capped exponential backoff with jitter
+ // http://www.awsarchitectureblog.com/2015/03/backoff.html
+defaultCase:
+ base := float64(min)
+ capLevel := float64(max)
+
+ temp := math.Min(capLevel, base*math.Exp2(float64(attempt)))
+ ri := int(temp / 2)
+ if ri <= 0 {
+ ri = maxInt // max int for arch 386
+ }
+ result := time.Duration(math.Abs(float64(ri + rand.Intn(ri))))
+
+ if result < min {
+ result = min
+ }
+
+ return result, nil
+}
diff --git a/vendor/github.com/go-resty/resty/v2/trace.go b/vendor/github.com/go-resty/resty/v2/trace.go
new file mode 100644
index 000000000..862a3a067
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/trace.go
@@ -0,0 +1,111 @@
+// Copyright (c) 2015-2019 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "context"
+ "crypto/tls"
+ "net/http/httptrace"
+ "time"
+)
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// TraceInfo struct
+//_______________________________________________________________________
+
+// TraceInfo struct is used to provide request trace info such as DNS lookup
+// duration, Connection obtain duration, Server processing duration, etc.
+//
+// Since v2.0.0
+type TraceInfo struct {
+ // DNSLookup is a duration that transport took to perform
+ // DNS lookup.
+ DNSLookup time.Duration
+
+ // ConnTime is a duration that took to obtain a successful connection.
+ ConnTime time.Duration
+
+ // TLSHandshake is a duration that TLS handshake took place.
+ TLSHandshake time.Duration
+
+ // ServerTime is a duration that server took to respond first byte.
+ ServerTime time.Duration
+
+ // ResponseTime is a duration since first response byte from server to
+ // request completion.
+ ResponseTime time.Duration
+
+ // TotalTime is a duration that total request took end-to-end.
+ TotalTime time.Duration
+
+ // IsConnReused is whether this connection has been previously
+ // used for another HTTP request.
+ IsConnReused bool
+
+ // IsConnWasIdle is whether this connection was obtained from an
+ // idle pool.
+ IsConnWasIdle bool
+
+ // ConnIdleTime is a duration how long the connection was previously
+ // idle, if IsConnWasIdle is true.
+ ConnIdleTime time.Duration
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// ClientTrace struct and its methods
+//_______________________________________________________________________
+
+// tracer struct maps the `httptrace.ClientTrace` hooks into Fields
+// with same naming for easy understanding. Plus additional insights
+// Request.
+type clientTrace struct {
+ getConn time.Time
+ gotConn time.Time
+ gotFirstResponseByte time.Time
+ dnsStart time.Time
+ dnsDone time.Time
+ tlsHandshakeStart time.Time
+ tlsHandshakeDone time.Time
+ wroteRequest time.Time
+ endTime time.Time
+ gotConnInfo httptrace.GotConnInfo
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Trace unexported methods
+//_______________________________________________________________________
+
+func (t *clientTrace) createContext(ctx context.Context) context.Context {
+ return httptrace.WithClientTrace(
+ ctx,
+ &httptrace.ClientTrace{
+ GetConn: func(_ string) {
+ t.getConn = time.Now()
+ },
+ GotConn: func(ci httptrace.GotConnInfo) {
+ t.gotConn = time.Now()
+ t.gotConnInfo = ci
+ },
+ GotFirstResponseByte: func() {
+ t.gotFirstResponseByte = time.Now()
+ },
+ DNSStart: func(_ httptrace.DNSStartInfo) {
+ t.dnsStart = time.Now()
+ },
+ DNSDone: func(_ httptrace.DNSDoneInfo) {
+ t.dnsDone = time.Now()
+ },
+ TLSHandshakeStart: func() {
+ t.tlsHandshakeStart = time.Now()
+ },
+ TLSHandshakeDone: func(_ tls.ConnectionState, _ error) {
+ t.tlsHandshakeDone = time.Now()
+ },
+ WroteRequest: func(_ httptrace.WroteRequestInfo) {
+ t.wroteRequest = time.Now()
+ },
+ },
+ )
+}
diff --git a/vendor/github.com/go-resty/resty/v2/util.go b/vendor/github.com/go-resty/resty/v2/util.go
new file mode 100644
index 000000000..814314115
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/util.go
@@ -0,0 +1,333 @@
+// Copyright (c) 2015-2019 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "log"
+ "mime/multipart"
+ "net/http"
+ "net/textproto"
+ "os"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "sort"
+ "strings"
+)
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Logger interface
+//_______________________________________________________________________
+
+// Logger interface is to abstract the logging from Resty. Gives control to
+// the Resty users, choice of the logger.
+type Logger interface {
+ Errorf(format string, v ...interface{})
+ Warnf(format string, v ...interface{})
+ Debugf(format string, v ...interface{})
+}
+
+func createLogger() *logger {
+ l := &logger{l: log.New(os.Stderr, "", log.Ldate|log.Lmicroseconds)}
+ return l
+}
+
+var _ Logger = (*logger)(nil)
+
+type logger struct {
+ l *log.Logger
+}
+
+func (l *logger) Errorf(format string, v ...interface{}) {
+ l.output("ERROR RESTY "+format, v...)
+}
+
+func (l *logger) Warnf(format string, v ...interface{}) {
+ l.output("WARN RESTY "+format, v...)
+}
+
+func (l *logger) Debugf(format string, v ...interface{}) {
+ l.output("DEBUG RESTY "+format, v...)
+}
+
+func (l *logger) output(format string, v ...interface{}) {
+ if len(v) == 0 {
+ l.l.Print(format)
+ return
+ }
+ l.l.Printf(format, v...)
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Package Helper methods
+//_______________________________________________________________________
+
+// IsStringEmpty method tells whether given string is empty or not
+func IsStringEmpty(str string) bool {
+ return len(strings.TrimSpace(str)) == 0
+}
+
+// DetectContentType method is used to figure out `Request.Body` content type for request header
+func DetectContentType(body interface{}) string {
+ contentType := plainTextType
+ kind := kindOf(body)
+ switch kind {
+ case reflect.Struct, reflect.Map:
+ contentType = jsonContentType
+ case reflect.String:
+ contentType = plainTextType
+ default:
+ if b, ok := body.([]byte); ok {
+ contentType = http.DetectContentType(b)
+ } else if kind == reflect.Slice {
+ contentType = jsonContentType
+ }
+ }
+
+ return contentType
+}
+
+// IsJSONType method is to check JSON content type or not
+func IsJSONType(ct string) bool {
+ return jsonCheck.MatchString(ct)
+}
+
+// IsXMLType method is to check XML content type or not
+func IsXMLType(ct string) bool {
+ return xmlCheck.MatchString(ct)
+}
+
+// Unmarshalc content into object from JSON or XML
+func Unmarshalc(c *Client, ct string, b []byte, d interface{}) (err error) {
+ if IsJSONType(ct) {
+ err = c.JSONUnmarshal(b, d)
+ } else if IsXMLType(ct) {
+ err = xml.Unmarshal(b, d)
+ }
+
+ return
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// RequestLog and ResponseLog type
+//_______________________________________________________________________
+
+// RequestLog struct is used to collect information from a resty request
+// instance for debug logging. It is sent to the request log callback before resty
+// actually logs the information.
+type RequestLog struct {
+ Header http.Header
+ Body string
+}
+
+// ResponseLog struct is used to collect information from a resty response
+// instance for debug logging. It is sent to the response log callback before resty
+// actually logs the information.
+type ResponseLog struct {
+ Header http.Header
+ Body string
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Package Unexported methods
+//_______________________________________________________________________
+
+// way to disable the HTML escape as opt-in
+func jsonMarshal(c *Client, r *Request, d interface{}) ([]byte, error) {
+ if !r.jsonEscapeHTML {
+ return noescapeJSONMarshal(d)
+ } else if !c.jsonEscapeHTML {
+ return noescapeJSONMarshal(d)
+ }
+ return c.JSONMarshal(d)
+}
+
+func firstNonEmpty(v ...string) string {
+ for _, s := range v {
+ if !IsStringEmpty(s) {
+ return s
+ }
+ }
+ return ""
+}
+
+var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"")
+
+func escapeQuotes(s string) string {
+ return quoteEscaper.Replace(s)
+}
+
+func createMultipartHeader(param, fileName, contentType string) textproto.MIMEHeader {
+ hdr := make(textproto.MIMEHeader)
+
+ var contentDispositionValue string
+ if IsStringEmpty(fileName) {
+ contentDispositionValue = fmt.Sprintf(`form-data; name="%s"`, param)
+ } else {
+ contentDispositionValue = fmt.Sprintf(`form-data; name="%s"; filename="%s"`,
+ param, escapeQuotes(fileName))
+ }
+ hdr.Set("Content-Disposition", contentDispositionValue)
+
+ if !IsStringEmpty(contentType) {
+ hdr.Set(hdrContentTypeKey, contentType)
+ }
+ return hdr
+}
+
+func addMultipartFormField(w *multipart.Writer, mf *MultipartField) error {
+ partWriter, err := w.CreatePart(createMultipartHeader(mf.Param, mf.FileName, mf.ContentType))
+ if err != nil {
+ return err
+ }
+
+ _, err = io.Copy(partWriter, mf.Reader)
+ return err
+}
+
+func writeMultipartFormFile(w *multipart.Writer, fieldName, fileName string, r io.Reader) error {
+ // Auto detect actual multipart content type
+ cbuf := make([]byte, 512)
+ size, err := r.Read(cbuf)
+ if err != nil {
+ return err
+ }
+
+ partWriter, err := w.CreatePart(createMultipartHeader(fieldName, fileName, http.DetectContentType(cbuf)))
+ if err != nil {
+ return err
+ }
+
+ if _, err = partWriter.Write(cbuf[:size]); err != nil {
+ return err
+ }
+
+ _, err = io.Copy(partWriter, r)
+ return err
+}
+
+func addFile(w *multipart.Writer, fieldName, path string) error {
+ file, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ defer closeq(file)
+ return writeMultipartFormFile(w, fieldName, filepath.Base(path), file)
+}
+
+func addFileReader(w *multipart.Writer, f *File) error {
+ return writeMultipartFormFile(w, f.ParamName, f.Name, f.Reader)
+}
+
+func getPointer(v interface{}) interface{} {
+ vv := valueOf(v)
+ if vv.Kind() == reflect.Ptr {
+ return v
+ }
+ return reflect.New(vv.Type()).Interface()
+}
+
+func isPayloadSupported(m string, allowMethodGet bool) bool {
+ return !(m == MethodHead || m == MethodOptions || (m == MethodGet && !allowMethodGet))
+}
+
+func typeOf(i interface{}) reflect.Type {
+ return indirect(valueOf(i)).Type()
+}
+
+func valueOf(i interface{}) reflect.Value {
+ return reflect.ValueOf(i)
+}
+
+func indirect(v reflect.Value) reflect.Value {
+ return reflect.Indirect(v)
+}
+
+func kindOf(v interface{}) reflect.Kind {
+ return typeOf(v).Kind()
+}
+
+func createDirectory(dir string) (err error) {
+ if _, err = os.Stat(dir); err != nil {
+ if os.IsNotExist(err) {
+ if err = os.MkdirAll(dir, 0755); err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+func canJSONMarshal(contentType string, kind reflect.Kind) bool {
+ return IsJSONType(contentType) && (kind == reflect.Struct || kind == reflect.Map || kind == reflect.Slice)
+}
+
+func functionName(i interface{}) string {
+ return runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name()
+}
+
+func acquireBuffer() *bytes.Buffer {
+ return bufPool.Get().(*bytes.Buffer)
+}
+
+func releaseBuffer(buf *bytes.Buffer) {
+ if buf != nil {
+ buf.Reset()
+ bufPool.Put(buf)
+ }
+}
+
+func closeq(v interface{}) {
+ if c, ok := v.(io.Closer); ok {
+ sliently(c.Close())
+ }
+}
+
+func sliently(_ ...interface{}) {}
+
+func composeHeaders(c *Client, r *Request, hdrs http.Header) string {
+ str := make([]string, 0, len(hdrs))
+ for _, k := range sortHeaderKeys(hdrs) {
+ var v string
+ if k == "Cookie" {
+ cv := strings.TrimSpace(strings.Join(hdrs[k], ", "))
+ for _, c := range c.GetClient().Jar.Cookies(r.RawRequest.URL) {
+ if cv != "" {
+ cv = cv + "; " + c.String()
+ } else {
+ cv = c.String()
+ }
+ }
+ v = strings.TrimSpace(fmt.Sprintf("%25s: %s", k, cv))
+ } else {
+ v = strings.TrimSpace(fmt.Sprintf("%25s: %s", k, strings.Join(hdrs[k], ", ")))
+ }
+ if v != "" {
+ str = append(str, "\t"+v)
+ }
+ }
+ return strings.Join(str, "\n")
+}
+
+func sortHeaderKeys(hdrs http.Header) []string {
+ keys := make([]string, 0, len(hdrs))
+ for key := range hdrs {
+ keys = append(keys, key)
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+func copyHeaders(hdrs http.Header) http.Header {
+ nh := http.Header{}
+ for k, v := range hdrs {
+ nh[k] = v
+ }
+ return nh
+}
diff --git a/vendor/github.com/go-zookeeper/zk/.codecov.yaml b/vendor/github.com/go-zookeeper/zk/.codecov.yaml
new file mode 100644
index 000000000..98475205d
--- /dev/null
+++ b/vendor/github.com/go-zookeeper/zk/.codecov.yaml
@@ -0,0 +1,8 @@
+coverage:
+ status:
+ patch:
+ default:
+ target: 75%
+ project:
+ default:
+ threshold: 1%
diff --git a/vendor/github.com/go-zookeeper/zk/.gitignore b/vendor/github.com/go-zookeeper/zk/.gitignore
new file mode 100644
index 000000000..9c83a9667
--- /dev/null
+++ b/vendor/github.com/go-zookeeper/zk/.gitignore
@@ -0,0 +1,8 @@
+.vscode/
+.DS_Store
+profile.cov
+zookeeper
+zookeeper-*/
+zookeeper-*.tar.gz
+apache-zookeeper-*/
+apache-zookeeper-*.tar.gz
diff --git a/vendor/github.com/go-zookeeper/zk/CONTRIBUTION.md b/vendor/github.com/go-zookeeper/zk/CONTRIBUTION.md
new file mode 100644
index 000000000..b7c6e258d
--- /dev/null
+++ b/vendor/github.com/go-zookeeper/zk/CONTRIBUTION.md
@@ -0,0 +1,57 @@
+# how to contribute to the go zookeeper library
+
+## **Did you find a bug?**
+
+* **Ensure the bug was not already reported** by searching on GitHub under [Issues](https://github.com/go-zookeeper/zk/issues).
+
+* If you're unable to find an open issue addressing the problem, open a new one.
+ * Be sure to include a title and clear description.
+ * Be sure to include the actual behavior vs the expected.
+ * As much relevant information as possible, a code sample or an executable test case demonstrating the expected vs actual behavior.
+
+## Did you write a patch that fixes a bug
+
+* Ensure that all bugs are first reported as an issue. This will help others in finding fixes through issues first.
+
+* Open a PR referencing the issue for the bug.
+
+## Pull Requests
+
+We are open to all Pull Requests; it's best to accompany the requests with an issue.
+
+* The PR requires the github actions to pass.
+
+* Requires at least one maintainer to approve the PR to merge to master.
+
+While the above must be satisfied prior to having your pull request reviewed, the reviewer(s) may ask you to complete additional design work, tests, or other changes before your pull request can be ultimately accepted.
+
+## Versioned Releases
+
+Since this library is a core client for interacting with Zookeeper, we do [SemVer](https://semver.org/) releases to ensure predictable changes for users.
+
+Zookeeper itself maintains a compatibility check on the main codebase as well as maintaining backwards compatibility through all Major releases, this core library will try to uphold similar standards of releases.
+
+* Code that is merged into master should be ready for release at any given time.
+ * This is to say, that code should not be merged into master if it is not complete and ready for production use.
+
+* If a fix needs to be released ahead of normal operations, file an issue explaining the urgency and impact of the bug.
+
+## Coding guidelines
+
+Some good external resources for style:
+
+1. [Effective Go](https://golang.org/doc/effective_go.html)
+2. [The Go common mistakes guide](https://github.com/golang/go/wiki/CodeReviewComments)
+
+All code should be error-free when run through `golint` and `go vet`. We
+recommend setting up your editor to:
+
+* Run `goimports` on save
+* Run `golint` and `go vet` to check for errors
+
+You can find information on editor support for Go tools here:
+
+
+## Additional information
+
+* We have zero external dependencies, and would like to maintain this. Use of any external go library should be limited to tests.
diff --git a/vendor/github.com/samuel/go-zookeeper/LICENSE b/vendor/github.com/go-zookeeper/zk/LICENSE
similarity index 100%
rename from vendor/github.com/samuel/go-zookeeper/LICENSE
rename to vendor/github.com/go-zookeeper/zk/LICENSE
diff --git a/vendor/github.com/go-zookeeper/zk/Makefile b/vendor/github.com/go-zookeeper/zk/Makefile
new file mode 100644
index 000000000..f0b7965cd
--- /dev/null
+++ b/vendor/github.com/go-zookeeper/zk/Makefile
@@ -0,0 +1,51 @@
+# make file to hold the logic of build and test setup
+ZK_VERSION ?= 3.5.6
+
+# Apache changed the name of the archive in version 3.5.x and separated out
+# src and binary packages
+ZK_MINOR_VER=$(word 2, $(subst ., ,$(ZK_VERSION)))
+ifeq ($(shell test $(ZK_MINOR_VER) -le 4; echo $$?),0)
+ ZK = zookeeper-$(ZK_VERSION)
+else
+ ZK = apache-zookeeper-$(ZK_VERSION)-bin
+endif
+ZK_URL = "https://archive.apache.org/dist/zookeeper/zookeeper-$(ZK_VERSION)/$(ZK).tar.gz"
+
+PACKAGES := $(shell go list ./... | grep -v examples)
+
+.DEFAULT_GOAL := test
+
+$(ZK):
+ wget $(ZK_URL)
+ tar -zxf $(ZK).tar.gz
+ rm $(ZK).tar.gz
+
+zookeeper: $(ZK)
+ # we link to a standard directory path so then the tests dont need to find based on version
+ # in the test code. this allows backward compatable testing.
+ ln -s $(ZK) zookeeper
+
+.PHONY: setup
+setup: zookeeper
+
+.PHONY: lint
+lint:
+ go fmt ./...
+ go vet ./...
+
+.PHONY: build
+build:
+ go build ./...
+
+.PHONY: test
+test: build zookeeper
+ go test -timeout 500s -v -race -covermode atomic -coverprofile=profile.cov $(PACKAGES)
+
+.PHONY: clean
+clean:
+ rm -f apache-zookeeper-*.tar.gz
+ rm -f zookeeper-*.tar.gz
+ rm -rf apache-zookeeper-*/
+ rm -rf zookeeper-*/
+ rm -f zookeeper
+ rm -f profile.cov
diff --git a/vendor/github.com/go-zookeeper/zk/README.md b/vendor/github.com/go-zookeeper/zk/README.md
new file mode 100644
index 000000000..0028096f3
--- /dev/null
+++ b/vendor/github.com/go-zookeeper/zk/README.md
@@ -0,0 +1,11 @@
+Native Go Zookeeper Client Library
+===================================
+
+[![GoDoc](https://godoc.org/github.com/go-zookeeper/zk?status.svg)](https://godoc.org/github.com/go-zookeeper/zk)
+[![Build Status](https://img.shields.io/github/workflow/status/go-zookeeper/zk/unittest/master)](https://github.com/go-zookeeper/zk/actions?query=branch%3Amaster)
+[![Coverage Status](https://img.shields.io/codecov/c/github/go-zookeeper/zk/master)](https://codecov.io/gh/go-zookeeper/zk/branch/master)
+
+License
+-------
+
+3-clause BSD. See LICENSE file.
diff --git a/vendor/github.com/samuel/go-zookeeper/zk/conn.go b/vendor/github.com/go-zookeeper/zk/conn.go
similarity index 85%
rename from vendor/github.com/samuel/go-zookeeper/zk/conn.go
rename to vendor/github.com/go-zookeeper/zk/conn.go
index da9503a27..97377ecea 100644
--- a/vendor/github.com/samuel/go-zookeeper/zk/conn.go
+++ b/vendor/github.com/go-zookeeper/zk/conn.go
@@ -10,13 +10,13 @@ Possible watcher events:
*/
import (
+ "context"
"crypto/rand"
"encoding/binary"
"errors"
"fmt"
"io"
"net"
- "strconv"
"strings"
"sync"
"sync/atomic"
@@ -82,6 +82,7 @@ type Conn struct {
eventChan chan Event
eventCallback EventCallback // may be nil
shouldQuit chan struct{}
+ shouldQuitOnce sync.Once
pingInterval time.Duration
recvTimeout time.Duration
connectTimeout time.Duration
@@ -101,9 +102,10 @@ type Conn struct {
reconnectLatch chan struct{}
setWatchLimit int
setWatchCallback func([]*setWatchesRequest)
+
// Debug (for recurring re-auth hang)
debugCloseRecvLoop bool
- debugReauthDone chan struct{}
+ resendZkAuthFn func(context.Context, *Conn) error
logger Logger
logInfo bool // true if information messages are logged; false if only errors are logged
@@ -177,15 +179,7 @@ func Connect(servers []string, sessionTimeout time.Duration, options ...connOpti
return nil, nil, errors.New("zk: server list must not be empty")
}
- srvs := make([]string, len(servers))
-
- for i, addr := range servers {
- if strings.Contains(addr, ":") {
- srvs[i] = addr
- } else {
- srvs[i] = addr + ":" + strconv.Itoa(DefaultPort)
- }
- }
+ srvs := FormatServers(servers)
// Randomize the order of the servers to avoid creating hotspots
stringShuffle(srvs)
@@ -206,6 +200,7 @@ func Connect(servers []string, sessionTimeout time.Duration, options ...connOpti
logger: DefaultLogger,
logInfo: true, // default is true for backwards compatability
buf: make([]byte, bufferSize),
+ resendZkAuthFn: resendZkAuth,
}
// Set provided options.
@@ -218,9 +213,11 @@ func Connect(servers []string, sessionTimeout time.Duration, options ...connOpti
}
conn.setTimeouts(int32(sessionTimeout / time.Millisecond))
+ // TODO: This context should be passed in by the caller to be the connection lifecycle context.
+ ctx := context.Background()
go func() {
- conn.loop()
+ conn.loop(ctx)
conn.flushRequests(ErrClosing)
conn.invalidateWatches(ErrClosing)
close(conn.eventChan)
@@ -309,13 +306,17 @@ func WithMaxConnBufferSize(maxBufferSize int) connOption {
}
}
+// Close will submit a close request with ZK and signal the connection to stop
+// sending and receiving packets.
func (c *Conn) Close() {
- close(c.shouldQuit)
+ c.shouldQuitOnce.Do(func() {
+ close(c.shouldQuit)
- select {
- case <-c.queueRequest(opClose, &closeRequest{}, &closeResponse{}, nil):
- case <-time.After(time.Second):
- }
+ select {
+ case <-c.queueRequest(opClose, &closeRequest{}, &closeResponse{}, nil):
+ case <-time.After(time.Second):
+ }
+ })
}
// State returns the current state of the connection.
@@ -364,7 +365,9 @@ func (c *Conn) connect() error {
c.serverMu.Lock()
c.server, retryStart = c.hostProvider.Next()
c.serverMu.Unlock()
+
c.setState(StateConnecting)
+
if retryStart {
c.flushUnsentRequests(ErrNoServer)
select {
@@ -382,70 +385,12 @@ func (c *Conn) connect() error {
c.conn = zkConn
c.setState(StateConnected)
if c.logInfo {
- c.logger.Printf("Connected to %s", c.Server())
+ c.logger.Printf("connected to %s", c.Server())
}
return nil
}
- c.logger.Printf("Failed to connect to %s: %+v", c.Server(), err)
- }
-}
-
-func (c *Conn) resendZkAuth(reauthReadyChan chan struct{}) {
- shouldCancel := func() bool {
- select {
- case <-c.shouldQuit:
- return true
- case <-c.closeChan:
- return true
- default:
- return false
- }
- }
-
- c.credsMu.Lock()
- defer c.credsMu.Unlock()
-
- defer close(reauthReadyChan)
-
- if c.logInfo {
- c.logger.Printf("re-submitting `%d` credentials after reconnect", len(c.creds))
- }
-
- for _, cred := range c.creds {
- if shouldCancel() {
- return
- }
- resChan, err := c.sendRequest(
- opSetAuth,
- &setAuthRequest{Type: 0,
- Scheme: cred.scheme,
- Auth: cred.auth,
- },
- &setAuthResponse{},
- nil)
-
- if err != nil {
- c.logger.Printf("call to sendRequest failed during credential resubmit: %s", err)
- // FIXME(prozlach): lets ignore errors for now
- continue
- }
-
- var res response
- select {
- case res = <-resChan:
- case <-c.closeChan:
- c.logger.Printf("recv closed, cancel re-submitting credentials")
- return
- case <-c.shouldQuit:
- c.logger.Printf("should quit, cancel re-submitting credentials")
- return
- }
- if res.err != nil {
- c.logger.Printf("credential re-submit failed: %s", res.err)
- // FIXME(prozlach): lets ignore errors for now
- continue
- }
+ c.logger.Printf("failed to connect to %s: %v", c.Server(), err)
}
}
@@ -474,7 +419,7 @@ func (c *Conn) sendRequest(
return rq.recvChan, nil
}
-func (c *Conn) loop() {
+func (c *Conn) loop(ctx context.Context) {
for {
if err := c.connect(); err != nil {
// c.Close() was called
@@ -495,25 +440,29 @@ func (c *Conn) loop() {
}
c.hostProvider.Connected() // mark success
c.closeChan = make(chan struct{}) // channel to tell send loop stop
- reauthChan := make(chan struct{}) // channel to tell send loop that authdata has been resubmitted
var wg sync.WaitGroup
+
wg.Add(1)
go func() {
- <-reauthChan
- if c.debugCloseRecvLoop {
- close(c.debugReauthDone)
+ defer c.conn.Close() // causes recv loop to EOF/exit
+ defer wg.Done()
+
+ if err := c.resendZkAuthFn(ctx, c); err != nil {
+ c.logger.Printf("error in resending auth creds: %v", err)
+ return
}
- err := c.sendLoop()
- if err != nil || c.logInfo {
- c.logger.Printf("send loop terminated: err=%v", err)
+
+ if err := c.sendLoop(); err != nil || c.logInfo {
+ c.logger.Printf("send loop terminated: %v", err)
}
- c.conn.Close() // causes recv loop to EOF/exit
- wg.Done()
}()
wg.Add(1)
go func() {
+ defer close(c.closeChan) // tell send loop to exit
+ defer wg.Done()
+
var err error
if c.debugCloseRecvLoop {
err = errors.New("DEBUG: close recv loop")
@@ -521,17 +470,13 @@ func (c *Conn) loop() {
err = c.recvLoop(c.conn)
}
if err != io.EOF || c.logInfo {
- c.logger.Printf("recv loop terminated: err=%v", err)
+ c.logger.Printf("recv loop terminated: %v", err)
}
if err == nil {
panic("zk: recvLoop should never return nil error")
}
- close(c.closeChan) // tell send loop to exit
- wg.Done()
}()
- c.resendZkAuth(reauthChan)
-
c.sendSetWatches()
wg.Wait()
}
@@ -671,7 +616,7 @@ func (c *Conn) sendSetWatches() {
for _, req := range reqs {
_, err := c.request(opSetWatches, req, res, nil)
if err != nil {
- c.logger.Printf("Failed to set previous watches: %s", err.Error())
+ c.logger.Printf("Failed to set previous watches: %v", err)
break
}
}
@@ -695,28 +640,20 @@ func (c *Conn) authenticate() error {
binary.BigEndian.PutUint32(buf[:4], uint32(n))
- if err := c.conn.SetWriteDeadline(time.Now().Add(c.recvTimeout * 10)); err != nil {
- return err
- }
+ c.conn.SetWriteDeadline(time.Now().Add(c.recvTimeout * 10))
_, err = c.conn.Write(buf[:n+4])
+ c.conn.SetWriteDeadline(time.Time{})
if err != nil {
return err
}
- if err := c.conn.SetWriteDeadline(time.Time{}); err != nil {
- return err
- }
// Receive and decode a connect response.
- if err := c.conn.SetReadDeadline(time.Now().Add(c.recvTimeout * 10)); err != nil {
- return err
- }
+ c.conn.SetReadDeadline(time.Now().Add(c.recvTimeout * 10))
_, err = io.ReadFull(c.conn, buf[:4])
+ c.conn.SetReadDeadline(time.Time{})
if err != nil {
return err
}
- if err := c.conn.SetReadDeadline(time.Time{}); err != nil {
- return err
- }
blen := int(binary.BigEndian.Uint32(buf[:4]))
if cap(buf) < blen {
@@ -778,18 +715,14 @@ func (c *Conn) sendData(req *request) error {
c.requests[req.xid] = req
c.requestsLock.Unlock()
- if err := c.conn.SetWriteDeadline(time.Now().Add(c.recvTimeout)); err != nil {
- return err
- }
+ c.conn.SetWriteDeadline(time.Now().Add(c.recvTimeout))
_, err = c.conn.Write(c.buf[:n+4])
+ c.conn.SetWriteDeadline(time.Time{})
if err != nil {
req.recvChan <- response{-1, err}
c.conn.Close()
return err
}
- if err := c.conn.SetWriteDeadline(time.Time{}); err != nil {
- return err
- }
return nil
}
@@ -812,17 +745,13 @@ func (c *Conn) sendLoop() error {
binary.BigEndian.PutUint32(c.buf[:4], uint32(n))
- if err := c.conn.SetWriteDeadline(time.Now().Add(c.recvTimeout)); err != nil {
- return err
- }
+ c.conn.SetWriteDeadline(time.Now().Add(c.recvTimeout))
_, err = c.conn.Write(c.buf[:n+4])
+ c.conn.SetWriteDeadline(time.Time{})
if err != nil {
c.conn.Close()
return err
}
- if err := c.conn.SetWriteDeadline(time.Time{}); err != nil {
- return err
- }
case <-c.closeChan:
return nil
}
@@ -854,12 +783,10 @@ func (c *Conn) recvLoop(conn net.Conn) error {
}
_, err = io.ReadFull(conn, buf[:blen])
+ conn.SetReadDeadline(time.Time{})
if err != nil {
return err
}
- if err := conn.SetReadDeadline(time.Time{}); err != nil {
- return err
- }
res := responseHeader{}
_, err = decodePacket(buf[:16], &res)
@@ -892,7 +819,7 @@ func (c *Conn) recvLoop(conn net.Conn) error {
c.watchersLock.Lock()
for _, t := range wTypes {
wpt := watchPathType{res.Path, t}
- if watchers, ok := c.watchers[wpt]; ok {
+ if watchers := c.watchers[wpt]; watchers != nil && len(watchers) > 0 {
for _, ch := range watchers {
ch <- ev
close(ch)
@@ -957,16 +884,51 @@ func (c *Conn) queueRequest(opcode int32, req interface{}, res interface{}, recv
opcode: opcode,
pkt: req,
recvStruct: res,
- recvChan: make(chan response, 1),
+ recvChan: make(chan response, 2),
recvFunc: recvFunc,
}
- c.sendChan <- rq
+
+ switch opcode {
+ case opClose:
+ // always attempt to send close ops.
+ select {
+ case c.sendChan <- rq:
+ case <-time.After(c.connectTimeout * 2):
+ c.logger.Printf("gave up trying to send opClose to server")
+ rq.recvChan <- response{-1, ErrConnectionClosed}
+ }
+ default:
+ // otherwise avoid deadlocks for dumb clients who aren't aware that
+ // the ZK connection is closed yet.
+ select {
+ case <-c.shouldQuit:
+ rq.recvChan <- response{-1, ErrConnectionClosed}
+ case c.sendChan <- rq:
+ // check for a tie
+ select {
+ case <-c.shouldQuit:
+ // maybe the caller gets this, maybe not- we tried.
+ rq.recvChan <- response{-1, ErrConnectionClosed}
+ default:
+ }
+ }
+ }
return rq.recvChan
}
func (c *Conn) request(opcode int32, req interface{}, res interface{}, recvFunc func(*request, *responseHeader, error)) (int64, error) {
r := <-c.queueRequest(opcode, req, res, recvFunc)
- return r.zxid, r.err
+ select {
+ case <-c.shouldQuit:
+ // queueRequest() can be racy, double-check for the race here and avoid
+ // a potential data-race. otherwise the client of this func may try to
+ // access `res` fields concurrently w/ the async response processor.
+ // NOTE: callers of this func should check for (at least) ErrConnectionClosed
+ // and avoid accessing fields of the response object if such error is present.
+ return -1, ErrConnectionClosed
+ default:
+ return r.zxid, r.err
+ }
}
func (c *Conn) AddAuth(scheme string, auth []byte) error {
@@ -1002,6 +964,9 @@ func (c *Conn) Children(path string) ([]string, *Stat, error) {
res := &getChildren2Response{}
_, err := c.request(opGetChildren2, &getChildren2Request{Path: path, Watch: false}, res, nil)
+ if err == ErrConnectionClosed {
+ return nil, nil, err
+ }
return res.Children, &res.Stat, err
}
@@ -1030,6 +995,9 @@ func (c *Conn) Get(path string) ([]byte, *Stat, error) {
res := &getDataResponse{}
_, err := c.request(opGetData, &getDataRequest{Path: path, Watch: false}, res, nil)
+ if err == ErrConnectionClosed {
+ return nil, nil, err
+ }
return res.Data, &res.Stat, err
}
@@ -1059,6 +1027,9 @@ func (c *Conn) Set(path string, data []byte, version int32) (*Stat, error) {
res := &setDataResponse{}
_, err := c.request(opSetData, &SetDataRequest{path, data, version}, res, nil)
+ if err == ErrConnectionClosed {
+ return nil, err
+ }
return &res.Stat, err
}
@@ -1069,6 +1040,35 @@ func (c *Conn) Create(path string, data []byte, flags int32, acl []ACL) (string,
res := &createResponse{}
_, err := c.request(opCreate, &CreateRequest{path, data, acl, flags}, res, nil)
+ if err == ErrConnectionClosed {
+ return "", err
+ }
+ return res.Path, err
+}
+
+func (c *Conn) CreateContainer(path string, data []byte, flags int32, acl []ACL) (string, error) {
+ if err := validatePath(path, flags&FlagSequence == FlagSequence); err != nil {
+ return "", err
+ }
+ if flags&FlagTTL != FlagTTL {
+ return "", ErrInvalidFlags
+ }
+
+ res := &createResponse{}
+ _, err := c.request(opCreateContainer, &CreateContainerRequest{path, data, acl, flags}, res, nil)
+ return res.Path, err
+}
+
+func (c *Conn) CreateTTL(path string, data []byte, flags int32, acl []ACL, ttl time.Duration) (string, error) {
+ if err := validatePath(path, flags&FlagSequence == FlagSequence); err != nil {
+ return "", err
+ }
+ if flags&FlagTTL != FlagTTL {
+ return "", ErrInvalidFlags
+ }
+
+ res := &createResponse{}
+ _, err := c.request(opCreateTTL, &CreateTTLRequest{path, data, acl, flags, ttl.Milliseconds()}, res, nil)
return res.Path, err
}
@@ -1137,6 +1137,9 @@ func (c *Conn) Exists(path string) (bool, *Stat, error) {
res := &existsResponse{}
_, err := c.request(opExists, &existsRequest{Path: path, Watch: false}, res, nil)
+ if err == ErrConnectionClosed {
+ return false, nil, err
+ }
exists := true
if err == ErrNoNode {
exists = false
@@ -1177,6 +1180,9 @@ func (c *Conn) GetACL(path string) ([]ACL, *Stat, error) {
res := &getAclResponse{}
_, err := c.request(opGetAcl, &getAclRequest{Path: path}, res, nil)
+ if err == ErrConnectionClosed {
+ return nil, nil, err
+ }
return res.Acl, &res.Stat, err
}
func (c *Conn) SetACL(path string, acl []ACL, version int32) (*Stat, error) {
@@ -1186,6 +1192,9 @@ func (c *Conn) SetACL(path string, acl []ACL, version int32) (*Stat, error) {
res := &setAclResponse{}
_, err := c.request(opSetAcl, &setAclRequest{Path: path, Acl: acl, Version: version}, res, nil)
+ if err == ErrConnectionClosed {
+ return nil, err
+ }
return &res.Stat, err
}
@@ -1196,6 +1205,9 @@ func (c *Conn) Sync(path string) (string, error) {
res := &syncResponse{}
_, err := c.request(opSync, &syncRequest{Path: path}, res, nil)
+ if err == ErrConnectionClosed {
+ return "", err
+ }
return res.Path, err
}
@@ -1231,6 +1243,9 @@ func (c *Conn) Multi(ops ...interface{}) ([]MultiResponse, error) {
}
res := &multiResponse{}
_, err := c.request(opMulti, req, res, nil)
+ if err == ErrConnectionClosed {
+ return nil, err
+ }
mr := make([]MultiResponse, len(res.Ops))
for i, op := range res.Ops {
mr[i] = MultiResponse{Stat: op.Stat, String: op.String, Error: op.Err.toError()}
@@ -1239,8 +1254,11 @@ func (c *Conn) Multi(ops ...interface{}) ([]MultiResponse, error) {
}
// IncrementalReconfig is the zookeeper reconfiguration api that allows adding and removing servers
-// by lists of members.
-// Return the new configuration stats.
+// by lists of members. For more info refer to the ZK documentation.
+//
+// An optional version allows for conditional reconfigurations, -1 ignores the condition.
+//
+// Returns the new configuration znode stat.
func (c *Conn) IncrementalReconfig(joining, leaving []string, version int64) (*Stat, error) {
// TODO: validate the shape of the member string to give early feedback.
request := &reconfigRequest{
@@ -1252,9 +1270,12 @@ func (c *Conn) IncrementalReconfig(joining, leaving []string, version int64) (*S
return c.internalReconfig(request)
}
-// Reconfig is the non-incremental update functionality for Zookeeper where the list preovided
-// is the entire new member list.
-// the optional version allows for conditional reconfigurations, -1 ignores the condition.
+// Reconfig is the non-incremental update functionality for Zookeeper where the list provided
+// is the entire new member list. For more info refer to the ZK documentation.
+//
+// An optional version allows for conditional reconfigurations, -1 ignores the condition.
+//
+// Returns the new configuration znode stat.
func (c *Conn) Reconfig(members []string, version int64) (*Stat, error) {
request := &reconfigRequest{
NewMembers: []byte(strings.Join(members, ",")),
@@ -1276,3 +1297,62 @@ func (c *Conn) Server() string {
defer c.serverMu.Unlock()
return c.server
}
+
+func resendZkAuth(ctx context.Context, c *Conn) error {
+ shouldCancel := func() bool {
+ select {
+ case <-c.shouldQuit:
+ return true
+ case <-c.closeChan:
+ return true
+ default:
+ return false
+ }
+ }
+
+ c.credsMu.Lock()
+ defer c.credsMu.Unlock()
+
+ if c.logInfo {
+ c.logger.Printf("re-submitting `%d` credentials after reconnect", len(c.creds))
+ }
+
+ for _, cred := range c.creds {
+ // return early before attempting to send request.
+ if shouldCancel() {
+ return nil
+ }
+ // do not use the public API for auth since it depends on the send/recv loops
+ // that are waiting for this to return
+ resChan, err := c.sendRequest(
+ opSetAuth,
+ &setAuthRequest{Type: 0,
+ Scheme: cred.scheme,
+ Auth: cred.auth,
+ },
+ &setAuthResponse{},
+ nil, /* recvFunc*/
+ )
+ if err != nil {
+ return fmt.Errorf("failed to send auth request: %v", err)
+ }
+
+ var res response
+ select {
+ case res = <-resChan:
+ case <-c.closeChan:
+ c.logger.Printf("recv closed, cancel re-submitting credentials")
+ return nil
+ case <-c.shouldQuit:
+ c.logger.Printf("should quit, cancel re-submitting credentials")
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ if res.err != nil {
+ return fmt.Errorf("failed conneciton setAuth request: %v", res.err)
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/go-zookeeper/zk/constants.go b/vendor/github.com/go-zookeeper/zk/constants.go
new file mode 100644
index 000000000..d914301f2
--- /dev/null
+++ b/vendor/github.com/go-zookeeper/zk/constants.go
@@ -0,0 +1,255 @@
+package zk
+
+import (
+ "errors"
+ "fmt"
+)
+
+const (
+ protocolVersion = 0
+
+ DefaultPort = 2181
+)
+
+const (
+ opNotify = 0
+ opCreate = 1
+ opDelete = 2
+ opExists = 3
+ opGetData = 4
+ opSetData = 5
+ opGetAcl = 6
+ opSetAcl = 7
+ opGetChildren = 8
+ opSync = 9
+ opPing = 11
+ opGetChildren2 = 12
+ opCheck = 13
+ opMulti = 14
+ opReconfig = 16
+ opCreateContainer = 19
+ opCreateTTL = 21
+ opClose = -11
+ opSetAuth = 100
+ opSetWatches = 101
+ opError = -1
+ // Not in protocol, used internally
+ opWatcherEvent = -2
+)
+
+const (
+ EventNodeCreated EventType = 1
+ EventNodeDeleted EventType = 2
+ EventNodeDataChanged EventType = 3
+ EventNodeChildrenChanged EventType = 4
+
+ EventSession EventType = -1
+ EventNotWatching EventType = -2
+)
+
+var (
+ eventNames = map[EventType]string{
+ EventNodeCreated: "EventNodeCreated",
+ EventNodeDeleted: "EventNodeDeleted",
+ EventNodeDataChanged: "EventNodeDataChanged",
+ EventNodeChildrenChanged: "EventNodeChildrenChanged",
+ EventSession: "EventSession",
+ EventNotWatching: "EventNotWatching",
+ }
+)
+
+const (
+ StateUnknown State = -1
+ StateDisconnected State = 0
+ StateConnecting State = 1
+ StateAuthFailed State = 4
+ StateConnectedReadOnly State = 5
+ StateSaslAuthenticated State = 6
+ StateExpired State = -112
+
+ StateConnected = State(100)
+ StateHasSession = State(101)
+)
+
+const (
+ FlagEphemeral = 1
+ FlagSequence = 2
+ FlagTTL = 4
+)
+
+var (
+ stateNames = map[State]string{
+ StateUnknown: "StateUnknown",
+ StateDisconnected: "StateDisconnected",
+ StateConnectedReadOnly: "StateConnectedReadOnly",
+ StateSaslAuthenticated: "StateSaslAuthenticated",
+ StateExpired: "StateExpired",
+ StateAuthFailed: "StateAuthFailed",
+ StateConnecting: "StateConnecting",
+ StateConnected: "StateConnected",
+ StateHasSession: "StateHasSession",
+ }
+)
+
+type State int32
+
+func (s State) String() string {
+ if name := stateNames[s]; name != "" {
+ return name
+ }
+ return "Unknown"
+}
+
+type ErrCode int32
+
+var (
+ ErrConnectionClosed = errors.New("zk: connection closed")
+ ErrUnknown = errors.New("zk: unknown error")
+ ErrAPIError = errors.New("zk: api error")
+ ErrNoNode = errors.New("zk: node does not exist")
+ ErrNoAuth = errors.New("zk: not authenticated")
+ ErrBadVersion = errors.New("zk: version conflict")
+ ErrNoChildrenForEphemerals = errors.New("zk: ephemeral nodes may not have children")
+ ErrNodeExists = errors.New("zk: node already exists")
+ ErrNotEmpty = errors.New("zk: node has children")
+ ErrSessionExpired = errors.New("zk: session has been expired by the server")
+ ErrInvalidACL = errors.New("zk: invalid ACL specified")
+ ErrInvalidFlags = errors.New("zk: invalid flags specified")
+ ErrAuthFailed = errors.New("zk: client authentication failed")
+ ErrClosing = errors.New("zk: zookeeper is closing")
+ ErrNothing = errors.New("zk: no server responsees to process")
+ ErrSessionMoved = errors.New("zk: session moved to another server, so operation is ignored")
+ ErrReconfigDisabled = errors.New("attempts to perform a reconfiguration operation when reconfiguration feature is disabled")
+ ErrBadArguments = errors.New("invalid arguments")
+ // ErrInvalidCallback = errors.New("zk: invalid callback specified")
+
+ errCodeToError = map[ErrCode]error{
+ 0: nil,
+ errAPIError: ErrAPIError,
+ errNoNode: ErrNoNode,
+ errNoAuth: ErrNoAuth,
+ errBadVersion: ErrBadVersion,
+ errNoChildrenForEphemerals: ErrNoChildrenForEphemerals,
+ errNodeExists: ErrNodeExists,
+ errNotEmpty: ErrNotEmpty,
+ errSessionExpired: ErrSessionExpired,
+ // errInvalidCallback: ErrInvalidCallback,
+ errInvalidAcl: ErrInvalidACL,
+ errAuthFailed: ErrAuthFailed,
+ errClosing: ErrClosing,
+ errNothing: ErrNothing,
+ errSessionMoved: ErrSessionMoved,
+ errZReconfigDisabled: ErrReconfigDisabled,
+ errBadArguments: ErrBadArguments,
+ }
+)
+
+func (e ErrCode) toError() error {
+ if err, ok := errCodeToError[e]; ok {
+ return err
+ }
+ return fmt.Errorf("unknown error: %v", e)
+}
+
+const (
+ errOk = 0
+ // System and server-side errors
+ errSystemError = -1
+ errRuntimeInconsistency = -2
+ errDataInconsistency = -3
+ errConnectionLoss = -4
+ errMarshallingError = -5
+ errUnimplemented = -6
+ errOperationTimeout = -7
+ errBadArguments = -8
+ errInvalidState = -9
+ // API errors
+ errAPIError ErrCode = -100
+ errNoNode ErrCode = -101 // *
+ errNoAuth ErrCode = -102
+ errBadVersion ErrCode = -103 // *
+ errNoChildrenForEphemerals ErrCode = -108
+ errNodeExists ErrCode = -110 // *
+ errNotEmpty ErrCode = -111
+ errSessionExpired ErrCode = -112
+ errInvalidCallback ErrCode = -113
+ errInvalidAcl ErrCode = -114
+ errAuthFailed ErrCode = -115
+ errClosing ErrCode = -116
+ errNothing ErrCode = -117
+ errSessionMoved ErrCode = -118
+ // Attempts to perform a reconfiguration operation when reconfiguration feature is disabled
+ errZReconfigDisabled ErrCode = -123
+)
+
+// Constants for ACL permissions
+const (
+ PermRead = 1 << iota
+ PermWrite
+ PermCreate
+ PermDelete
+ PermAdmin
+ PermAll = 0x1f
+)
+
+var (
+ emptyPassword = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ opNames = map[int32]string{
+ opNotify: "notify",
+ opCreate: "create",
+ opCreateContainer: "createContainer",
+ opCreateTTL: "createTTL",
+ opDelete: "delete",
+ opExists: "exists",
+ opGetData: "getData",
+ opSetData: "setData",
+ opGetAcl: "getACL",
+ opSetAcl: "setACL",
+ opGetChildren: "getChildren",
+ opSync: "sync",
+ opPing: "ping",
+ opGetChildren2: "getChildren2",
+ opCheck: "check",
+ opMulti: "multi",
+ opReconfig: "reconfig",
+ opClose: "close",
+ opSetAuth: "setAuth",
+ opSetWatches: "setWatches",
+
+ opWatcherEvent: "watcherEvent",
+ }
+)
+
+type EventType int32
+
+func (t EventType) String() string {
+ if name := eventNames[t]; name != "" {
+ return name
+ }
+ return "Unknown"
+}
+
+// Mode is used to build custom server modes (leader|follower|standalone).
+type Mode uint8
+
+func (m Mode) String() string {
+ if name := modeNames[m]; name != "" {
+ return name
+ }
+ return "unknown"
+}
+
+const (
+ ModeUnknown Mode = iota
+ ModeLeader Mode = iota
+ ModeFollower Mode = iota
+ ModeStandalone Mode = iota
+)
+
+var (
+ modeNames = map[Mode]string{
+ ModeLeader: "leader",
+ ModeFollower: "follower",
+ ModeStandalone: "standalone",
+ }
+)
diff --git a/vendor/github.com/samuel/go-zookeeper/zk/dnshostprovider.go b/vendor/github.com/go-zookeeper/zk/dnshostprovider.go
similarity index 100%
rename from vendor/github.com/samuel/go-zookeeper/zk/dnshostprovider.go
rename to vendor/github.com/go-zookeeper/zk/dnshostprovider.go
diff --git a/vendor/github.com/samuel/go-zookeeper/zk/flw.go b/vendor/github.com/go-zookeeper/zk/flw.go
similarity index 96%
rename from vendor/github.com/samuel/go-zookeeper/zk/flw.go
rename to vendor/github.com/go-zookeeper/zk/flw.go
index 1fb8b2aed..0ccc48661 100644
--- a/vendor/github.com/samuel/go-zookeeper/zk/flw.go
+++ b/vendor/github.com/go-zookeeper/zk/flw.go
@@ -24,7 +24,7 @@ func FLWSrvr(servers []string, timeout time.Duration) ([]*ServerStats, bool) {
// different parts of the regular expression that are required to parse the srvr output
const (
zrVer = `^Zookeeper version: ([A-Za-z0-9\.\-]+), built on (\d\d/\d\d/\d\d\d\d \d\d:\d\d [A-Za-z0-9:\+\-]+)`
- zrLat = `^Latency min/avg/max: (\d+)/(\d+)/(\d+)`
+ zrLat = `^Latency min/avg/max: (\d+)/([0-9.]+)/(\d+)`
zrNet = `^Received: (\d+).*\n^Sent: (\d+).*\n^Connections: (\d+).*\n^Outstanding: (\d+)`
zrState = `^Zxid: (0x[A-Za-z0-9]+).*\n^Mode: (\w+).*\n^Node count: (\d+)`
)
@@ -97,7 +97,7 @@ func FLWSrvr(servers []string, timeout time.Duration) ([]*ServerStats, bool) {
// within the regex above, these values must be numerical
// so we can avoid useless checking of the error return value
minLatency, _ := strconv.ParseInt(match[2], 0, 64)
- avgLatency, _ := strconv.ParseInt(match[3], 0, 64)
+ avgLatency, _ := strconv.ParseFloat(match[3], 64)
maxLatency, _ := strconv.ParseInt(match[4], 0, 64)
recv, _ := strconv.ParseInt(match[5], 0, 64)
sent, _ := strconv.ParseInt(match[6], 0, 64)
@@ -255,16 +255,12 @@ func fourLetterWord(server, command string, timeout time.Duration) ([]byte, erro
// once the command has been processed, but better safe than sorry
defer conn.Close()
- if err := conn.SetWriteDeadline(time.Now().Add(timeout)); err != nil {
- return nil, err
- }
+ conn.SetWriteDeadline(time.Now().Add(timeout))
_, err = conn.Write([]byte(command))
if err != nil {
return nil, err
}
- if err := conn.SetReadDeadline(time.Now().Add(timeout)); err != nil {
- return nil, err
- }
+ conn.SetReadDeadline(time.Now().Add(timeout))
return ioutil.ReadAll(conn)
}
diff --git a/vendor/github.com/go-zookeeper/zk/go.mod b/vendor/github.com/go-zookeeper/zk/go.mod
new file mode 100644
index 000000000..a2662730b
--- /dev/null
+++ b/vendor/github.com/go-zookeeper/zk/go.mod
@@ -0,0 +1,3 @@
+module github.com/go-zookeeper/zk
+
+go 1.13
diff --git a/vendor/github.com/samuel/go-zookeeper/zk/lock.go b/vendor/github.com/go-zookeeper/zk/lock.go
similarity index 82%
rename from vendor/github.com/samuel/go-zookeeper/zk/lock.go
rename to vendor/github.com/go-zookeeper/zk/lock.go
index 3c35a427c..33a6ecda3 100644
--- a/vendor/github.com/samuel/go-zookeeper/zk/lock.go
+++ b/vendor/github.com/go-zookeeper/zk/lock.go
@@ -36,13 +36,23 @@ func NewLock(c *Conn, path string, acl []ACL) *Lock {
func parseSeq(path string) (int, error) {
parts := strings.Split(path, "-")
+ // python client uses a __LOCK__ prefix
+ if len(parts) == 1 {
+ parts = strings.Split(path, "__")
+ }
return strconv.Atoi(parts[len(parts)-1])
}
-// Lock attempts to acquire the lock. It will wait to return until the lock
-// is acquired or an error occurs. If this instance already has the lock
-// then ErrDeadlock is returned.
+// Lock attempts to acquire the lock. It works like LockWithData, but it doesn't
+// write any data to the lock node.
func (l *Lock) Lock() error {
+ return l.LockWithData([]byte{})
+}
+
+// LockWithData attempts to acquire the lock, writing data into the lock node.
+// It will wait to return until the lock is acquired or an error occurs. If
+// this instance already has the lock then ErrDeadlock is returned.
+func (l *Lock) LockWithData(data []byte) error {
if l.lockPath != "" {
return ErrDeadlock
}
@@ -52,7 +62,7 @@ func (l *Lock) Lock() error {
path := ""
var err error
for i := 0; i < 3; i++ {
- path, err = l.c.CreateProtectedEphemeralSequential(prefix, []byte{}, l.acl)
+ path, err = l.c.CreateProtectedEphemeralSequential(prefix, data, l.acl)
if err == ErrNoNode {
// Create parent node.
parts := strings.Split(l.path, "/")
diff --git a/vendor/github.com/samuel/go-zookeeper/zk/structs.go b/vendor/github.com/go-zookeeper/zk/structs.go
similarity index 97%
rename from vendor/github.com/samuel/go-zookeeper/zk/structs.go
rename to vendor/github.com/go-zookeeper/zk/structs.go
index 9400c3c0b..e41d8c527 100644
--- a/vendor/github.com/samuel/go-zookeeper/zk/structs.go
+++ b/vendor/github.com/go-zookeeper/zk/structs.go
@@ -78,7 +78,7 @@ type ServerStats struct {
Received int64
NodeCount int64
MinLatency int64
- AvgLatency int64
+ AvgLatency float64
MaxLatency int64
Connections int64
Outstanding int64
@@ -165,6 +165,16 @@ type CreateRequest struct {
Flags int32
}
+type CreateContainerRequest CreateRequest
+
+type CreateTTLRequest struct {
+ Path string
+ Data []byte
+ Acl []ACL
+ Flags int32
+ Ttl int64 // ms
+}
+
type createResponse pathResponse
type DeleteRequest PathVersionRequest
type deleteResponse struct{}
@@ -589,6 +599,10 @@ func requestStructForOp(op int32) interface{} {
return &closeRequest{}
case opCreate:
return &CreateRequest{}
+ case opCreateContainer:
+ return &CreateContainerRequest{}
+ case opCreateTTL:
+ return &CreateTTLRequest{}
case opDelete:
return &DeleteRequest{}
case opExists:
diff --git a/vendor/github.com/go-zookeeper/zk/util.go b/vendor/github.com/go-zookeeper/zk/util.go
new file mode 100644
index 000000000..5a92b66ba
--- /dev/null
+++ b/vendor/github.com/go-zookeeper/zk/util.go
@@ -0,0 +1,119 @@
+package zk
+
+import (
+ "crypto/sha1"
+ "encoding/base64"
+ "fmt"
+ "math/rand"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+// AuthACL produces an ACL list containing a single ACL which uses the
+// provided permissions, with the scheme "auth", and ID "", which is used
+// by ZooKeeper to represent any authenticated user.
+func AuthACL(perms int32) []ACL {
+ return []ACL{{perms, "auth", ""}}
+}
+
+// WorldACL produces an ACL list containing a single ACL which uses the
+// provided permissions, with the scheme "world", and ID "anyone", which
+// is used by ZooKeeper to represent any user at all.
+func WorldACL(perms int32) []ACL {
+ return []ACL{{perms, "world", "anyone"}}
+}
+
+func DigestACL(perms int32, user, password string) []ACL {
+ userPass := []byte(fmt.Sprintf("%s:%s", user, password))
+ h := sha1.New()
+ if n, err := h.Write(userPass); err != nil || n != len(userPass) {
+ panic("SHA1 failed")
+ }
+ digest := base64.StdEncoding.EncodeToString(h.Sum(nil))
+ return []ACL{{perms, "digest", fmt.Sprintf("%s:%s", user, digest)}}
+}
+
+// FormatServers takes a slice of addresses, and makes sure they are in a format
+// that resembles :. If the server has no port provided, the
+// DefaultPort constant is added to the end.
+func FormatServers(servers []string) []string {
+ srvs := make([]string, len(servers))
+ for i, addr := range servers {
+ if strings.Contains(addr, ":") {
+ srvs[i] = addr
+ } else {
+ srvs[i] = addr + ":" + strconv.Itoa(DefaultPort)
+ }
+ }
+ return srvs
+}
+
+// stringShuffle performs a Fisher-Yates shuffle on a slice of strings
+func stringShuffle(s []string) {
+ for i := len(s) - 1; i > 0; i-- {
+ j := rand.Intn(i + 1)
+ s[i], s[j] = s[j], s[i]
+ }
+}
+
+// validatePath will make sure a path is valid before sending the request
+func validatePath(path string, isSequential bool) error {
+ if path == "" {
+ return ErrInvalidPath
+ }
+
+ if path[0] != '/' {
+ return ErrInvalidPath
+ }
+
+ n := len(path)
+ if n == 1 {
+ // path is just the root
+ return nil
+ }
+
+ if !isSequential && path[n-1] == '/' {
+ return ErrInvalidPath
+ }
+
+ // Start at rune 1 since we already know that the first character is
+ // a '/'.
+ for i, w := 1, 0; i < n; i += w {
+ r, width := utf8.DecodeRuneInString(path[i:])
+ switch {
+ case r == '\u0000':
+ return ErrInvalidPath
+ case r == '/':
+ last, _ := utf8.DecodeLastRuneInString(path[:i])
+ if last == '/' {
+ return ErrInvalidPath
+ }
+ case r == '.':
+ last, lastWidth := utf8.DecodeLastRuneInString(path[:i])
+
+ // Check for double dot
+ if last == '.' {
+ last, _ = utf8.DecodeLastRuneInString(path[:i-lastWidth])
+ }
+
+ if last == '/' {
+ if i+1 == n {
+ return ErrInvalidPath
+ }
+
+ next, _ := utf8.DecodeRuneInString(path[i+w:])
+ if next == '/' {
+ return ErrInvalidPath
+ }
+ }
+ case r >= '\u0000' && r <= '\u001f',
+ r >= '\u007f' && r <= '\u009f',
+ r >= '\uf000' && r <= '\uf8ff',
+ r >= '\ufff0' && r < '\uffff':
+ return ErrInvalidPath
+ }
+ w = width
+ }
+ return nil
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go
index 1ce0be2fa..f85c0cc81 100644
--- a/vendor/github.com/gogo/protobuf/proto/text_parser.go
+++ b/vendor/github.com/gogo/protobuf/proto/text_parser.go
@@ -318,7 +318,7 @@ func unescape(s string) (ch string, tail string, err error) {
if i > utf8.MaxRune {
return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
}
- return string(i), s, nil
+ return string(rune(i)), s, nil
}
return "", "", fmt.Errorf(`unknown escape \%c`, r)
}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/.gitignore b/vendor/github.com/golang-jwt/jwt/v4/.gitignore
new file mode 100644
index 000000000..09573e016
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/.gitignore
@@ -0,0 +1,4 @@
+.DS_Store
+bin
+.idea/
+
diff --git a/vendor/github.com/golang-jwt/jwt/v4/LICENSE b/vendor/github.com/golang-jwt/jwt/v4/LICENSE
new file mode 100644
index 000000000..35dbc2520
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/LICENSE
@@ -0,0 +1,9 @@
+Copyright (c) 2012 Dave Grijalva
+Copyright (c) 2021 golang-jwt maintainers
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
diff --git a/vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md b/vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md
new file mode 100644
index 000000000..0b6214930
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md
@@ -0,0 +1,22 @@
+## Migration Guide (v4.0.0)
+
+Starting from [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0]), the import path will be:
+
+ "github.com/golang-jwt/jwt/v4"
+
+The `/v4` version will be backwards compatible with existing `v3.x.y` tags in this repo, as well as
+`github.com/dgrijalva/jwt-go`. For most users this should be a drop-in replacement, if you're having
+troubles migrating, please open an issue.
+
+You can replace all occurrences of `github.com/dgrijalva/jwt-go` or `github.com/golang-jwt/jwt` with `github.com/golang-jwt/jwt/v4`, either manually or by using tools such as `sed` or `gofmt`.
+
+And then you'd typically run:
+
+```
+go get github.com/golang-jwt/jwt/v4
+go mod tidy
+```
+
+## Older releases (before v3.2.0)
+
+The original migration guide for older releases can be found at https://github.com/dgrijalva/jwt-go/blob/master/MIGRATION_GUIDE.md.
\ No newline at end of file
diff --git a/vendor/github.com/golang-jwt/jwt/v4/README.md b/vendor/github.com/golang-jwt/jwt/v4/README.md
new file mode 100644
index 000000000..96fe3b978
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/README.md
@@ -0,0 +1,113 @@
+# jwt-go
+
+[![build](https://github.com/golang-jwt/jwt/actions/workflows/build.yml/badge.svg)](https://github.com/golang-jwt/jwt/actions/workflows/build.yml)
+[![Go Reference](https://pkg.go.dev/badge/github.com/golang-jwt/jwt.svg)](https://pkg.go.dev/github.com/golang-jwt/jwt)
+
+A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](https://datatracker.ietf.org/doc/html/rfc7519).
+
+Starting with [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0) this project adds Go module support, but maintains backwards compataibility with older `v3.x.y` tags and upstream `github.com/dgrijalva/jwt-go`.
+See the `MIGRATION_GUIDE.md` for more information.
+
+> After the original author of the library suggested migrating the maintenance of `jwt-go`, a dedicated team of open source maintainers decided to clone the existing library into this repository. See [dgrijalva/jwt-go#462](https://github.com/dgrijalva/jwt-go/issues/462) for a detailed discussion on this topic.
+
+
+**SECURITY NOTICE:** Some older versions of Go have a security issue in the crypto/elliptic. Recommendation is to upgrade to at least 1.15 See issue [dgrijalva/jwt-go#216](https://github.com/dgrijalva/jwt-go/issues/216) for more detail.
+
+**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided.
+
+### Supported Go versions
+
+Our support of Go versions is aligned with Go's [version release policy](https://golang.org/doc/devel/release#policy).
+So we will support a major version of Go until there are two newer major releases.
+We no longer support building jwt-go with unsupported Go versions, as these contain security vulnerabilities
+which will not be fixed.
+
+## What the heck is a JWT?
+
+JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens.
+
+In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in Oauth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects, that have been [base64url](https://datatracker.ietf.org/doc/html/rfc4648) encoded. The last part is the signature, encoded the same way.
+
+The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used.
+
+The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [RFC 7519](https://datatracker.ietf.org/doc/html/rfc7519) for information about reserved keys and the proper way to add your own.
+
+## What's in the box?
+
+This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own.
+
+## Examples
+
+See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt) for examples of usage:
+
+* [Simple example of parsing and validating a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-Parse-Hmac)
+* [Simple example of building and signing a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-New-Hmac)
+* [Directory of Examples](https://pkg.go.dev/github.com/golang-jwt/jwt#pkg-examples)
+
+## Extensions
+
+This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`.
+
+Here's an example of an extension that integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS): https://github.com/someone1/gcp-jwt-go
+
+## Compliance
+
+This library was last reviewed to comply with [RTF 7519](https://datatracker.ietf.org/doc/html/rfc7519) dated May 2015 with a few notable differences:
+
+* In order to protect against accidental use of [Unsecured JWTs](https://datatracker.ietf.org/doc/html/rfc7519#section-6), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
+
+## Project Status & Versioning
+
+This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason).
+
+This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `main`. Periodically, versions will be tagged from `main`. You can find all the releases on [the project releases page](https://github.com/golang-jwt/jwt/releases).
+
+**BREAKING CHANGES:***
+A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.
+
+## Usage Tips
+
+### Signing vs Encryption
+
+A token is simply a JSON object that is signed by its author. This tells you exactly two things about the data:
+
+* The author of the token was in the possession of the signing secret
+* The data has not been modified since it was signed
+
+It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library.
+
+### Choosing a Signing Method
+
+There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric.
+
+Symmetric signing methods, such as HMAC, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though this rarely is enough to matter. Symmetric signing methods work the best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation.
+
+Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification.
+
+### Signing Methods and Key Types
+
+Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones:
+
+* The [HMAC signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation
+* The [RSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation
+* The [ECDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation
+
+### JWT and OAuth
+
+It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication.
+
+Without going too far down the rabbit hole, here's a description of the interaction of these technologies:
+
+* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth.
+* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token.
+* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL.
+
+### Troubleshooting
+
+This library uses descriptive error messages whenever possible. If you are not getting the expected result, have a look at the errors. The most common place people get stuck is providing the correct type of key to the parser. See the above section on signing methods and key types.
+
+## More
+
+Documentation can be found [on pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt).
+
+The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation.
diff --git a/vendor/github.com/form3tech-oss/jwt-go/VERSION_HISTORY.md b/vendor/github.com/golang-jwt/jwt/v4/VERSION_HISTORY.md
similarity index 84%
rename from vendor/github.com/form3tech-oss/jwt-go/VERSION_HISTORY.md
rename to vendor/github.com/golang-jwt/jwt/v4/VERSION_HISTORY.md
index 637029831..afbfc4e40 100644
--- a/vendor/github.com/form3tech-oss/jwt-go/VERSION_HISTORY.md
+++ b/vendor/github.com/golang-jwt/jwt/v4/VERSION_HISTORY.md
@@ -1,5 +1,22 @@
## `jwt-go` Version History
+#### 4.0.0
+
+* Introduces support for Go modules. The `v4` version will be backwards compatible with `v3.x.y`.
+
+#### 3.2.2
+
+* Starting from this release, we are adopting the policy to support the most 2 recent versions of Go currently available. By the time of this release, this is Go 1.15 and 1.16 ([#28](https://github.com/golang-jwt/jwt/pull/28)).
+* Fixed a potential issue that could occur when the verification of `exp`, `iat` or `nbf` was not required and contained invalid contents, i.e. non-numeric/date. Thanks for @thaJeztah for making us aware of that and @giorgos-f3 for originally reporting it to the formtech fork ([#40](https://github.com/golang-jwt/jwt/pull/40)).
+* Added support for EdDSA / ED25519 ([#36](https://github.com/golang-jwt/jwt/pull/36)).
+* Optimized allocations ([#33](https://github.com/golang-jwt/jwt/pull/33)).
+
+#### 3.2.1
+
+* **Import Path Change**: See MIGRATION_GUIDE.md for tips on updating your code
+ * Changed the import path from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt`
+* Fixed type confusing issue between `string` and `[]string` in `VerifyAudience` ([#12](https://github.com/golang-jwt/jwt/pull/12)). This fixes CVE-2020-26160
+
#### 3.2.0
* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation
@@ -115,4 +132,4 @@ It is likely the only integration change required here will be to change `func(t
* First versioned release
* API stabilized
* Supports creating, signing, parsing, and validating JWT tokens
-* Supports RS256 and HS256 signing methods
\ No newline at end of file
+* Supports RS256 and HS256 signing methods
diff --git a/vendor/github.com/form3tech-oss/jwt-go/claims.go b/vendor/github.com/golang-jwt/jwt/v4/claims.go
similarity index 75%
rename from vendor/github.com/form3tech-oss/jwt-go/claims.go
rename to vendor/github.com/golang-jwt/jwt/v4/claims.go
index 624890666..7c2f33bcb 100644
--- a/vendor/github.com/form3tech-oss/jwt-go/claims.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/claims.go
@@ -6,17 +6,17 @@ import (
"time"
)
-// For a type to be a Claims object, it must just have a Valid method that determines
+// Claims must just have a Valid method that determines
// if the token is invalid for any supported reason
type Claims interface {
Valid() error
}
-// Structured version of Claims Section, as referenced at
+// StandardClaims are a structured version of the Claims Section, as referenced at
// https://tools.ietf.org/html/rfc7519#section-4.1
// See examples for how to use this with your own claim types
type StandardClaims struct {
- Audience []string `json:"aud,omitempty"`
+ Audience string `json:"aud,omitempty"`
ExpiresAt int64 `json:"exp,omitempty"`
Id string `json:"jti,omitempty"`
IssuedAt int64 `json:"iat,omitempty"`
@@ -25,8 +25,7 @@ type StandardClaims struct {
Subject string `json:"sub,omitempty"`
}
-// Validates time based claims "exp, iat, nbf".
-// There is no accounting for clock skew.
+// Valid validates time based claims "exp, iat, nbf". There is no accounting for clock skew.
// As well, if any of the above claims are not in the token, it will still
// be considered a valid claim.
func (c StandardClaims) Valid() error {
@@ -35,18 +34,18 @@ func (c StandardClaims) Valid() error {
// The claims below are optional, by default, so if they are set to the
// default value in Go, let's not fail the verification for them.
- if c.VerifyExpiresAt(now, false) == false {
+ if !c.VerifyExpiresAt(now, false) {
delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0))
vErr.Inner = fmt.Errorf("token is expired by %v", delta)
vErr.Errors |= ValidationErrorExpired
}
- if c.VerifyIssuedAt(now, false) == false {
+ if !c.VerifyIssuedAt(now, false) {
vErr.Inner = fmt.Errorf("Token used before issued")
vErr.Errors |= ValidationErrorIssuedAt
}
- if c.VerifyNotBefore(now, false) == false {
+ if !c.VerifyNotBefore(now, false) {
vErr.Inner = fmt.Errorf("token is not valid yet")
vErr.Errors |= ValidationErrorNotValidYet
}
@@ -58,31 +57,31 @@ func (c StandardClaims) Valid() error {
return vErr
}
-// Compares the aud claim against cmp.
+// VerifyAudience compares the aud claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool {
- return verifyAud(c.Audience, cmp, req)
+ return verifyAud([]string{c.Audience}, cmp, req)
}
-// Compares the exp claim against cmp.
+// VerifyExpiresAt compares the exp claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool {
return verifyExp(c.ExpiresAt, cmp, req)
}
-// Compares the iat claim against cmp.
+// VerifyIssuedAt compares the iat claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool {
return verifyIat(c.IssuedAt, cmp, req)
}
-// Compares the iss claim against cmp.
+// VerifyIssuer compares the iss claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool {
return verifyIss(c.Issuer, cmp, req)
}
-// Compares the nbf claim against cmp.
+// VerifyNotBefore compares the nbf claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool {
return verifyNbf(c.NotBefore, cmp, req)
@@ -94,13 +93,23 @@ func verifyAud(aud []string, cmp string, required bool) bool {
if len(aud) == 0 {
return !required
}
+ // use a var here to keep constant time compare when looping over a number of claims
+ result := false
+ var stringClaims string
for _, a := range aud {
if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 {
- return true
+ result = true
}
+ stringClaims = stringClaims + a
}
- return false
+
+ // case where "" is sent in one or many aud claims
+ if len(stringClaims) == 0 {
+ return !required
+ }
+
+ return result
}
func verifyExp(exp int64, now int64, required bool) bool {
diff --git a/vendor/github.com/form3tech-oss/jwt-go/doc.go b/vendor/github.com/golang-jwt/jwt/v4/doc.go
similarity index 100%
rename from vendor/github.com/form3tech-oss/jwt-go/doc.go
rename to vendor/github.com/golang-jwt/jwt/v4/doc.go
diff --git a/vendor/github.com/form3tech-oss/jwt-go/ecdsa.go b/vendor/github.com/golang-jwt/jwt/v4/ecdsa.go
similarity index 79%
rename from vendor/github.com/form3tech-oss/jwt-go/ecdsa.go
rename to vendor/github.com/golang-jwt/jwt/v4/ecdsa.go
index f97738124..eac023fc6 100644
--- a/vendor/github.com/form3tech-oss/jwt-go/ecdsa.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/ecdsa.go
@@ -13,7 +13,7 @@ var (
ErrECDSAVerification = errors.New("crypto/ecdsa: verification error")
)
-// Implements the ECDSA family of signing methods signing methods
+// SigningMethodECDSA implements the ECDSA family of signing methods.
// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification
type SigningMethodECDSA struct {
Name string
@@ -53,7 +53,7 @@ func (m *SigningMethodECDSA) Alg() string {
return m.Name
}
-// Implements the Verify method from SigningMethod
+// Verify implements token verification for the SigningMethod.
// For this verify method, key must be an ecdsa.PublicKey struct
func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error {
var err error
@@ -88,14 +88,14 @@ func (m *SigningMethodECDSA) Verify(signingString, signature string, key interfa
hasher.Write([]byte(signingString))
// Verify the signature
- if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true {
+ if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus {
return nil
- } else {
- return ErrECDSAVerification
}
+
+ return ErrECDSAVerification
}
-// Implements the Sign method from SigningMethod
+// Sign implements token signing for the SigningMethod.
// For this signing method, key must be an ecdsa.PrivateKey struct
func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) {
// Get the key
@@ -128,18 +128,12 @@ func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string
keyBytes += 1
}
- // We serialize the outpus (r and s) into big-endian byte arrays and pad
- // them with zeros on the left to make sure the sizes work out. Both arrays
- // must be keyBytes long, and the output must be 2*keyBytes long.
- rBytes := r.Bytes()
- rBytesPadded := make([]byte, keyBytes)
- copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
-
- sBytes := s.Bytes()
- sBytesPadded := make([]byte, keyBytes)
- copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
-
- out := append(rBytesPadded, sBytesPadded...)
+ // We serialize the outputs (r and s) into big-endian byte arrays
+ // padded with zeros on the left to make sure the sizes work out.
+ // Output must be 2*keyBytes long.
+ out := make([]byte, 2*keyBytes)
+ r.FillBytes(out[0:keyBytes]) // r is assigned to the first half of output.
+ s.FillBytes(out[keyBytes:]) // s is assigned to the second half of output.
return EncodeSegment(out), nil
} else {
diff --git a/vendor/github.com/form3tech-oss/jwt-go/ecdsa_utils.go b/vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go
similarity index 81%
rename from vendor/github.com/form3tech-oss/jwt-go/ecdsa_utils.go
rename to vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go
index db9f4be7d..5700636d3 100644
--- a/vendor/github.com/form3tech-oss/jwt-go/ecdsa_utils.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go
@@ -8,11 +8,11 @@ import (
)
var (
- ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key")
- ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key")
+ ErrNotECPublicKey = errors.New("key is not a valid ECDSA public key")
+ ErrNotECPrivateKey = errors.New("key is not a valid ECDSA private key")
)
-// Parse PEM encoded Elliptic Curve Private Key Structure
+// ParseECPrivateKeyFromPEM parses a PEM encoded Elliptic Curve Private Key Structure
func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
var err error
@@ -39,7 +39,7 @@ func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
return pkey, nil
}
-// Parse PEM encoded PKCS1 or PKCS8 public key
+// ParseECPublicKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 public key
func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) {
var err error
diff --git a/vendor/github.com/golang-jwt/jwt/v4/ed25519.go b/vendor/github.com/golang-jwt/jwt/v4/ed25519.go
new file mode 100644
index 000000000..9f40dc0c7
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/ed25519.go
@@ -0,0 +1,81 @@
+package jwt
+
+import (
+ "errors"
+
+ "crypto/ed25519"
+)
+
+var (
+ ErrEd25519Verification = errors.New("ed25519: verification error")
+)
+
+// SigningMethodEd25519 implements the EdDSA family.
+// Expects ed25519.PrivateKey for signing and ed25519.PublicKey for verification
+type SigningMethodEd25519 struct{}
+
+// Specific instance for EdDSA
+var (
+ SigningMethodEdDSA *SigningMethodEd25519
+)
+
+func init() {
+ SigningMethodEdDSA = &SigningMethodEd25519{}
+ RegisterSigningMethod(SigningMethodEdDSA.Alg(), func() SigningMethod {
+ return SigningMethodEdDSA
+ })
+}
+
+func (m *SigningMethodEd25519) Alg() string {
+ return "EdDSA"
+}
+
+// Verify implements token verification for the SigningMethod.
+// For this verify method, key must be an ed25519.PublicKey
+func (m *SigningMethodEd25519) Verify(signingString, signature string, key interface{}) error {
+ var err error
+ var ed25519Key ed25519.PublicKey
+ var ok bool
+
+ if ed25519Key, ok = key.(ed25519.PublicKey); !ok {
+ return ErrInvalidKeyType
+ }
+
+ if len(ed25519Key) != ed25519.PublicKeySize {
+ return ErrInvalidKey
+ }
+
+ // Decode the signature
+ var sig []byte
+ if sig, err = DecodeSegment(signature); err != nil {
+ return err
+ }
+
+ // Verify the signature
+ if !ed25519.Verify(ed25519Key, []byte(signingString), sig) {
+ return ErrEd25519Verification
+ }
+
+ return nil
+}
+
+// Sign implements token signing for the SigningMethod.
+// For this signing method, key must be an ed25519.PrivateKey
+func (m *SigningMethodEd25519) Sign(signingString string, key interface{}) (string, error) {
+ var ed25519Key ed25519.PrivateKey
+ var ok bool
+
+ if ed25519Key, ok = key.(ed25519.PrivateKey); !ok {
+ return "", ErrInvalidKeyType
+ }
+
+ // ed25519.Sign panics if private key not equal to ed25519.PrivateKeySize
+ // this allows to avoid recover usage
+ if len(ed25519Key) != ed25519.PrivateKeySize {
+ return "", ErrInvalidKey
+ }
+
+ // Sign the string and return the encoded result
+ sig := ed25519.Sign(ed25519Key, []byte(signingString))
+ return EncodeSegment(sig), nil
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go b/vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go
new file mode 100644
index 000000000..cdb5e68e8
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go
@@ -0,0 +1,64 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/ed25519"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+)
+
+var (
+ ErrNotEdPrivateKey = errors.New("key is not a valid Ed25519 private key")
+ ErrNotEdPublicKey = errors.New("key is not a valid Ed25519 public key")
+)
+
+// ParseEdPrivateKeyFromPEM parses a PEM-encoded Edwards curve private key
+func ParseEdPrivateKeyFromPEM(key []byte) (crypto.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+ return nil, err
+ }
+
+ var pkey ed25519.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(ed25519.PrivateKey); !ok {
+ return nil, ErrNotEdPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// ParseEdPublicKeyFromPEM parses a PEM-encoded Edwards curve public key
+func ParseEdPublicKeyFromPEM(key []byte) (crypto.PublicKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+ return nil, err
+ }
+
+ var pkey ed25519.PublicKey
+ var ok bool
+ if pkey, ok = parsedKey.(ed25519.PublicKey); !ok {
+ return nil, ErrNotEdPublicKey
+ }
+
+ return pkey, nil
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/errors.go b/vendor/github.com/golang-jwt/jwt/v4/errors.go
new file mode 100644
index 000000000..f309878b3
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/errors.go
@@ -0,0 +1,59 @@
+package jwt
+
+import (
+ "errors"
+)
+
+// Error constants
+var (
+ ErrInvalidKey = errors.New("key is invalid")
+ ErrInvalidKeyType = errors.New("key is of invalid type")
+ ErrHashUnavailable = errors.New("the requested hash function is unavailable")
+)
+
+// The errors that might occur when parsing and validating a token
+const (
+ ValidationErrorMalformed uint32 = 1 << iota // Token is malformed
+ ValidationErrorUnverifiable // Token could not be verified because of signing problems
+ ValidationErrorSignatureInvalid // Signature validation failed
+
+ // Standard Claim validation errors
+ ValidationErrorAudience // AUD validation failed
+ ValidationErrorExpired // EXP validation failed
+ ValidationErrorIssuedAt // IAT validation failed
+ ValidationErrorIssuer // ISS validation failed
+ ValidationErrorNotValidYet // NBF validation failed
+ ValidationErrorId // JTI validation failed
+ ValidationErrorClaimsInvalid // Generic claims validation error
+)
+
+// NewValidationError is a helper for constructing a ValidationError with a string error message
+func NewValidationError(errorText string, errorFlags uint32) *ValidationError {
+ return &ValidationError{
+ text: errorText,
+ Errors: errorFlags,
+ }
+}
+
+// ValidationError represents an error from Parse if token is not valid
+type ValidationError struct {
+ Inner error // stores the error returned by external dependencies, i.e.: KeyFunc
+ Errors uint32 // bitfield. see ValidationError... constants
+ text string // errors that do not have a valid error just have text
+}
+
+// Error is the implementation of the err interface.
+func (e ValidationError) Error() string {
+ if e.Inner != nil {
+ return e.Inner.Error()
+ } else if e.text != "" {
+ return e.text
+ } else {
+ return "token is invalid"
+ }
+}
+
+// No errors
+func (e *ValidationError) valid() bool {
+ return e.Errors == 0
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/go.mod b/vendor/github.com/golang-jwt/jwt/v4/go.mod
new file mode 100644
index 000000000..6bc53fdcb
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/go.mod
@@ -0,0 +1,3 @@
+module github.com/golang-jwt/jwt/v4
+
+go 1.15
diff --git a/vendor/github.com/golang-jwt/jwt/v4/go.sum b/vendor/github.com/golang-jwt/jwt/v4/go.sum
new file mode 100644
index 000000000..e69de29bb
diff --git a/vendor/github.com/form3tech-oss/jwt-go/hmac.go b/vendor/github.com/golang-jwt/jwt/v4/hmac.go
similarity index 90%
rename from vendor/github.com/form3tech-oss/jwt-go/hmac.go
rename to vendor/github.com/golang-jwt/jwt/v4/hmac.go
index addbe5d40..011f68a27 100644
--- a/vendor/github.com/form3tech-oss/jwt-go/hmac.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/hmac.go
@@ -6,7 +6,7 @@ import (
"errors"
)
-// Implements the HMAC-SHA family of signing methods signing methods
+// SigningMethodHMAC implements the HMAC-SHA family of signing methods.
// Expects key type of []byte for both signing and validation
type SigningMethodHMAC struct {
Name string
@@ -45,7 +45,7 @@ func (m *SigningMethodHMAC) Alg() string {
return m.Name
}
-// Verify the signature of HSXXX tokens. Returns nil if the signature is valid.
+// Verify implements token verification for the SigningMethod. Returns nil if the signature is valid.
func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error {
// Verify the key is the right type
keyBytes, ok := key.([]byte)
@@ -77,7 +77,7 @@ func (m *SigningMethodHMAC) Verify(signingString, signature string, key interfac
return nil
}
-// Implements the Sign method from SigningMethod for this signing method.
+// Sign implements token signing for the SigningMethod.
// Key must be []byte
func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) {
if keyBytes, ok := key.([]byte); ok {
diff --git a/vendor/github.com/golang-jwt/jwt/v4/map_claims.go b/vendor/github.com/golang-jwt/jwt/v4/map_claims.go
new file mode 100644
index 000000000..7e00e753d
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/map_claims.go
@@ -0,0 +1,120 @@
+package jwt
+
+import (
+ "encoding/json"
+ "errors"
+ // "fmt"
+)
+
+// MapClaims is a claims type that uses the map[string]interface{} for JSON decoding.
+// This is the default claims type if you don't supply one
+type MapClaims map[string]interface{}
+
+// VerifyAudience Compares the aud claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyAudience(cmp string, req bool) bool {
+ var aud []string
+ switch v := m["aud"].(type) {
+ case string:
+ aud = append(aud, v)
+ case []string:
+ aud = v
+ case []interface{}:
+ for _, a := range v {
+ vs, ok := a.(string)
+ if !ok {
+ return false
+ }
+ aud = append(aud, vs)
+ }
+ }
+ return verifyAud(aud, cmp, req)
+}
+
+// VerifyExpiresAt compares the exp claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool {
+ exp, ok := m["exp"]
+ if !ok {
+ return !req
+ }
+ switch expType := exp.(type) {
+ case float64:
+ return verifyExp(int64(expType), cmp, req)
+ case json.Number:
+ v, _ := expType.Int64()
+ return verifyExp(v, cmp, req)
+ }
+ return false
+}
+
+// VerifyIssuedAt compares the iat claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool {
+ iat, ok := m["iat"]
+ if !ok {
+ return !req
+ }
+ switch iatType := iat.(type) {
+ case float64:
+ return verifyIat(int64(iatType), cmp, req)
+ case json.Number:
+ v, _ := iatType.Int64()
+ return verifyIat(v, cmp, req)
+ }
+ return false
+}
+
+// VerifyIssuer compares the iss claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyIssuer(cmp string, req bool) bool {
+ iss, _ := m["iss"].(string)
+ return verifyIss(iss, cmp, req)
+}
+
+// VerifyNotBefore compares the nbf claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool {
+ nbf, ok := m["nbf"]
+ if !ok {
+ return !req
+ }
+ switch nbfType := nbf.(type) {
+ case float64:
+ return verifyNbf(int64(nbfType), cmp, req)
+ case json.Number:
+ v, _ := nbfType.Int64()
+ return verifyNbf(v, cmp, req)
+ }
+ return false
+}
+
+// Valid validates time based claims "exp, iat, nbf".
+// There is no accounting for clock skew.
+// As well, if any of the above claims are not in the token, it will still
+// be considered a valid claim.
+func (m MapClaims) Valid() error {
+ vErr := new(ValidationError)
+ now := TimeFunc().Unix()
+
+ if !m.VerifyExpiresAt(now, false) {
+ vErr.Inner = errors.New("Token is expired")
+ vErr.Errors |= ValidationErrorExpired
+ }
+
+ if !m.VerifyIssuedAt(now, false) {
+ vErr.Inner = errors.New("Token used before issued")
+ vErr.Errors |= ValidationErrorIssuedAt
+ }
+
+ if !m.VerifyNotBefore(now, false) {
+ vErr.Inner = errors.New("Token is not valid yet")
+ vErr.Errors |= ValidationErrorNotValidYet
+ }
+
+ if vErr.valid() {
+ return nil
+ }
+
+ return vErr
+}
diff --git a/vendor/github.com/form3tech-oss/jwt-go/none.go b/vendor/github.com/golang-jwt/jwt/v4/none.go
similarity index 94%
rename from vendor/github.com/form3tech-oss/jwt-go/none.go
rename to vendor/github.com/golang-jwt/jwt/v4/none.go
index f04d189d0..f19835d20 100644
--- a/vendor/github.com/form3tech-oss/jwt-go/none.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/none.go
@@ -1,6 +1,6 @@
package jwt
-// Implements the none signing method. This is required by the spec
+// SigningMethodNone implements the none signing method. This is required by the spec
// but you probably should never use it.
var SigningMethodNone *signingMethodNone
diff --git a/vendor/github.com/golang-jwt/jwt/v4/parser.go b/vendor/github.com/golang-jwt/jwt/v4/parser.go
new file mode 100644
index 000000000..0c811f311
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/parser.go
@@ -0,0 +1,148 @@
+package jwt
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+type Parser struct {
+ ValidMethods []string // If populated, only these methods will be considered valid
+ UseJSONNumber bool // Use JSON Number format in JSON decoder
+ SkipClaimsValidation bool // Skip claims validation during token parsing
+}
+
+// Parse parses, validates, and returns a token.
+// keyFunc will receive the parsed token and should return the key for validating.
+// If everything is kosher, err will be nil
+func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
+ return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc)
+}
+
+func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
+ token, parts, err := p.ParseUnverified(tokenString, claims)
+ if err != nil {
+ return token, err
+ }
+
+ // Verify signing method is in the required set
+ if p.ValidMethods != nil {
+ var signingMethodValid = false
+ var alg = token.Method.Alg()
+ for _, m := range p.ValidMethods {
+ if m == alg {
+ signingMethodValid = true
+ break
+ }
+ }
+ if !signingMethodValid {
+ // signing method is not in the listed set
+ return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid)
+ }
+ }
+
+ // Lookup key
+ var key interface{}
+ if keyFunc == nil {
+ // keyFunc was not provided. short circuiting validation
+ return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable)
+ }
+ if key, err = keyFunc(token); err != nil {
+ // keyFunc returned an error
+ if ve, ok := err.(*ValidationError); ok {
+ return token, ve
+ }
+ return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable}
+ }
+
+ vErr := &ValidationError{}
+
+ // Validate Claims
+ if !p.SkipClaimsValidation {
+ if err := token.Claims.Valid(); err != nil {
+
+ // If the Claims Valid returned an error, check if it is a validation error,
+ // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set
+ if e, ok := err.(*ValidationError); !ok {
+ vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid}
+ } else {
+ vErr = e
+ }
+ }
+ }
+
+ // Perform validation
+ token.Signature = parts[2]
+ if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil {
+ vErr.Inner = err
+ vErr.Errors |= ValidationErrorSignatureInvalid
+ }
+
+ if vErr.valid() {
+ token.Valid = true
+ return token, nil
+ }
+
+ return token, vErr
+}
+
+// ParseUnverified parses the token but doesn't validate the signature.
+//
+// WARNING: Don't use this method unless you know what you're doing.
+//
+// It's only ever useful in cases where you know the signature is valid (because it has
+// been checked previously in the stack) and you want to extract values from it.
+func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) {
+ parts = strings.Split(tokenString, ".")
+ if len(parts) != 3 {
+ return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed)
+ }
+
+ token = &Token{Raw: tokenString}
+
+ // parse Header
+ var headerBytes []byte
+ if headerBytes, err = DecodeSegment(parts[0]); err != nil {
+ if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") {
+ return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed)
+ }
+ return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+ }
+ if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
+ return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+ }
+
+ // parse Claims
+ var claimBytes []byte
+ token.Claims = claims
+
+ if claimBytes, err = DecodeSegment(parts[1]); err != nil {
+ return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+ }
+ dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
+ if p.UseJSONNumber {
+ dec.UseNumber()
+ }
+ // JSON Decode. Special case for map type to avoid weird pointer behavior
+ if c, ok := token.Claims.(MapClaims); ok {
+ err = dec.Decode(&c)
+ } else {
+ err = dec.Decode(&claims)
+ }
+ // Handle decode error
+ if err != nil {
+ return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+ }
+
+ // Lookup signature method
+ if method, ok := token.Header["alg"].(string); ok {
+ if token.Method = GetSigningMethod(method); token.Method == nil {
+ return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable)
+ }
+ } else {
+ return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable)
+ }
+
+ return token, parts, nil
+}
diff --git a/vendor/github.com/form3tech-oss/jwt-go/rsa.go b/vendor/github.com/golang-jwt/jwt/v4/rsa.go
similarity index 92%
rename from vendor/github.com/form3tech-oss/jwt-go/rsa.go
rename to vendor/github.com/golang-jwt/jwt/v4/rsa.go
index e4caf1ca4..b910b19c0 100644
--- a/vendor/github.com/form3tech-oss/jwt-go/rsa.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/rsa.go
@@ -6,7 +6,7 @@ import (
"crypto/rsa"
)
-// Implements the RSA family of signing methods signing methods
+// SigningMethodRSA implements the RSA family of signing methods.
// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation
type SigningMethodRSA struct {
Name string
@@ -44,7 +44,7 @@ func (m *SigningMethodRSA) Alg() string {
return m.Name
}
-// Implements the Verify method from SigningMethod
+// Verify implements token verification for the SigningMethod
// For this signing method, must be an *rsa.PublicKey structure.
func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error {
var err error
@@ -73,7 +73,7 @@ func (m *SigningMethodRSA) Verify(signingString, signature string, key interface
return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig)
}
-// Implements the Sign method from SigningMethod
+// Sign implements token signing for the SigningMethod
// For this signing method, must be an *rsa.PrivateKey structure.
func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) {
var rsaKey *rsa.PrivateKey
diff --git a/vendor/github.com/form3tech-oss/jwt-go/rsa_pss.go b/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go
similarity index 94%
rename from vendor/github.com/form3tech-oss/jwt-go/rsa_pss.go
rename to vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go
index c01470864..5a8502feb 100644
--- a/vendor/github.com/form3tech-oss/jwt-go/rsa_pss.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go
@@ -8,7 +8,7 @@ import (
"crypto/rsa"
)
-// Implements the RSAPSS family of signing methods signing methods
+// SigningMethodRSAPSS implements the RSAPSS family of signing methods signing methods
type SigningMethodRSAPSS struct {
*SigningMethodRSA
Options *rsa.PSSOptions
@@ -79,7 +79,7 @@ func init() {
})
}
-// Implements the Verify method from SigningMethod
+// Verify implements token verification for the SigningMethod.
// For this verify method, key must be an rsa.PublicKey struct
func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error {
var err error
@@ -113,7 +113,7 @@ func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interf
return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, opts)
}
-// Implements the Sign method from SigningMethod
+// Sign implements token signing for the SigningMethod.
// For this signing method, key must be an rsa.PrivateKey struct
func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) {
var rsaKey *rsa.PrivateKey
diff --git a/vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go b/vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go
new file mode 100644
index 000000000..1966c450b
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go
@@ -0,0 +1,105 @@
+package jwt
+
+import (
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+)
+
+var (
+ ErrKeyMustBePEMEncoded = errors.New("invalid key: Key must be a PEM encoded PKCS1 or PKCS8 key")
+ ErrNotRSAPrivateKey = errors.New("key is not a valid RSA private key")
+ ErrNotRSAPublicKey = errors.New("key is not a valid RSA public key")
+)
+
+// ParseRSAPrivateKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 private key
+func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+ return nil, err
+ }
+ }
+
+ var pkey *rsa.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+ return nil, ErrNotRSAPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// ParseRSAPrivateKeyFromPEMWithPassword parses a PEM encoded PKCS1 or PKCS8 private key protected with password
+//
+// Deprecated: This function is deprecated and should not be used anymore. It uses the deprecated x509.DecryptPEMBlock
+// function, which was deprecated since RFC 1423 is regarded insecure by design. Unfortunately, there is no alternative
+// in the Go standard library for now. See https://github.com/golang/go/issues/8860.
+func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ var parsedKey interface{}
+
+ var blockDecrypted []byte
+ if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil {
+ return nil, err
+ }
+
+ if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil {
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil {
+ return nil, err
+ }
+ }
+
+ var pkey *rsa.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+ return nil, ErrNotRSAPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// ParseRSAPublicKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 public key
+func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+ if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+ parsedKey = cert.PublicKey
+ } else {
+ return nil, err
+ }
+ }
+
+ var pkey *rsa.PublicKey
+ var ok bool
+ if pkey, ok = parsedKey.(*rsa.PublicKey); !ok {
+ return nil, ErrNotRSAPublicKey
+ }
+
+ return pkey, nil
+}
diff --git a/vendor/github.com/form3tech-oss/jwt-go/signing_method.go b/vendor/github.com/golang-jwt/jwt/v4/signing_method.go
similarity index 79%
rename from vendor/github.com/form3tech-oss/jwt-go/signing_method.go
rename to vendor/github.com/golang-jwt/jwt/v4/signing_method.go
index ed1f212b2..3269170f3 100644
--- a/vendor/github.com/form3tech-oss/jwt-go/signing_method.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/signing_method.go
@@ -7,14 +7,14 @@ import (
var signingMethods = map[string]func() SigningMethod{}
var signingMethodLock = new(sync.RWMutex)
-// Implement SigningMethod to add new methods for signing or verifying tokens.
+// SigningMethod can be used add new methods for signing or verifying tokens.
type SigningMethod interface {
Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid
Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error
Alg() string // returns the alg identifier for this method (example: 'HS256')
}
-// Register the "alg" name and a factory function for signing method.
+// RegisterSigningMethod registers the "alg" name and a factory function for signing method.
// This is typically done during init() in the method's implementation
func RegisterSigningMethod(alg string, f func() SigningMethod) {
signingMethodLock.Lock()
@@ -23,7 +23,7 @@ func RegisterSigningMethod(alg string, f func() SigningMethod) {
signingMethods[alg] = f
}
-// Get a signing method from an "alg" string
+// GetSigningMethod retrieves a signing method from an "alg" string
func GetSigningMethod(alg string) (method SigningMethod) {
signingMethodLock.RLock()
defer signingMethodLock.RUnlock()
diff --git a/vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf b/vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf
new file mode 100644
index 000000000..53745d51d
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf
@@ -0,0 +1 @@
+checks = ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1023"]
diff --git a/vendor/github.com/golang-jwt/jwt/v4/token.go b/vendor/github.com/golang-jwt/jwt/v4/token.go
new file mode 100644
index 000000000..b896acb0b
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/token.go
@@ -0,0 +1,110 @@
+package jwt
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "strings"
+ "time"
+)
+
+// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time).
+// You can override it to use another time value. This is useful for testing or if your
+// server uses a different time zone than your tokens.
+var TimeFunc = time.Now
+
+// Keyfunc will be used by the Parse methods as a callback function to supply
+// the key for verification. The function receives the parsed,
+// but unverified Token. This allows you to use properties in the
+// Header of the token (such as `kid`) to identify which key to use.
+type Keyfunc func(*Token) (interface{}, error)
+
+// Token represents a JWT Token. Different fields will be used depending on whether you're
+// creating or parsing/verifying a token.
+type Token struct {
+ Raw string // The raw token. Populated when you Parse a token
+ Method SigningMethod // The signing method used or to be used
+ Header map[string]interface{} // The first segment of the token
+ Claims Claims // The second segment of the token
+ Signature string // The third segment of the token. Populated when you Parse a token
+ Valid bool // Is the token valid? Populated when you Parse/Verify a token
+}
+
+// New creates a new Token. Takes a signing method
+func New(method SigningMethod) *Token {
+ return NewWithClaims(method, MapClaims{})
+}
+
+func NewWithClaims(method SigningMethod, claims Claims) *Token {
+ return &Token{
+ Header: map[string]interface{}{
+ "typ": "JWT",
+ "alg": method.Alg(),
+ },
+ Claims: claims,
+ Method: method,
+ }
+}
+
+// SignedString retrieves the complete, signed token
+func (t *Token) SignedString(key interface{}) (string, error) {
+ var sig, sstr string
+ var err error
+ if sstr, err = t.SigningString(); err != nil {
+ return "", err
+ }
+ if sig, err = t.Method.Sign(sstr, key); err != nil {
+ return "", err
+ }
+ return strings.Join([]string{sstr, sig}, "."), nil
+}
+
+// SigningString generates the signing string. This is the
+// most expensive part of the whole deal. Unless you
+// need this for something special, just go straight for
+// the SignedString.
+func (t *Token) SigningString() (string, error) {
+ var err error
+ parts := make([]string, 2)
+ for i := range parts {
+ var jsonValue []byte
+ if i == 0 {
+ if jsonValue, err = json.Marshal(t.Header); err != nil {
+ return "", err
+ }
+ } else {
+ if jsonValue, err = json.Marshal(t.Claims); err != nil {
+ return "", err
+ }
+ }
+
+ parts[i] = EncodeSegment(jsonValue)
+ }
+ return strings.Join(parts, "."), nil
+}
+
+// Parse parses, validates, and returns a token.
+// keyFunc will receive the parsed token and should return the key for validating.
+// If everything is kosher, err will be nil
+func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
+ return new(Parser).Parse(tokenString, keyFunc)
+}
+
+func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
+ return new(Parser).ParseWithClaims(tokenString, claims, keyFunc)
+}
+
+// EncodeSegment encodes a JWT specific base64url encoding with padding stripped
+//
+// Deprecated: In a future release, we will demote this function to a non-exported function, since it
+// should only be used internally
+func EncodeSegment(seg []byte) string {
+ return base64.RawURLEncoding.EncodeToString(seg)
+}
+
+// DecodeSegment decodes a JWT specific base64url encoding with padding stripped
+//
+// Deprecated: In a future release, we will demote this function to a non-exported function, since it
+// should only be used internally
+func DecodeSegment(seg string) ([]byte, error) {
+ return base64.RawURLEncoding.DecodeString(seg)
+}
diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go
index 1e7ff6420..066b4323b 100644
--- a/vendor/github.com/golang/protobuf/proto/registry.go
+++ b/vendor/github.com/golang/protobuf/proto/registry.go
@@ -13,6 +13,7 @@ import (
"strings"
"sync"
+ "google.golang.org/protobuf/reflect/protodesc"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/runtime/protoimpl"
@@ -62,14 +63,7 @@ func FileDescriptor(s filePath) fileDescGZIP {
// Find the descriptor in the v2 registry.
var b []byte
if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil {
- if fd, ok := fd.(interface{ ProtoLegacyRawDesc() []byte }); ok {
- b = fd.ProtoLegacyRawDesc()
- } else {
- // TODO: Use protodesc.ToFileDescriptorProto to construct
- // a descriptorpb.FileDescriptorProto and marshal it.
- // However, doing so causes the proto package to have a dependency
- // on descriptorpb, leading to cyclic dependency issues.
- }
+ b, _ = Marshal(protodesc.ToFileDescriptorProto(fd))
}
// Locally cache the raw descriptor form for the file.
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
new file mode 100644
index 000000000..63dc05785
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
@@ -0,0 +1,200 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
+
+package descriptor
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+)
+
+// Symbols defined in public import of google/protobuf/descriptor.proto.
+
+type FieldDescriptorProto_Type = descriptorpb.FieldDescriptorProto_Type
+
+const FieldDescriptorProto_TYPE_DOUBLE = descriptorpb.FieldDescriptorProto_TYPE_DOUBLE
+const FieldDescriptorProto_TYPE_FLOAT = descriptorpb.FieldDescriptorProto_TYPE_FLOAT
+const FieldDescriptorProto_TYPE_INT64 = descriptorpb.FieldDescriptorProto_TYPE_INT64
+const FieldDescriptorProto_TYPE_UINT64 = descriptorpb.FieldDescriptorProto_TYPE_UINT64
+const FieldDescriptorProto_TYPE_INT32 = descriptorpb.FieldDescriptorProto_TYPE_INT32
+const FieldDescriptorProto_TYPE_FIXED64 = descriptorpb.FieldDescriptorProto_TYPE_FIXED64
+const FieldDescriptorProto_TYPE_FIXED32 = descriptorpb.FieldDescriptorProto_TYPE_FIXED32
+const FieldDescriptorProto_TYPE_BOOL = descriptorpb.FieldDescriptorProto_TYPE_BOOL
+const FieldDescriptorProto_TYPE_STRING = descriptorpb.FieldDescriptorProto_TYPE_STRING
+const FieldDescriptorProto_TYPE_GROUP = descriptorpb.FieldDescriptorProto_TYPE_GROUP
+const FieldDescriptorProto_TYPE_MESSAGE = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE
+const FieldDescriptorProto_TYPE_BYTES = descriptorpb.FieldDescriptorProto_TYPE_BYTES
+const FieldDescriptorProto_TYPE_UINT32 = descriptorpb.FieldDescriptorProto_TYPE_UINT32
+const FieldDescriptorProto_TYPE_ENUM = descriptorpb.FieldDescriptorProto_TYPE_ENUM
+const FieldDescriptorProto_TYPE_SFIXED32 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED32
+const FieldDescriptorProto_TYPE_SFIXED64 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED64
+const FieldDescriptorProto_TYPE_SINT32 = descriptorpb.FieldDescriptorProto_TYPE_SINT32
+const FieldDescriptorProto_TYPE_SINT64 = descriptorpb.FieldDescriptorProto_TYPE_SINT64
+
+var FieldDescriptorProto_Type_name = descriptorpb.FieldDescriptorProto_Type_name
+var FieldDescriptorProto_Type_value = descriptorpb.FieldDescriptorProto_Type_value
+
+type FieldDescriptorProto_Label = descriptorpb.FieldDescriptorProto_Label
+
+const FieldDescriptorProto_LABEL_OPTIONAL = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL
+const FieldDescriptorProto_LABEL_REQUIRED = descriptorpb.FieldDescriptorProto_LABEL_REQUIRED
+const FieldDescriptorProto_LABEL_REPEATED = descriptorpb.FieldDescriptorProto_LABEL_REPEATED
+
+var FieldDescriptorProto_Label_name = descriptorpb.FieldDescriptorProto_Label_name
+var FieldDescriptorProto_Label_value = descriptorpb.FieldDescriptorProto_Label_value
+
+type FileOptions_OptimizeMode = descriptorpb.FileOptions_OptimizeMode
+
+const FileOptions_SPEED = descriptorpb.FileOptions_SPEED
+const FileOptions_CODE_SIZE = descriptorpb.FileOptions_CODE_SIZE
+const FileOptions_LITE_RUNTIME = descriptorpb.FileOptions_LITE_RUNTIME
+
+var FileOptions_OptimizeMode_name = descriptorpb.FileOptions_OptimizeMode_name
+var FileOptions_OptimizeMode_value = descriptorpb.FileOptions_OptimizeMode_value
+
+type FieldOptions_CType = descriptorpb.FieldOptions_CType
+
+const FieldOptions_STRING = descriptorpb.FieldOptions_STRING
+const FieldOptions_CORD = descriptorpb.FieldOptions_CORD
+const FieldOptions_STRING_PIECE = descriptorpb.FieldOptions_STRING_PIECE
+
+var FieldOptions_CType_name = descriptorpb.FieldOptions_CType_name
+var FieldOptions_CType_value = descriptorpb.FieldOptions_CType_value
+
+type FieldOptions_JSType = descriptorpb.FieldOptions_JSType
+
+const FieldOptions_JS_NORMAL = descriptorpb.FieldOptions_JS_NORMAL
+const FieldOptions_JS_STRING = descriptorpb.FieldOptions_JS_STRING
+const FieldOptions_JS_NUMBER = descriptorpb.FieldOptions_JS_NUMBER
+
+var FieldOptions_JSType_name = descriptorpb.FieldOptions_JSType_name
+var FieldOptions_JSType_value = descriptorpb.FieldOptions_JSType_value
+
+type MethodOptions_IdempotencyLevel = descriptorpb.MethodOptions_IdempotencyLevel
+
+const MethodOptions_IDEMPOTENCY_UNKNOWN = descriptorpb.MethodOptions_IDEMPOTENCY_UNKNOWN
+const MethodOptions_NO_SIDE_EFFECTS = descriptorpb.MethodOptions_NO_SIDE_EFFECTS
+const MethodOptions_IDEMPOTENT = descriptorpb.MethodOptions_IDEMPOTENT
+
+var MethodOptions_IdempotencyLevel_name = descriptorpb.MethodOptions_IdempotencyLevel_name
+var MethodOptions_IdempotencyLevel_value = descriptorpb.MethodOptions_IdempotencyLevel_value
+
+type FileDescriptorSet = descriptorpb.FileDescriptorSet
+type FileDescriptorProto = descriptorpb.FileDescriptorProto
+type DescriptorProto = descriptorpb.DescriptorProto
+type ExtensionRangeOptions = descriptorpb.ExtensionRangeOptions
+type FieldDescriptorProto = descriptorpb.FieldDescriptorProto
+type OneofDescriptorProto = descriptorpb.OneofDescriptorProto
+type EnumDescriptorProto = descriptorpb.EnumDescriptorProto
+type EnumValueDescriptorProto = descriptorpb.EnumValueDescriptorProto
+type ServiceDescriptorProto = descriptorpb.ServiceDescriptorProto
+type MethodDescriptorProto = descriptorpb.MethodDescriptorProto
+
+const Default_MethodDescriptorProto_ClientStreaming = descriptorpb.Default_MethodDescriptorProto_ClientStreaming
+const Default_MethodDescriptorProto_ServerStreaming = descriptorpb.Default_MethodDescriptorProto_ServerStreaming
+
+type FileOptions = descriptorpb.FileOptions
+
+const Default_FileOptions_JavaMultipleFiles = descriptorpb.Default_FileOptions_JavaMultipleFiles
+const Default_FileOptions_JavaStringCheckUtf8 = descriptorpb.Default_FileOptions_JavaStringCheckUtf8
+const Default_FileOptions_OptimizeFor = descriptorpb.Default_FileOptions_OptimizeFor
+const Default_FileOptions_CcGenericServices = descriptorpb.Default_FileOptions_CcGenericServices
+const Default_FileOptions_JavaGenericServices = descriptorpb.Default_FileOptions_JavaGenericServices
+const Default_FileOptions_PyGenericServices = descriptorpb.Default_FileOptions_PyGenericServices
+const Default_FileOptions_PhpGenericServices = descriptorpb.Default_FileOptions_PhpGenericServices
+const Default_FileOptions_Deprecated = descriptorpb.Default_FileOptions_Deprecated
+const Default_FileOptions_CcEnableArenas = descriptorpb.Default_FileOptions_CcEnableArenas
+
+type MessageOptions = descriptorpb.MessageOptions
+
+const Default_MessageOptions_MessageSetWireFormat = descriptorpb.Default_MessageOptions_MessageSetWireFormat
+const Default_MessageOptions_NoStandardDescriptorAccessor = descriptorpb.Default_MessageOptions_NoStandardDescriptorAccessor
+const Default_MessageOptions_Deprecated = descriptorpb.Default_MessageOptions_Deprecated
+
+type FieldOptions = descriptorpb.FieldOptions
+
+const Default_FieldOptions_Ctype = descriptorpb.Default_FieldOptions_Ctype
+const Default_FieldOptions_Jstype = descriptorpb.Default_FieldOptions_Jstype
+const Default_FieldOptions_Lazy = descriptorpb.Default_FieldOptions_Lazy
+const Default_FieldOptions_Deprecated = descriptorpb.Default_FieldOptions_Deprecated
+const Default_FieldOptions_Weak = descriptorpb.Default_FieldOptions_Weak
+
+type OneofOptions = descriptorpb.OneofOptions
+type EnumOptions = descriptorpb.EnumOptions
+
+const Default_EnumOptions_Deprecated = descriptorpb.Default_EnumOptions_Deprecated
+
+type EnumValueOptions = descriptorpb.EnumValueOptions
+
+const Default_EnumValueOptions_Deprecated = descriptorpb.Default_EnumValueOptions_Deprecated
+
+type ServiceOptions = descriptorpb.ServiceOptions
+
+const Default_ServiceOptions_Deprecated = descriptorpb.Default_ServiceOptions_Deprecated
+
+type MethodOptions = descriptorpb.MethodOptions
+
+const Default_MethodOptions_Deprecated = descriptorpb.Default_MethodOptions_Deprecated
+const Default_MethodOptions_IdempotencyLevel = descriptorpb.Default_MethodOptions_IdempotencyLevel
+
+type UninterpretedOption = descriptorpb.UninterpretedOption
+type SourceCodeInfo = descriptorpb.SourceCodeInfo
+type GeneratedCodeInfo = descriptorpb.GeneratedCodeInfo
+type DescriptorProto_ExtensionRange = descriptorpb.DescriptorProto_ExtensionRange
+type DescriptorProto_ReservedRange = descriptorpb.DescriptorProto_ReservedRange
+type EnumDescriptorProto_EnumReservedRange = descriptorpb.EnumDescriptorProto_EnumReservedRange
+type UninterpretedOption_NamePart = descriptorpb.UninterpretedOption_NamePart
+type SourceCodeInfo_Location = descriptorpb.SourceCodeInfo_Location
+type GeneratedCodeInfo_Annotation = descriptorpb.GeneratedCodeInfo_Annotation
+
+var File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto protoreflect.FileDescriptor
+
+var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = []byte{
+ 0x0a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68,
+ 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65,
+ 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x3b,
+ 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x32,
+}
+
+var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() }
+func file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() {
+ if File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes,
+ DependencyIndexes: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs,
+ }.Build()
+ File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto = out.File
+ file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = nil
+ file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = nil
+ file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go
index e729dcff1..85f9f5736 100644
--- a/vendor/github.com/golang/protobuf/ptypes/any.go
+++ b/vendor/github.com/golang/protobuf/ptypes/any.go
@@ -19,6 +19,8 @@ const urlPrefix = "type.googleapis.com/"
// AnyMessageName returns the message name contained in an anypb.Any message.
// Most type assertions should use the Is function instead.
+//
+// Deprecated: Call the any.MessageName method instead.
func AnyMessageName(any *anypb.Any) (string, error) {
name, err := anyMessageName(any)
return string(name), err
@@ -38,6 +40,8 @@ func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) {
}
// MarshalAny marshals the given message m into an anypb.Any message.
+//
+// Deprecated: Call the anypb.New function instead.
func MarshalAny(m proto.Message) (*anypb.Any, error) {
switch dm := m.(type) {
case DynamicAny:
@@ -58,6 +62,9 @@ func MarshalAny(m proto.Message) (*anypb.Any, error) {
// Empty returns a new message of the type specified in an anypb.Any message.
// It returns protoregistry.NotFound if the corresponding message type could not
// be resolved in the global registry.
+//
+// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead
+// to resolve the message name and create a new instance of it.
func Empty(any *anypb.Any) (proto.Message, error) {
name, err := anyMessageName(any)
if err != nil {
@@ -76,6 +83,8 @@ func Empty(any *anypb.Any) (proto.Message, error) {
//
// The target message m may be a *DynamicAny message. If the underlying message
// type could not be resolved, then this returns protoregistry.NotFound.
+//
+// Deprecated: Call the any.UnmarshalTo method instead.
func UnmarshalAny(any *anypb.Any, m proto.Message) error {
if dm, ok := m.(*DynamicAny); ok {
if dm.Message == nil {
@@ -100,6 +109,8 @@ func UnmarshalAny(any *anypb.Any, m proto.Message) error {
}
// Is reports whether the Any message contains a message of the specified type.
+//
+// Deprecated: Call the any.MessageIs method instead.
func Is(any *anypb.Any, m proto.Message) bool {
if any == nil || m == nil {
return false
@@ -119,6 +130,9 @@ func Is(any *anypb.Any, m proto.Message) bool {
// var x ptypes.DynamicAny
// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
// fmt.Printf("unmarshaled message: %v", x.Message)
+//
+// Deprecated: Use the any.UnmarshalNew method instead to unmarshal
+// the any message contents into a new instance of the underlying message.
type DynamicAny struct{ proto.Message }
func (m DynamicAny) String() string {
diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go
index fb9edd5c6..d3c33259d 100644
--- a/vendor/github.com/golang/protobuf/ptypes/doc.go
+++ b/vendor/github.com/golang/protobuf/ptypes/doc.go
@@ -3,4 +3,8 @@
// license that can be found in the LICENSE file.
// Package ptypes provides functionality for interacting with well-known types.
+//
+// Deprecated: Well-known types have specialized functionality directly
+// injected into the generated packages for each message type.
+// See the deprecation notice for each function for the suggested alternative.
package ptypes
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go
index 6110ae8a4..b2b55dd85 100644
--- a/vendor/github.com/golang/protobuf/ptypes/duration.go
+++ b/vendor/github.com/golang/protobuf/ptypes/duration.go
@@ -21,6 +21,8 @@ const (
// Duration converts a durationpb.Duration to a time.Duration.
// Duration returns an error if dur is invalid or overflows a time.Duration.
+//
+// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead.
func Duration(dur *durationpb.Duration) (time.Duration, error) {
if err := validateDuration(dur); err != nil {
return 0, err
@@ -39,6 +41,8 @@ func Duration(dur *durationpb.Duration) (time.Duration, error) {
}
// DurationProto converts a time.Duration to a durationpb.Duration.
+//
+// Deprecated: Call the durationpb.New function instead.
func DurationProto(d time.Duration) *durationpb.Duration {
nanos := d.Nanoseconds()
secs := nanos / 1e9
diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
new file mode 100644
index 000000000..8d82abe21
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
@@ -0,0 +1,78 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: github.com/golang/protobuf/ptypes/struct/struct.proto
+
+package structpb
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ reflect "reflect"
+)
+
+// Symbols defined in public import of google/protobuf/struct.proto.
+
+type NullValue = structpb.NullValue
+
+const NullValue_NULL_VALUE = structpb.NullValue_NULL_VALUE
+
+var NullValue_name = structpb.NullValue_name
+var NullValue_value = structpb.NullValue_value
+
+type Struct = structpb.Struct
+type Value = structpb.Value
+type Value_NullValue = structpb.Value_NullValue
+type Value_NumberValue = structpb.Value_NumberValue
+type Value_StringValue = structpb.Value_StringValue
+type Value_BoolValue = structpb.Value_BoolValue
+type Value_StructValue = structpb.Value_StructValue
+type Value_ListValue = structpb.Value_ListValue
+type ListValue = structpb.ListValue
+
+var File_github_com_golang_protobuf_ptypes_struct_struct_proto protoreflect.FileDescriptor
+
+var file_github_com_golang_protobuf_ptypes_struct_struct_proto_rawDesc = []byte{
+ 0x0a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63,
+ 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63,
+ 0x74, 0x3b, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x70, 0x62, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var file_github_com_golang_protobuf_ptypes_struct_struct_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_ptypes_struct_struct_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_github_com_golang_protobuf_ptypes_struct_struct_proto_init() }
+func file_github_com_golang_protobuf_ptypes_struct_struct_proto_init() {
+ if File_github_com_golang_protobuf_ptypes_struct_struct_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_github_com_golang_protobuf_ptypes_struct_struct_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_github_com_golang_protobuf_ptypes_struct_struct_proto_goTypes,
+ DependencyIndexes: file_github_com_golang_protobuf_ptypes_struct_struct_proto_depIdxs,
+ }.Build()
+ File_github_com_golang_protobuf_ptypes_struct_struct_proto = out.File
+ file_github_com_golang_protobuf_ptypes_struct_struct_proto_rawDesc = nil
+ file_github_com_golang_protobuf_ptypes_struct_struct_proto_goTypes = nil
+ file_github_com_golang_protobuf_ptypes_struct_struct_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
index 026d0d491..8368a3f70 100644
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
@@ -33,6 +33,8 @@ const (
//
// A nil Timestamp returns an error. The first return value in that case is
// undefined.
+//
+// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead.
func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
// Don't return the zero value on error, because corresponds to a valid
// timestamp. Instead return whatever time.Unix gives us.
@@ -46,6 +48,8 @@ func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
}
// TimestampNow returns a google.protobuf.Timestamp for the current time.
+//
+// Deprecated: Call the timestamppb.Now function instead.
func TimestampNow() *timestamppb.Timestamp {
ts, err := TimestampProto(time.Now())
if err != nil {
@@ -56,6 +60,8 @@ func TimestampNow() *timestamppb.Timestamp {
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
// It returns an error if the resulting Timestamp is invalid.
+//
+// Deprecated: Call the timestamppb.New function instead.
func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
 	ts := &timestamppb.Timestamp{
Seconds: t.Unix(),
@@ -69,6 +75,9 @@ func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
// TimestampString returns the RFC 3339 string for valid Timestamps.
// For invalid Timestamps, it returns an error message in parentheses.
+//
+// Deprecated: Call the ts.AsTime method instead,
+// followed by a call to the Format method on the time.Time value.
func TimestampString(ts *timestamppb.Timestamp) string {
t, err := Timestamp(ts)
if err != nil {
diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
new file mode 100644
index 000000000..cc40f27ad
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
@@ -0,0 +1,71 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
+
+package wrappers
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+ reflect "reflect"
+)
+
+// Symbols defined in public import of google/protobuf/wrappers.proto.
+
+type DoubleValue = wrapperspb.DoubleValue
+type FloatValue = wrapperspb.FloatValue
+type Int64Value = wrapperspb.Int64Value
+type UInt64Value = wrapperspb.UInt64Value
+type Int32Value = wrapperspb.Int32Value
+type UInt32Value = wrapperspb.UInt32Value
+type BoolValue = wrapperspb.BoolValue
+type StringValue = wrapperspb.StringValue
+type BytesValue = wrapperspb.BytesValue
+
+var File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto protoreflect.FileDescriptor
+
+var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc = []byte{
+ 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2f, 0x77, 0x72, 0x61,
+ 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61,
+ 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
+ 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
+ 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
+ 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x3b, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65,
+ 0x72, 0x73, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_init() }
+func file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_init() {
+ if File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes,
+ DependencyIndexes: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs,
+ }.Build()
+ File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto = out.File
+ file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc = nil
+ file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes = nil
+ file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS
index 203e84eba..52ccb5a93 100644
--- a/vendor/github.com/golang/snappy/AUTHORS
+++ b/vendor/github.com/golang/snappy/AUTHORS
@@ -10,6 +10,7 @@
Amazon.com, Inc
Damian Gryski
+Eric Buth
Google Inc.
Jan Mercl <0xjnml@gmail.com>
Klaus Post
diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS
index d9914732b..ea6524ddd 100644
--- a/vendor/github.com/golang/snappy/CONTRIBUTORS
+++ b/vendor/github.com/golang/snappy/CONTRIBUTORS
@@ -26,7 +26,9 @@
# Please keep the list sorted.
+Alex Legg
Damian Gryski
+Eric Buth
Jan Mercl <0xjnml@gmail.com>
Jonathan Swinney
Kai Backman
diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go
index f1e04b172..23c6e26c6 100644
--- a/vendor/github.com/golang/snappy/decode.go
+++ b/vendor/github.com/golang/snappy/decode.go
@@ -118,32 +118,23 @@ func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
return true
}
-// Read satisfies the io.Reader interface.
-func (r *Reader) Read(p []byte) (int, error) {
- if r.err != nil {
- return 0, r.err
- }
- for {
- if r.i < r.j {
- n := copy(p, r.decoded[r.i:r.j])
- r.i += n
- return n, nil
- }
+func (r *Reader) fill() error {
+ for r.i >= r.j {
if !r.readFull(r.buf[:4], true) {
- return 0, r.err
+ return r.err
}
chunkType := r.buf[0]
if !r.readHeader {
if chunkType != chunkTypeStreamIdentifier {
r.err = ErrCorrupt
- return 0, r.err
+ return r.err
}
r.readHeader = true
}
chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
if chunkLen > len(r.buf) {
r.err = ErrUnsupported
- return 0, r.err
+ return r.err
}
// The chunk types are specified at
@@ -153,11 +144,11 @@ func (r *Reader) Read(p []byte) (int, error) {
// Section 4.2. Compressed data (chunk type 0x00).
if chunkLen < checksumSize {
r.err = ErrCorrupt
- return 0, r.err
+ return r.err
}
buf := r.buf[:chunkLen]
if !r.readFull(buf, false) {
- return 0, r.err
+ return r.err
}
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
buf = buf[checksumSize:]
@@ -165,19 +156,19 @@ func (r *Reader) Read(p []byte) (int, error) {
n, err := DecodedLen(buf)
if err != nil {
r.err = err
- return 0, r.err
+ return r.err
}
if n > len(r.decoded) {
r.err = ErrCorrupt
- return 0, r.err
+ return r.err
}
if _, err := Decode(r.decoded, buf); err != nil {
r.err = err
- return 0, r.err
+ return r.err
}
if crc(r.decoded[:n]) != checksum {
r.err = ErrCorrupt
- return 0, r.err
+ return r.err
}
r.i, r.j = 0, n
continue
@@ -186,25 +177,25 @@ func (r *Reader) Read(p []byte) (int, error) {
// Section 4.3. Uncompressed data (chunk type 0x01).
if chunkLen < checksumSize {
r.err = ErrCorrupt
- return 0, r.err
+ return r.err
}
buf := r.buf[:checksumSize]
if !r.readFull(buf, false) {
- return 0, r.err
+ return r.err
}
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
// Read directly into r.decoded instead of via r.buf.
n := chunkLen - checksumSize
if n > len(r.decoded) {
r.err = ErrCorrupt
- return 0, r.err
+ return r.err
}
if !r.readFull(r.decoded[:n], false) {
- return 0, r.err
+ return r.err
}
if crc(r.decoded[:n]) != checksum {
r.err = ErrCorrupt
- return 0, r.err
+ return r.err
}
r.i, r.j = 0, n
continue
@@ -213,15 +204,15 @@ func (r *Reader) Read(p []byte) (int, error) {
// Section 4.1. Stream identifier (chunk type 0xff).
if chunkLen != len(magicBody) {
r.err = ErrCorrupt
- return 0, r.err
+ return r.err
}
if !r.readFull(r.buf[:len(magicBody)], false) {
- return 0, r.err
+ return r.err
}
for i := 0; i < len(magicBody); i++ {
if r.buf[i] != magicBody[i] {
r.err = ErrCorrupt
- return 0, r.err
+ return r.err
}
}
continue
@@ -230,12 +221,44 @@ func (r *Reader) Read(p []byte) (int, error) {
if chunkType <= 0x7f {
// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
r.err = ErrUnsupported
- return 0, r.err
+ return r.err
}
// Section 4.4 Padding (chunk type 0xfe).
// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
if !r.readFull(r.buf[:chunkLen], false) {
- return 0, r.err
+ return r.err
}
}
+
+ return nil
+}
+
+// Read satisfies the io.Reader interface.
+func (r *Reader) Read(p []byte) (int, error) {
+ if r.err != nil {
+ return 0, r.err
+ }
+
+ if err := r.fill(); err != nil {
+ return 0, err
+ }
+
+ n := copy(p, r.decoded[r.i:r.j])
+ r.i += n
+ return n, nil
+}
+
+// ReadByte satisfies the io.ByteReader interface.
+func (r *Reader) ReadByte() (byte, error) {
+ if r.err != nil {
+ return 0, r.err
+ }
+
+ if err := r.fill(); err != nil {
+ return 0, err
+ }
+
+ c := r.decoded[r.i]
+ r.i++
+ return c, nil
}
diff --git a/vendor/github.com/golang/snappy/decode_arm64.s b/vendor/github.com/golang/snappy/decode_arm64.s
index bfafa0ccf..7a3ead17e 100644
--- a/vendor/github.com/golang/snappy/decode_arm64.s
+++ b/vendor/github.com/golang/snappy/decode_arm64.s
@@ -70,7 +70,7 @@ loop:
// x := uint32(src[s] >> 2)
// switch
MOVW $60, R1
- ADD R4>>2, ZR, R4
+ LSRW $2, R4, R4
CMPW R4, R1
BLS tagLit60Plus
@@ -111,13 +111,12 @@ doLit:
// is contiguous in memory and so it needs to leave enough source bytes to
// read the next tag without refilling buffers, but Go's Decode assumes
// contiguousness (the src argument is a []byte).
- MOVD $16, R1
- CMP R1, R4
- BGT callMemmove
- CMP R1, R2
- BLT callMemmove
- CMP R1, R3
- BLT callMemmove
+ CMP $16, R4
+ BGT callMemmove
+ CMP $16, R2
+ BLT callMemmove
+ CMP $16, R3
+ BLT callMemmove
// !!! Implement the copy from src to dst as a 16-byte load and store.
// (Decode's documentation says that dst and src must not overlap.)
@@ -130,9 +129,8 @@ doLit:
// Note that on arm64, it is legal and cheap to issue unaligned 8-byte or
// 16-byte loads and stores. This technique probably wouldn't be as
// effective on architectures that are fussier about alignment.
-
- VLD1 0(R6), [V0.B16]
- VST1 [V0.B16], 0(R7)
+ LDP 0(R6), (R14, R15)
+ STP (R14, R15), 0(R7)
// d += length
// s += length
@@ -210,8 +208,7 @@ tagLit61:
B doLit
tagLit62Plus:
- MOVW $62, R1
- CMPW R1, R4
+ CMPW $62, R4
BHI tagLit63
// case x == 62:
@@ -273,10 +270,9 @@ tagCopy:
// We have a copy tag. We assume that:
// - R3 == src[s] & 0x03
// - R4 == src[s]
- MOVD $2, R1
- CMP R1, R3
- BEQ tagCopy2
- BGT tagCopy4
+ CMP $2, R3
+ BEQ tagCopy2
+ BGT tagCopy4
// case tagCopy1:
// s += 2
@@ -346,13 +342,11 @@ doCopy:
// }
// copy 16 bytes
// d += length
- MOVD $16, R1
- MOVD $8, R0
- CMP R1, R4
+ CMP $16, R4
BGT slowForwardCopy
- CMP R0, R5
+ CMP $8, R5
BLT slowForwardCopy
- CMP R1, R14
+ CMP $16, R14
BLT slowForwardCopy
MOVD 0(R15), R2
MOVD R2, 0(R7)
@@ -426,8 +420,7 @@ makeOffsetAtLeast8:
// // The two previous lines together means that d-offset, and therefore
// // R15, is unchanged.
// }
- MOVD $8, R1
- CMP R1, R5
+ CMP $8, R5
BGE fixUpSlowForwardCopy
MOVD (R15), R3
MOVD R3, (R7)
@@ -477,9 +470,7 @@ verySlowForwardCopy:
ADD $1, R15, R15
ADD $1, R7, R7
SUB $1, R4, R4
- MOVD $0, R1
- CMP R1, R4
- BNE verySlowForwardCopy
+ CBNZ R4, verySlowForwardCopy
B loop
// The code above handles copy tags.
diff --git a/vendor/github.com/golang/snappy/encode_arm64.s b/vendor/github.com/golang/snappy/encode_arm64.s
index 1f565ee75..f8d54adfc 100644
--- a/vendor/github.com/golang/snappy/encode_arm64.s
+++ b/vendor/github.com/golang/snappy/encode_arm64.s
@@ -35,11 +35,9 @@ TEXT ·emitLiteral(SB), NOSPLIT, $32-56
MOVW R3, R4
SUBW $1, R4, R4
- MOVW $60, R2
- CMPW R2, R4
+ CMPW $60, R4
BLT oneByte
- MOVW $256, R2
- CMPW R2, R4
+ CMPW $256, R4
BLT twoBytes
threeBytes:
@@ -98,8 +96,7 @@ TEXT ·emitCopy(SB), NOSPLIT, $0-48
loop0:
// for length >= 68 { etc }
- MOVW $68, R2
- CMPW R2, R3
+ CMPW $68, R3
BLT step1
// Emit a length 64 copy, encoded as 3 bytes.
@@ -112,9 +109,8 @@ loop0:
step1:
// if length > 64 { etc }
- MOVD $64, R2
- CMP R2, R3
- BLE step2
+ CMP $64, R3
+ BLE step2
// Emit a length 60 copy, encoded as 3 bytes.
MOVD $0xee, R2
@@ -125,11 +121,9 @@ step1:
step2:
// if length >= 12 || offset >= 2048 { goto step3 }
- MOVD $12, R2
- CMP R2, R3
+ CMP $12, R3
BGE step3
- MOVW $2048, R2
- CMPW R2, R11
+ CMPW $2048, R11
BGE step3
// Emit the remaining copy, encoded as 2 bytes.
@@ -295,27 +289,24 @@ varTable:
// var table [maxTableSize]uint16
//
// In the asm code, unlike the Go code, we can zero-initialize only the
- // first tableSize elements. Each uint16 element is 2 bytes and each VST1
- // writes 64 bytes, so we can do only tableSize/32 writes instead of the
- // 2048 writes that would zero-initialize all of table's 32768 bytes.
- // This clear could overrun the first tableSize elements, but it won't
- // overrun the allocated stack size.
+ // first tableSize elements. Each uint16 element is 2 bytes and each
+ // iterations writes 64 bytes, so we can do only tableSize/32 writes
+ // instead of the 2048 writes that would zero-initialize all of table's
+ // 32768 bytes. This clear could overrun the first tableSize elements, but
+ // it won't overrun the allocated stack size.
ADD $128, RSP, R17
MOVD R17, R4
// !!! R6 = &src[tableSize]
ADD R6<<1, R17, R6
- // zero the SIMD registers
- VEOR V0.B16, V0.B16, V0.B16
- VEOR V1.B16, V1.B16, V1.B16
- VEOR V2.B16, V2.B16, V2.B16
- VEOR V3.B16, V3.B16, V3.B16
-
memclr:
- VST1.P [V0.B16, V1.B16, V2.B16, V3.B16], 64(R4)
- CMP R4, R6
- BHI memclr
+ STP.P (ZR, ZR), 64(R4)
+ STP (ZR, ZR), -48(R4)
+ STP (ZR, ZR), -32(R4)
+ STP (ZR, ZR), -16(R4)
+ CMP R4, R6
+ BHI memclr
// !!! R6 = &src[0]
MOVD R7, R6
@@ -391,7 +382,7 @@ inner0:
// if load32(src, s) != load32(src, candidate) { continue } break
MOVW 0(R7), R3
- MOVW (R6)(R15*1), R4
+ MOVW (R6)(R15), R4
CMPW R4, R3
BNE inner0
@@ -404,8 +395,7 @@ fourByteMatch:
// on inputMargin in encode.go.
MOVD R7, R3
SUB R10, R3, R3
- MOVD $16, R2
- CMP R2, R3
+ CMP $16, R3
BLE emitLiteralFastPath
// ----------------------------------------
@@ -454,18 +444,21 @@ inlineEmitLiteralMemmove:
MOVD R3, 24(RSP)
// Finish the "d +=" part of "d += emitLiteral(etc)".
- ADD R3, R8, R8
- MOVD R7, 80(RSP)
- MOVD R8, 88(RSP)
- MOVD R15, 120(RSP)
- CALL runtime·memmove(SB)
- MOVD 64(RSP), R5
- MOVD 72(RSP), R6
- MOVD 80(RSP), R7
- MOVD 88(RSP), R8
- MOVD 96(RSP), R9
- MOVD 120(RSP), R15
- B inner1
+ ADD R3, R8, R8
+ MOVD R7, 80(RSP)
+ MOVD R8, 88(RSP)
+ MOVD R15, 120(RSP)
+ CALL runtime·memmove(SB)
+ MOVD 64(RSP), R5
+ MOVD 72(RSP), R6
+ MOVD 80(RSP), R7
+ MOVD 88(RSP), R8
+ MOVD 96(RSP), R9
+ MOVD 120(RSP), R15
+ ADD $128, RSP, R17
+ MOVW $0xa7bd, R16
+ MOVKW $(0x1e35<<16), R16
+ B inner1
inlineEmitLiteralEnd:
// End inline of the emitLiteral call.
@@ -489,9 +482,9 @@ emitLiteralFastPath:
// Note that on arm64, it is legal and cheap to issue unaligned 8-byte or
// 16-byte loads and stores. This technique probably wouldn't be as
// effective on architectures that are fussier about alignment.
- VLD1 0(R10), [V0.B16]
- VST1 [V0.B16], 0(R8)
- ADD R3, R8, R8
+ LDP 0(R10), (R0, R1)
+ STP (R0, R1), 0(R8)
+ ADD R3, R8, R8
inner1:
// for { etc }
@@ -679,7 +672,7 @@ inlineEmitCopyEnd:
MOVHU R3, 0(R17)(R11<<1)
// if uint32(x>>8) == load32(src, candidate) { continue }
- MOVW (R6)(R15*1), R4
+ MOVW (R6)(R15), R4
CMPW R4, R14
BEQ inner1
diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go
index 3d45c1a47..f01eff318 100644
--- a/vendor/github.com/google/go-cmp/cmp/path.go
+++ b/vendor/github.com/google/go-cmp/cmp/path.go
@@ -315,7 +315,7 @@ func (tf Transform) Option() Option { return tf.trans }
// pops the address from the stack. Thus, when traversing into a pointer from
// reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles
// by checking whether the pointer has already been visited. The cycle detection
-// uses a seperate stack for the x and y values.
+// uses a separate stack for the x and y values.
//
// If a cycle is detected we need to determine whether the two pointers
// should be considered equal. The definition of equality chosen by Equal
diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go
index a6c070cfc..104bb3053 100644
--- a/vendor/github.com/google/go-cmp/cmp/report_compare.go
+++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go
@@ -79,7 +79,7 @@ func (opts formatOptions) verbosity() uint {
}
}
-const maxVerbosityPreset = 3
+const maxVerbosityPreset = 6
// verbosityPreset modifies the verbosity settings given an index
// between 0 and maxVerbosityPreset, inclusive.
@@ -100,7 +100,7 @@ func verbosityPreset(opts formatOptions, i int) formatOptions {
func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out textNode) {
if opts.DiffMode == diffIdentical {
opts = opts.WithVerbosity(1)
- } else {
+ } else if opts.verbosity() < 3 {
opts = opts.WithVerbosity(3)
}
diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go
index da04caf16..2ad3bc85b 100644
--- a/vendor/github.com/google/go-cmp/cmp/report_slices.go
+++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go
@@ -7,6 +7,7 @@ package cmp
import (
"bytes"
"fmt"
+ "math"
"reflect"
"strconv"
"strings"
@@ -26,8 +27,6 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool {
return false // No differences detected
case !v.ValueX.IsValid() || !v.ValueY.IsValid():
return false // Both values must be valid
- case v.Type.Kind() == reflect.Slice && (v.ValueX.Len() == 0 || v.ValueY.Len() == 0):
- return false // Both slice values have to be non-empty
case v.NumIgnored > 0:
return false // Some ignore option was used
case v.NumTransformed > 0:
@@ -45,7 +44,16 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool {
return false
}
- switch t := v.Type; t.Kind() {
+ // Check whether this is an interface with the same concrete types.
+ t := v.Type
+ vx, vy := v.ValueX, v.ValueY
+ if t.Kind() == reflect.Interface && !vx.IsNil() && !vy.IsNil() && vx.Elem().Type() == vy.Elem().Type() {
+ vx, vy = vx.Elem(), vy.Elem()
+ t = vx.Type()
+ }
+
+ // Check whether we provide specialized diffing for this type.
+ switch t.Kind() {
case reflect.String:
case reflect.Array, reflect.Slice:
// Only slices of primitive types have specialized handling.
@@ -57,6 +65,11 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool {
return false
}
+ // Both slice values have to be non-empty.
+ if t.Kind() == reflect.Slice && (vx.Len() == 0 || vy.Len() == 0) {
+ return false
+ }
+
// If a sufficient number of elements already differ,
// use specialized formatting even if length requirement is not met.
if v.NumDiff > v.NumSame {
@@ -68,7 +81,7 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool {
// Use specialized string diffing for longer slices or strings.
const minLength = 64
- return v.ValueX.Len() >= minLength && v.ValueY.Len() >= minLength
+ return vx.Len() >= minLength && vy.Len() >= minLength
}
// FormatDiffSlice prints a diff for the slices (or strings) represented by v.
@@ -77,17 +90,23 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool {
func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
assert(opts.DiffMode == diffUnknown)
t, vx, vy := v.Type, v.ValueX, v.ValueY
+ if t.Kind() == reflect.Interface {
+ vx, vy = vx.Elem(), vy.Elem()
+ t = vx.Type()
+ opts = opts.WithTypeMode(emitType)
+ }
// Auto-detect the type of the data.
- var isLinedText, isText, isBinary bool
var sx, sy string
+ var ssx, ssy []string
+ var isString, isMostlyText, isPureLinedText, isBinary bool
switch {
case t.Kind() == reflect.String:
sx, sy = vx.String(), vy.String()
- isText = true // Initial estimate, verify later
+ isString = true
case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)):
sx, sy = string(vx.Bytes()), string(vy.Bytes())
- isBinary = true // Initial estimate, verify later
+ isString = true
case t.Kind() == reflect.Array:
// Arrays need to be addressable for slice operations to work.
vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem()
@@ -95,13 +114,12 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
vy2.Set(vy)
vx, vy = vx2, vy2
}
- if isText || isBinary {
- var numLines, lastLineIdx, maxLineLen int
- isBinary = !utf8.ValidString(sx) || !utf8.ValidString(sy)
+ if isString {
+ var numTotalRunes, numValidRunes, numLines, lastLineIdx, maxLineLen int
for i, r := range sx + sy {
- if !(unicode.IsPrint(r) || unicode.IsSpace(r)) || r == utf8.RuneError {
- isBinary = true
- break
+ numTotalRunes++
+ if (unicode.IsPrint(r) || unicode.IsSpace(r)) && r != utf8.RuneError {
+ numValidRunes++
}
if r == '\n' {
if maxLineLen < i-lastLineIdx {
@@ -111,8 +129,26 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
numLines++
}
}
- isText = !isBinary
- isLinedText = isText && numLines >= 4 && maxLineLen <= 1024
+ isPureText := numValidRunes == numTotalRunes
+ isMostlyText = float64(numValidRunes) > math.Floor(0.90*float64(numTotalRunes))
+ isPureLinedText = isPureText && numLines >= 4 && maxLineLen <= 1024
+ isBinary = !isMostlyText
+
+ // Avoid diffing by lines if it produces a significantly more complex
+ // edit script than diffing by bytes.
+ if isPureLinedText {
+ ssx = strings.Split(sx, "\n")
+ ssy = strings.Split(sy, "\n")
+ esLines := diff.Difference(len(ssx), len(ssy), func(ix, iy int) diff.Result {
+ return diff.BoolResult(ssx[ix] == ssy[iy])
+ })
+ esBytes := diff.Difference(len(sx), len(sy), func(ix, iy int) diff.Result {
+ return diff.BoolResult(sx[ix] == sy[iy])
+ })
+ efficiencyLines := float64(esLines.Dist()) / float64(len(esLines))
+ efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes))
+ isPureLinedText = efficiencyLines < 4*efficiencyBytes
+ }
}
// Format the string into printable records.
@@ -121,9 +157,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
switch {
// If the text appears to be multi-lined text,
// then perform differencing across individual lines.
- case isLinedText:
- ssx := strings.Split(sx, "\n")
- ssy := strings.Split(sy, "\n")
+ case isPureLinedText:
list = opts.formatDiffSlice(
reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line",
func(v reflect.Value, d diffMode) textRecord {
@@ -212,7 +246,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
// If the text appears to be single-lined text,
// then perform differencing in approximately fixed-sized chunks.
// The output is printed as quoted strings.
- case isText:
+ case isMostlyText:
list = opts.formatDiffSlice(
reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte",
func(v reflect.Value, d diffMode) textRecord {
@@ -220,7 +254,6 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
return textRecord{Diff: d, Value: textLine(s)}
},
)
- delim = ""
// If the text appears to be binary data,
// then perform differencing in approximately fixed-sized chunks.
@@ -282,7 +315,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
// Wrap the output with appropriate type information.
var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
- if !isText {
+ if !isMostlyText {
// The "{...}" byte-sequence literal is not valid Go syntax for strings.
// Emit the type for extra clarity (e.g. "string{...}").
if t.Kind() == reflect.String {
@@ -321,8 +354,11 @@ func (opts formatOptions) formatDiffSlice(
vx, vy reflect.Value, chunkSize int, name string,
makeRec func(reflect.Value, diffMode) textRecord,
) (list textList) {
- es := diff.Difference(vx.Len(), vy.Len(), func(ix int, iy int) diff.Result {
- return diff.BoolResult(vx.Index(ix).Interface() == vy.Index(iy).Interface())
+ eq := func(ix, iy int) bool {
+ return vx.Index(ix).Interface() == vy.Index(iy).Interface()
+ }
+ es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result {
+ return diff.BoolResult(eq(ix, iy))
})
appendChunks := func(v reflect.Value, d diffMode) int {
@@ -347,6 +383,7 @@ func (opts formatOptions) formatDiffSlice(
groups := coalesceAdjacentEdits(name, es)
groups = coalesceInterveningIdentical(groups, chunkSize/4)
+ groups = cleanupSurroundingIdentical(groups, eq)
maxGroup := diffStats{Name: name}
for i, ds := range groups {
if maxLen >= 0 && numDiffs >= maxLen {
@@ -399,25 +436,36 @@ func (opts formatOptions) formatDiffSlice(
// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent
// equal or unequal counts.
+//
+// Example:
+//
+// Input: "..XXY...Y"
+// Output: [
+// {NumIdentical: 2},
+// {NumRemoved: 2, NumInserted 1},
+// {NumIdentical: 3},
+// {NumInserted: 1},
+// ]
+//
func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) {
- var prevCase int // Arbitrary index into which case last occurred
- lastStats := func(i int) *diffStats {
- if prevCase != i {
+ var prevMode byte
+ lastStats := func(mode byte) *diffStats {
+ if prevMode != mode {
groups = append(groups, diffStats{Name: name})
- prevCase = i
+ prevMode = mode
}
return &groups[len(groups)-1]
}
for _, e := range es {
switch e {
case diff.Identity:
- lastStats(1).NumIdentical++
+ lastStats('=').NumIdentical++
case diff.UniqueX:
- lastStats(2).NumRemoved++
+ lastStats('!').NumRemoved++
case diff.UniqueY:
- lastStats(2).NumInserted++
+ lastStats('!').NumInserted++
case diff.Modified:
- lastStats(2).NumModified++
+ lastStats('!').NumModified++
}
}
return groups
@@ -427,6 +475,35 @@ func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats)
// equal groups into adjacent unequal groups that currently result in a
// dual inserted/removed printout. This acts as a high-pass filter to smooth
// out high-frequency changes within the windowSize.
+//
+// Example:
+//
+// WindowSize: 16,
+// Input: [
+// {NumIdentical: 61}, // group 0
+// {NumRemoved: 3, NumInserted: 1}, // group 1
+// {NumIdentical: 6}, // ├── coalesce
+// {NumInserted: 2}, // ├── coalesce
+// {NumIdentical: 1}, // ├── coalesce
+// {NumRemoved: 9}, // └── coalesce
+// {NumIdentical: 64}, // group 2
+// {NumRemoved: 3, NumInserted: 1}, // group 3
+// {NumIdentical: 6}, // ├── coalesce
+// {NumInserted: 2}, // ├── coalesce
+// {NumIdentical: 1}, // ├── coalesce
+// {NumRemoved: 7}, // ├── coalesce
+// {NumIdentical: 1}, // ├── coalesce
+// {NumRemoved: 2}, // └── coalesce
+// {NumIdentical: 63}, // group 4
+// ]
+// Output: [
+// {NumIdentical: 61},
+// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3},
+// {NumIdentical: 64},
+// {NumIdentical: 8, NumRemoved: 12, NumInserted: 3},
+// {NumIdentical: 63},
+// ]
+//
func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats {
groups, groupsOrig := groups[:0], groups
for i, ds := range groupsOrig {
@@ -446,3 +523,91 @@ func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStat
}
return groups
}
+
+// cleanupSurroundingIdentical scans through all unequal groups, and
+// moves any leading sequence of equal elements to the preceding equal group and
+// moves and trailing sequence of equal elements to the succeeding equal group.
+//
+// This is necessary since coalesceInterveningIdentical may coalesce edit groups
+// together such that leading/trailing spans of equal elements becomes possible.
+// Note that this can occur even with an optimal diffing algorithm.
+//
+// Example:
+//
+// Input: [
+// {NumIdentical: 61},
+// {NumIdentical: 1 , NumRemoved: 11, NumInserted: 2}, // assume 3 leading identical elements
+// {NumIdentical: 67},
+// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3}, // assume 10 trailing identical elements
+// {NumIdentical: 54},
+// ]
+// Output: [
+// {NumIdentical: 64}, // incremented by 3
+// {NumRemoved: 9},
+// {NumIdentical: 67},
+// {NumRemoved: 9},
+// {NumIdentical: 64}, // incremented by 10
+// ]
+//
+func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats {
+ var ix, iy int // indexes into sequence x and y
+ for i, ds := range groups {
+ // Handle equal group.
+ if ds.NumDiff() == 0 {
+ ix += ds.NumIdentical
+ iy += ds.NumIdentical
+ continue
+ }
+
+ // Handle unequal group.
+ nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified
+ ny := ds.NumIdentical + ds.NumInserted + ds.NumModified
+ var numLeadingIdentical, numTrailingIdentical int
+ for i := 0; i < nx && i < ny && eq(ix+i, iy+i); i++ {
+ numLeadingIdentical++
+ }
+ for i := 0; i < nx && i < ny && eq(ix+nx-1-i, iy+ny-1-i); i++ {
+ numTrailingIdentical++
+ }
+ if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 {
+ if numLeadingIdentical > 0 {
+ // Remove leading identical span from this group and
+ // insert it into the preceding group.
+ if i-1 >= 0 {
+ groups[i-1].NumIdentical += numLeadingIdentical
+ } else {
+ // No preceding group exists, so prepend a new group,
+ // but do so after we finish iterating over all groups.
+ defer func() {
+ groups = append([]diffStats{{Name: groups[0].Name, NumIdentical: numLeadingIdentical}}, groups...)
+ }()
+ }
+ // Increment indexes since the preceding group would have handled this.
+ ix += numLeadingIdentical
+ iy += numLeadingIdentical
+ }
+ if numTrailingIdentical > 0 {
+ // Remove trailing identical span from this group and
+ // insert it into the succeeding group.
+ if i+1 < len(groups) {
+ groups[i+1].NumIdentical += numTrailingIdentical
+ } else {
+ // No succeeding group exists, so append a new group,
+ // but do so after we finish iterating over all groups.
+ defer func() {
+ groups = append(groups, diffStats{Name: groups[len(groups)-1].Name, NumIdentical: numTrailingIdentical})
+ }()
+ }
+ // Do not increment indexes since the succeeding group will handle this.
+ }
+
+ // Update this group since some identical elements were removed.
+ nx -= numIdentical
+ ny -= numIdentical
+ groups[i] = diffStats{Name: ds.Name, NumRemoved: nx, NumInserted: ny}
+ }
+ ix += nx
+ iy += ny
+ }
+ return groups
+}
diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go
new file mode 100644
index 000000000..869379da9
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go
@@ -0,0 +1,296 @@
+// Copyright 2021, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package apierror implements a wrapper error for parsing error details from
+// API calls. Currently, only errors representing a gRPC status are supported.
+package apierror
+
+import (
+ "fmt"
+ "strings"
+
+ jsonerror "github.com/googleapis/gax-go/v2/apierror/internal/proto"
+ "google.golang.org/api/googleapi"
+ "google.golang.org/genproto/googleapis/rpc/errdetails"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/encoding/protojson"
+)
+
+// ErrDetails holds the google/rpc/error_details.proto messages.
+type ErrDetails struct {
+ ErrorInfo *errdetails.ErrorInfo
+ BadRequest *errdetails.BadRequest
+ PreconditionFailure *errdetails.PreconditionFailure
+ QuotaFailure *errdetails.QuotaFailure
+ RetryInfo *errdetails.RetryInfo
+ ResourceInfo *errdetails.ResourceInfo
+ RequestInfo *errdetails.RequestInfo
+ DebugInfo *errdetails.DebugInfo
+ Help *errdetails.Help
+ LocalizedMessage *errdetails.LocalizedMessage
+
+ // Unknown stores unidentifiable error details.
+ Unknown []interface{}
+}
+
+func (e ErrDetails) String() string {
+ var d strings.Builder
+ if e.ErrorInfo != nil {
+ d.WriteString(fmt.Sprintf("error details: name = ErrorInfo reason = %s domain = %s metadata = %s\n",
+ e.ErrorInfo.GetReason(), e.ErrorInfo.GetDomain(), e.ErrorInfo.GetMetadata()))
+ }
+
+ if e.BadRequest != nil {
+ v := e.BadRequest.GetFieldViolations()
+ var f []string
+ var desc []string
+ for _, x := range v {
+ f = append(f, x.GetField())
+ desc = append(desc, x.GetDescription())
+ }
+ d.WriteString(fmt.Sprintf("error details: name = BadRequest field = %s desc = %s\n",
+ strings.Join(f, " "), strings.Join(desc, " ")))
+ }
+
+ if e.PreconditionFailure != nil {
+ v := e.PreconditionFailure.GetViolations()
+ var t []string
+ var s []string
+ var desc []string
+ for _, x := range v {
+ t = append(t, x.GetType())
+ s = append(s, x.GetSubject())
+ desc = append(desc, x.GetDescription())
+ }
+ d.WriteString(fmt.Sprintf("error details: name = PreconditionFailure type = %s subj = %s desc = %s\n", strings.Join(t, " "),
+ strings.Join(s, " "), strings.Join(desc, " ")))
+ }
+
+ if e.QuotaFailure != nil {
+ v := e.QuotaFailure.GetViolations()
+ var s []string
+ var desc []string
+ for _, x := range v {
+ s = append(s, x.GetSubject())
+ desc = append(desc, x.GetDescription())
+ }
+ d.WriteString(fmt.Sprintf("error details: name = QuotaFailure subj = %s desc = %s\n",
+ strings.Join(s, " "), strings.Join(desc, " ")))
+ }
+
+ if e.RequestInfo != nil {
+ d.WriteString(fmt.Sprintf("error details: name = RequestInfo id = %s data = %s\n",
+ e.RequestInfo.GetRequestId(), e.RequestInfo.GetServingData()))
+ }
+
+ if e.ResourceInfo != nil {
+ d.WriteString(fmt.Sprintf("error details: name = ResourceInfo type = %s resourcename = %s owner = %s desc = %s\n",
+ e.ResourceInfo.GetResourceType(), e.ResourceInfo.GetResourceName(),
+ e.ResourceInfo.GetOwner(), e.ResourceInfo.GetDescription()))
+
+ }
+ if e.RetryInfo != nil {
+ d.WriteString(fmt.Sprintf("error details: retry in %s\n", e.RetryInfo.GetRetryDelay().AsDuration()))
+
+ }
+ if e.Unknown != nil {
+ var s []string
+ for _, x := range e.Unknown {
+ s = append(s, fmt.Sprintf("%v", x))
+ }
+ d.WriteString(fmt.Sprintf("error details: name = Unknown desc = %s\n", strings.Join(s, " ")))
+ }
+
+ if e.DebugInfo != nil {
+ d.WriteString(fmt.Sprintf("error details: name = DebugInfo detail = %s stack = %s\n", e.DebugInfo.GetDetail(),
+ strings.Join(e.DebugInfo.GetStackEntries(), " ")))
+ }
+ if e.Help != nil {
+ var desc []string
+ var url []string
+ for _, x := range e.Help.Links {
+ desc = append(desc, x.GetDescription())
+ url = append(url, x.GetUrl())
+ }
+ d.WriteString(fmt.Sprintf("error details: name = Help desc = %s url = %s\n",
+ strings.Join(desc, " "), strings.Join(url, " ")))
+ }
+ if e.LocalizedMessage != nil {
+ d.WriteString(fmt.Sprintf("error details: name = LocalizedMessage locale = %s msg = %s\n",
+ e.LocalizedMessage.GetLocale(), e.LocalizedMessage.GetMessage()))
+ }
+
+ return d.String()
+}
+
+// APIError wraps either a gRPC Status error or a HTTP googleapi.Error. It
+// implements error and Status interfaces.
+type APIError struct {
+ err error
+ status *status.Status
+ httpErr *googleapi.Error
+ details ErrDetails
+}
+
+// Details presents the error details of the APIError.
+func (a *APIError) Details() ErrDetails {
+ return a.details
+}
+
+// Unwrap extracts the original error.
+func (a *APIError) Unwrap() error {
+ return a.err
+}
+
+// Error returns a readable representation of the APIError.
+func (a *APIError) Error() string {
+ var msg string
+ if a.status != nil {
+ msg = a.err.Error()
+ } else if a.httpErr != nil {
+ // Truncate the googleapi.Error message because it dumps the Details in
+ // an ugly way.
+ msg = fmt.Sprintf("googleapi: Error %d: %s", a.httpErr.Code, a.httpErr.Message)
+ }
+ return strings.TrimSpace(fmt.Sprintf("%s\n%s", msg, a.details))
+}
+
+// GRPCStatus extracts the underlying gRPC Status error.
+// This method is necessary to fulfill the interface
+// described in https://pkg.go.dev/google.golang.org/grpc/status#FromError.
+func (a *APIError) GRPCStatus() *status.Status {
+ return a.status
+}
+
+// Reason returns the reason in an ErrorInfo.
+// If ErrorInfo is nil, it returns an empty string.
+func (a *APIError) Reason() string {
+ return a.details.ErrorInfo.GetReason()
+}
+
+// Domain returns the domain in an ErrorInfo.
+// If ErrorInfo is nil, it returns an empty string.
+func (a *APIError) Domain() string {
+ return a.details.ErrorInfo.GetDomain()
+}
+
+// Metadata returns the metadata in an ErrorInfo.
+// If ErrorInfo is nil, it returns nil.
+func (a *APIError) Metadata() map[string]string {
+ return a.details.ErrorInfo.GetMetadata()
+
+}
+
+// FromError parses a Status error or a googleapi.Error and builds an APIError.
+func FromError(err error) (*APIError, bool) {
+ if err == nil {
+ return nil, false
+ }
+
+ ae := APIError{err: err}
+ st, isStatus := status.FromError(err)
+ herr, isHTTPErr := err.(*googleapi.Error)
+
+ switch {
+ case isStatus:
+ ae.status = st
+ ae.details = parseDetails(st.Details())
+ case isHTTPErr:
+ ae.httpErr = herr
+ ae.details = parseHTTPDetails(herr)
+ default:
+ return nil, false
+ }
+
+ return &ae, true
+
+}
+
+// parseDetails accepts a slice of interface{} that should be backed by some
+// sort of proto.Message that can be cast to the google/rpc/error_details.proto
+// types.
+//
+// This is for internal use only.
+func parseDetails(details []interface{}) ErrDetails {
+ var ed ErrDetails
+ for _, d := range details {
+ switch d := d.(type) {
+ case *errdetails.ErrorInfo:
+ ed.ErrorInfo = d
+ case *errdetails.BadRequest:
+ ed.BadRequest = d
+ case *errdetails.PreconditionFailure:
+ ed.PreconditionFailure = d
+ case *errdetails.QuotaFailure:
+ ed.QuotaFailure = d
+ case *errdetails.RetryInfo:
+ ed.RetryInfo = d
+ case *errdetails.ResourceInfo:
+ ed.ResourceInfo = d
+ case *errdetails.RequestInfo:
+ ed.RequestInfo = d
+ case *errdetails.DebugInfo:
+ ed.DebugInfo = d
+ case *errdetails.Help:
+ ed.Help = d
+ case *errdetails.LocalizedMessage:
+ ed.LocalizedMessage = d
+ default:
+ ed.Unknown = append(ed.Unknown, d)
+ }
+ }
+
+ return ed
+}
+
+// parseHTTPDetails will convert the given googleapi.Error into the protobuf
+// representation then parse the Any values that contain the error details.
+//
+// This is for internal use only.
+func parseHTTPDetails(gae *googleapi.Error) ErrDetails {
+ e := &jsonerror.Error{}
+ if err := protojson.Unmarshal([]byte(gae.Body), e); err != nil {
+ // If the error body does not conform to the error schema, ignore it
+ // altogther. See https://cloud.google.com/apis/design/errors#http_mapping.
+ return ErrDetails{}
+ }
+
+ // Coerce the Any messages into proto.Message then parse the details.
+ details := []interface{}{}
+ for _, any := range e.GetError().GetDetails() {
+ m, err := any.UnmarshalNew()
+ if err != nil {
+ // Ignore malformed Any values.
+ continue
+ }
+ details = append(details, m)
+ }
+
+ return parseDetails(details)
+}
diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/README.md b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/README.md
new file mode 100644
index 000000000..9ff0caea9
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/README.md
@@ -0,0 +1,30 @@
+# HTTP JSON Error Schema
+
+The `error.proto` represents the HTTP-JSON schema used by Google APIs to convey
+error payloads as described by https://cloud.google.com/apis/design/errors#http_mapping.
+This package is for internal parsing logic only and should not be used in any
+other context.
+
+## Regeneration
+
+To regenerate the protobuf Go code you will need the following:
+
+* A local copy of [googleapis], the absolute path to which should be exported to
+the environment variable `GOOGLEAPIS`
+* The protobuf compiler [protoc]
+* The Go [protobuf plugin]
+* The [goimports] tool
+
+From this directory run the following command:
+```sh
+protoc -I $GOOGLEAPIS -I. --go_out=. --go_opt=module=github.com/googleapis/gax-go/v2/apierror/internal/proto error.proto
+goimports -w .
+```
+
+Note: the `module` plugin option ensures the generated code is placed in this
+directory, and not in several nested directories defined by `go_package` option.
+
+[googleapis]: https://github.com/googleapis/googleapis
+[protoc]: https://github.com/protocolbuffers/protobuf#protocol-compiler-installation
+[protobuf plugin]: https://developers.google.com/protocol-buffers/docs/reference/go-generated
+[goimports]: https://pkg.go.dev/golang.org/x/tools/cmd/goimports
\ No newline at end of file
diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go
new file mode 100644
index 000000000..27b34c06e
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go
@@ -0,0 +1,278 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.27.1
+// protoc v3.15.8
+// source: error.proto
+
+package jsonerror
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ code "google.golang.org/genproto/googleapis/rpc/code"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The error format v2 for Google JSON REST APIs.
+// Copied from https://cloud.google.com/apis/design/errors#http_mapping.
+//
+// NOTE: This schema is not used for other wire protocols.
+type Error struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The actual error payload. The nested message structure is for backward
+ // compatibility with Google API client libraries. It also makes the error
+ // more readable to developers.
+ Error *Error_Status `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
+}
+
+func (x *Error) Reset() {
+ *x = Error{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_error_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Error) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Error) ProtoMessage() {}
+
+func (x *Error) ProtoReflect() protoreflect.Message {
+ mi := &file_error_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Error.ProtoReflect.Descriptor instead.
+func (*Error) Descriptor() ([]byte, []int) {
+ return file_error_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Error) GetError() *Error_Status {
+ if x != nil {
+ return x.Error
+ }
+ return nil
+}
+
+// This message has the same semantics as `google.rpc.Status`. It uses HTTP
+// status code instead of gRPC status code. It has an extra field `status`
+// for backward compatibility with Google API Client Libraries.
+type Error_Status struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The HTTP status code that corresponds to `google.rpc.Status.code`.
+ Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
+ // This corresponds to `google.rpc.Status.message`.
+ Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+ // This is the enum version for `google.rpc.Status.code`.
+ Status code.Code `protobuf:"varint,4,opt,name=status,proto3,enum=google.rpc.Code" json:"status,omitempty"`
+ // This corresponds to `google.rpc.Status.details`.
+ Details []*anypb.Any `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"`
+}
+
+func (x *Error_Status) Reset() {
+ *x = Error_Status{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_error_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Error_Status) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Error_Status) ProtoMessage() {}
+
+func (x *Error_Status) ProtoReflect() protoreflect.Message {
+ mi := &file_error_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Error_Status.ProtoReflect.Descriptor instead.
+func (*Error_Status) Descriptor() ([]byte, []int) {
+ return file_error_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *Error_Status) GetCode() int32 {
+ if x != nil {
+ return x.Code
+ }
+ return 0
+}
+
+func (x *Error_Status) GetMessage() string {
+ if x != nil {
+ return x.Message
+ }
+ return ""
+}
+
+func (x *Error_Status) GetStatus() code.Code {
+ if x != nil {
+ return x.Status
+ }
+ return code.Code(0)
+}
+
+func (x *Error_Status) GetDetails() []*anypb.Any {
+ if x != nil {
+ return x.Details
+ }
+ return nil
+}
+
+var File_error_proto protoreflect.FileDescriptor
+
+var file_error_proto_rawDesc = []byte{
+ 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x65,
+ 0x72, 0x72, 0x6f, 0x72, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc5, 0x01, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72,
+ 0x12, 0x29, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x13, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x53, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x1a, 0x90, 0x01, 0x0a, 0x06,
+ 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65,
+ 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x65, 0x12, 0x28, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70,
+ 0x63, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2e,
+ 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x43,
+ 0x5a, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x67, 0x61, 0x78, 0x2d, 0x67, 0x6f, 0x2f, 0x76,
+ 0x32, 0x2f, 0x61, 0x70, 0x69, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72,
+ 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3b, 0x6a, 0x73, 0x6f, 0x6e, 0x65, 0x72,
+ 0x72, 0x6f, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_error_proto_rawDescOnce sync.Once
+ file_error_proto_rawDescData = file_error_proto_rawDesc
+)
+
+func file_error_proto_rawDescGZIP() []byte {
+ file_error_proto_rawDescOnce.Do(func() {
+ file_error_proto_rawDescData = protoimpl.X.CompressGZIP(file_error_proto_rawDescData)
+ })
+ return file_error_proto_rawDescData
+}
+
+var file_error_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_error_proto_goTypes = []interface{}{
+ (*Error)(nil), // 0: error.Error
+ (*Error_Status)(nil), // 1: error.Error.Status
+ (code.Code)(0), // 2: google.rpc.Code
+ (*anypb.Any)(nil), // 3: google.protobuf.Any
+}
+var file_error_proto_depIdxs = []int32{
+ 1, // 0: error.Error.error:type_name -> error.Error.Status
+ 2, // 1: error.Error.Status.status:type_name -> google.rpc.Code
+ 3, // 2: error.Error.Status.details:type_name -> google.protobuf.Any
+ 3, // [3:3] is the sub-list for method output_type
+ 3, // [3:3] is the sub-list for method input_type
+ 3, // [3:3] is the sub-list for extension type_name
+ 3, // [3:3] is the sub-list for extension extendee
+ 0, // [0:3] is the sub-list for field type_name
+}
+
+func init() { file_error_proto_init() }
+func file_error_proto_init() {
+ if File_error_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_error_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Error); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_error_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Error_Status); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_error_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_error_proto_goTypes,
+ DependencyIndexes: file_error_proto_depIdxs,
+ MessageInfos: file_error_proto_msgTypes,
+ }.Build()
+ File_error_proto = out.File
+ file_error_proto_rawDesc = nil
+ file_error_proto_goTypes = nil
+ file_error_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.proto b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.proto
new file mode 100644
index 000000000..4b9b13ce1
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.proto
@@ -0,0 +1,46 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package error;
+
+import "google/protobuf/any.proto";
+import "google/rpc/code.proto";
+
+option go_package = "github.com/googleapis/gax-go/v2/apierror/internal/proto;jsonerror";
+
+// The error format v2 for Google JSON REST APIs.
+// Copied from https://cloud.google.com/apis/design/errors#http_mapping.
+//
+// NOTE: This schema is not used for other wire protocols.
+message Error {
+ // This message has the same semantics as `google.rpc.Status`. It uses HTTP
+ // status code instead of gRPC status code. It has an extra field `status`
+ // for backward compatibility with Google API Client Libraries.
+ message Status {
+ // The HTTP status code that corresponds to `google.rpc.Status.code`.
+ int32 code = 1;
+ // This corresponds to `google.rpc.Status.message`.
+ string message = 2;
+ // This is the enum version for `google.rpc.Status.code`.
+ google.rpc.Code status = 4;
+ // This corresponds to `google.rpc.Status.details`.
+ repeated google.protobuf.Any details = 5;
+ }
+ // The actual error payload. The nested message structure is for backward
+ // compatibility with Google API client libraries. It also makes the error
+ // more readable to developers.
+ Status error = 1;
+}
diff --git a/vendor/github.com/googleapis/gax-go/v2/call_option.go b/vendor/github.com/googleapis/gax-go/v2/call_option.go
index b1d53dd19..425a7668d 100644
--- a/vendor/github.com/googleapis/gax-go/v2/call_option.go
+++ b/vendor/github.com/googleapis/gax-go/v2/call_option.go
@@ -47,7 +47,7 @@ type CallOption interface {
// Retryer is used by Invoke to determine retry behavior.
type Retryer interface {
- // Retry reports whether a request should be retriedand how long to pause before retrying
+ // Retry reports whether a request should be retried and how long to pause before retrying
// if the previous attempt returned with err. Invoke never calls Retry with nil error.
Retry(err error) (pause time.Duration, shouldRetry bool)
}
@@ -63,6 +63,31 @@ func WithRetry(fn func() Retryer) CallOption {
return retryerOption(fn)
}
+// OnErrorFunc returns a Retryer that retries if and only if the previous attempt
+// returns an error that satisfies shouldRetry.
+//
+// Pause times between retries are specified by bo. bo is only used for its
+// parameters; each Retryer has its own copy.
+func OnErrorFunc(bo Backoff, shouldRetry func(err error) bool) Retryer {
+ return &errorRetryer{
+ shouldRetry: shouldRetry,
+ backoff: bo,
+ }
+}
+
+type errorRetryer struct {
+ backoff Backoff
+ shouldRetry func(err error) bool
+}
+
+func (r *errorRetryer) Retry(err error) (time.Duration, bool) {
+ if r.shouldRetry(err) {
+ return r.backoff.Pause(), true
+ }
+
+ return 0, false
+}
+
// OnCodes returns a Retryer that retries if and only if
// the previous attempt returns a GRPC error whose error code is stored in cc.
// Pause times between retries are specified by bo.
@@ -94,22 +119,25 @@ func (r *boRetryer) Retry(err error) (time.Duration, bool) {
return 0, false
}
-// Backoff implements exponential backoff.
-// The wait time between retries is a random value between 0 and the "retry envelope".
-// The envelope starts at Initial and increases by the factor of Multiplier every retry,
-// but is capped at Max.
+// Backoff implements exponential backoff. The wait time between retries is a
+// random value between 0 and the "retry period" - the time between retries. The
+// retry period starts at Initial and increases by the factor of Multiplier
+// every retry, but is capped at Max.
+//
+// Note: MaxNumRetries / RPCDeadline is specifically not provided. These should
+// be built on top of Backoff.
type Backoff struct {
- // Initial is the initial value of the retry envelope, defaults to 1 second.
+ // Initial is the initial value of the retry period, defaults to 1 second.
Initial time.Duration
- // Max is the maximum value of the retry envelope, defaults to 30 seconds.
+ // Max is the maximum value of the retry period, defaults to 30 seconds.
Max time.Duration
- // Multiplier is the factor by which the retry envelope increases.
+ // Multiplier is the factor by which the retry period increases.
// It should be greater than 1 and defaults to 2.
Multiplier float64
- // cur is the current retry envelope
+ // cur is the current retry period.
cur time.Duration
}
diff --git a/vendor/github.com/googleapis/gax-go/v2/gax.go b/vendor/github.com/googleapis/gax-go/v2/gax.go
index 3fd1b0b84..dfc4beb28 100644
--- a/vendor/github.com/googleapis/gax-go/v2/gax.go
+++ b/vendor/github.com/googleapis/gax-go/v2/gax.go
@@ -36,4 +36,4 @@
package gax
// Version specifies the gax-go version being used.
-const Version = "2.0.4"
+const Version = "2.0.5"
diff --git a/vendor/github.com/googleapis/gax-go/v2/go.mod b/vendor/github.com/googleapis/gax-go/v2/go.mod
index 9cdfaf447..54d6b50e0 100644
--- a/vendor/github.com/googleapis/gax-go/v2/go.mod
+++ b/vendor/github.com/googleapis/gax-go/v2/go.mod
@@ -1,3 +1,12 @@
module github.com/googleapis/gax-go/v2
-require google.golang.org/grpc v1.19.0
+go 1.11
+
+require (
+ github.com/google/go-cmp v0.5.6
+ google.golang.org/api v0.54.0
+ google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8
+ google.golang.org/grpc v1.40.0
+ google.golang.org/protobuf v1.27.1
+
+)
diff --git a/vendor/github.com/googleapis/gax-go/v2/go.sum b/vendor/github.com/googleapis/gax-go/v2/go.sum
index 7fa23ecf9..ccde765ff 100644
--- a/vendor/github.com/googleapis/gax-go/v2/go.sum
+++ b/vendor/github.com/googleapis/gax-go/v2/go.sum
@@ -1,25 +1,527 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
+cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
+cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
+cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
+github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420 h1:a8jGStKg0XqKDlKqjLrXn0ioF5MH36pT7Z0BRTqLhbk=
+golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522 h1:Ve1ORMCxvRmSXBwJK+t3Oy+V2vRW2OetUQBq4rJIkZE=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069 h1:siQdpVirKtzPhKl3lZWozZraCFObP8S1v6PRp0bLrtU=
+golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
+google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
+google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
+google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
+google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
+google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
+google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
+google.golang.org/api v0.54.0 h1:ECJUVngj71QI6XEm7b1sAf8BljU5inEhMbKPR8Lxhhk=
+google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
+google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
+google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8 h1:XosVttQUxX8erNhEruTu053/VchgYuksoS9Bj/OITjU=
+google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
+google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q=
+google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/vendor/github.com/googleapis/gax-go/v2/invoke.go b/vendor/github.com/googleapis/gax-go/v2/invoke.go
index fe31dd004..9fcc29959 100644
--- a/vendor/github.com/googleapis/gax-go/v2/invoke.go
+++ b/vendor/github.com/googleapis/gax-go/v2/invoke.go
@@ -33,13 +33,15 @@ import (
"context"
"strings"
"time"
+
+ "github.com/googleapis/gax-go/v2/apierror"
)
// APICall is a user defined call stub.
type APICall func(context.Context, CallSettings) error
-// Invoke calls the given APICall,
-// performing retries as specified by opts, if any.
+// Invoke calls the given APICall, performing retries as specified by opts, if
+// any.
func Invoke(ctx context.Context, call APICall, opts ...CallOption) error {
var settings CallSettings
for _, opt := range opts {
@@ -71,9 +73,6 @@ func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper
if err == nil {
return nil
}
- if settings.Retry == nil {
- return err
- }
// Never retry permanent certificate errors. (e.x. if ca-certificates
// are not installed). We should only make very few, targeted
// exceptions: many (other) status=Unavailable should be retried, such
@@ -83,6 +82,12 @@ func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper
if strings.Contains(err.Error(), "x509: certificate signed by unknown authority") {
return err
}
+ if apierr, ok := apierror.FromError(err); ok {
+ err = apierr
+ }
+ if settings.Retry == nil {
+ return err
+ }
if retryer == nil {
if r := settings.Retry(); r != nil {
retryer = r
diff --git a/vendor/github.com/googleapis/gnostic/compiler/README.md b/vendor/github.com/googleapis/gnostic/compiler/README.md
index 848b16c69..ee9783d23 100644
--- a/vendor/github.com/googleapis/gnostic/compiler/README.md
+++ b/vendor/github.com/googleapis/gnostic/compiler/README.md
@@ -1,3 +1,4 @@
# Compiler support code
-This directory contains compiler support code used by Gnostic and Gnostic extensions.
\ No newline at end of file
+This directory contains compiler support code used by Gnostic and Gnostic
+extensions.
diff --git a/vendor/github.com/googleapis/gnostic/compiler/context.go b/vendor/github.com/googleapis/gnostic/compiler/context.go
index a64c1b75d..1bfe96121 100644
--- a/vendor/github.com/googleapis/gnostic/compiler/context.go
+++ b/vendor/github.com/googleapis/gnostic/compiler/context.go
@@ -1,4 +1,4 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
+// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,30 +14,36 @@
package compiler
+import (
+ yaml "gopkg.in/yaml.v3"
+)
+
// Context contains state of the compiler as it traverses a document.
type Context struct {
Parent *Context
Name string
+ Node *yaml.Node
ExtensionHandlers *[]ExtensionHandler
}
// NewContextWithExtensions returns a new object representing the compiler state
-func NewContextWithExtensions(name string, parent *Context, extensionHandlers *[]ExtensionHandler) *Context {
- return &Context{Name: name, Parent: parent, ExtensionHandlers: extensionHandlers}
+func NewContextWithExtensions(name string, node *yaml.Node, parent *Context, extensionHandlers *[]ExtensionHandler) *Context {
+ return &Context{Name: name, Node: node, Parent: parent, ExtensionHandlers: extensionHandlers}
}
// NewContext returns a new object representing the compiler state
-func NewContext(name string, parent *Context) *Context {
+func NewContext(name string, node *yaml.Node, parent *Context) *Context {
if parent != nil {
- return &Context{Name: name, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers}
+ return &Context{Name: name, Node: node, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers}
}
return &Context{Name: name, Parent: parent, ExtensionHandlers: nil}
}
// Description returns a text description of the compiler state
func (context *Context) Description() string {
+ name := context.Name
if context.Parent != nil {
- return context.Parent.Description() + "." + context.Name
+ name = context.Parent.Description() + "." + name
}
- return context.Name
+ return name
}
diff --git a/vendor/github.com/googleapis/gnostic/compiler/error.go b/vendor/github.com/googleapis/gnostic/compiler/error.go
index d8672c100..6f40515d6 100644
--- a/vendor/github.com/googleapis/gnostic/compiler/error.go
+++ b/vendor/github.com/googleapis/gnostic/compiler/error.go
@@ -1,4 +1,4 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
+// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,6 +14,8 @@
package compiler
+import "fmt"
+
// Error represents compiler errors and their location in the document.
type Error struct {
Context *Context
@@ -25,12 +27,19 @@ func NewError(context *Context, message string) *Error {
return &Error{Context: context, Message: message}
}
+func (err *Error) locationDescription() string {
+ if err.Context.Node != nil {
+ return fmt.Sprintf("[%d,%d] %s", err.Context.Node.Line, err.Context.Node.Column, err.Context.Description())
+ }
+ return err.Context.Description()
+}
+
// Error returns the string value of an Error.
func (err *Error) Error() string {
if err.Context == nil {
- return "ERROR " + err.Message
+ return err.Message
}
- return "ERROR " + err.Context.Description() + " " + err.Message
+ return err.locationDescription() + " " + err.Message
}
// ErrorGroup is a container for groups of Error values.
diff --git a/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go b/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go
deleted file mode 100644
index 1f85b650e..000000000
--- a/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package compiler
-
-import (
- "bytes"
- "fmt"
- "os/exec"
-
- "strings"
-
- "errors"
-
- "github.com/golang/protobuf/proto"
- "github.com/golang/protobuf/ptypes/any"
- ext_plugin "github.com/googleapis/gnostic/extensions"
- yaml "gopkg.in/yaml.v2"
-)
-
-// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions.
-type ExtensionHandler struct {
- Name string
-}
-
-// HandleExtension calls a binary extension handler.
-func HandleExtension(context *Context, in interface{}, extensionName string) (bool, *any.Any, error) {
- handled := false
- var errFromPlugin error
- var outFromPlugin *any.Any
-
- if context != nil && context.ExtensionHandlers != nil && len(*(context.ExtensionHandlers)) != 0 {
- for _, customAnyProtoGenerator := range *(context.ExtensionHandlers) {
- outFromPlugin, errFromPlugin = customAnyProtoGenerator.handle(in, extensionName)
- if outFromPlugin == nil {
- continue
- } else {
- handled = true
- break
- }
- }
- }
- return handled, outFromPlugin, errFromPlugin
-}
-
-func (extensionHandlers *ExtensionHandler) handle(in interface{}, extensionName string) (*any.Any, error) {
- if extensionHandlers.Name != "" {
- binary, _ := yaml.Marshal(in)
-
- request := &ext_plugin.ExtensionHandlerRequest{}
-
- version := &ext_plugin.Version{}
- version.Major = 0
- version.Minor = 1
- version.Patch = 0
- request.CompilerVersion = version
-
- request.Wrapper = &ext_plugin.Wrapper{}
-
- request.Wrapper.Version = "v2"
- request.Wrapper.Yaml = string(binary)
- request.Wrapper.ExtensionName = extensionName
-
- requestBytes, _ := proto.Marshal(request)
- cmd := exec.Command(extensionHandlers.Name)
- cmd.Stdin = bytes.NewReader(requestBytes)
- output, err := cmd.Output()
-
- if err != nil {
- fmt.Printf("Error: %+v\n", err)
- return nil, err
- }
- response := &ext_plugin.ExtensionHandlerResponse{}
- err = proto.Unmarshal(output, response)
- if err != nil {
- fmt.Printf("Error: %+v\n", err)
- fmt.Printf("%s\n", string(output))
- return nil, err
- }
- if !response.Handled {
- return nil, nil
- }
- if len(response.Error) != 0 {
- message := fmt.Sprintf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Error, ","))
- return nil, errors.New(message)
- }
- return response.Value, nil
- }
- return nil, nil
-}
diff --git a/vendor/github.com/googleapis/gnostic/compiler/extensions.go b/vendor/github.com/googleapis/gnostic/compiler/extensions.go
new file mode 100644
index 000000000..20848a0a1
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/compiler/extensions.go
@@ -0,0 +1,85 @@
+// Copyright 2017 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compiler
+
+import (
+ "bytes"
+ "fmt"
+ "os/exec"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/ptypes/any"
+ extensions "github.com/googleapis/gnostic/extensions"
+ yaml "gopkg.in/yaml.v3"
+)
+
+// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions.
+type ExtensionHandler struct {
+ Name string
+}
+
+// CallExtension calls a binary extension handler.
+func CallExtension(context *Context, in *yaml.Node, extensionName string) (handled bool, response *any.Any, err error) {
+ if context == nil || context.ExtensionHandlers == nil {
+ return false, nil, nil
+ }
+ handled = false
+ for _, handler := range *(context.ExtensionHandlers) {
+ response, err = handler.handle(in, extensionName)
+ if response == nil {
+ continue
+ } else {
+ handled = true
+ break
+ }
+ }
+ return handled, response, err
+}
+
+func (extensionHandlers *ExtensionHandler) handle(in *yaml.Node, extensionName string) (*any.Any, error) {
+ if extensionHandlers.Name != "" {
+ yamlData, _ := yaml.Marshal(in)
+ request := &extensions.ExtensionHandlerRequest{
+ CompilerVersion: &extensions.Version{
+ Major: 0,
+ Minor: 1,
+ Patch: 0,
+ },
+ Wrapper: &extensions.Wrapper{
+ Version: "unknown", // TODO: set this to the type/version of spec being parsed.
+ Yaml: string(yamlData),
+ ExtensionName: extensionName,
+ },
+ }
+ requestBytes, _ := proto.Marshal(request)
+ cmd := exec.Command(extensionHandlers.Name)
+ cmd.Stdin = bytes.NewReader(requestBytes)
+ output, err := cmd.Output()
+ if err != nil {
+ return nil, err
+ }
+ response := &extensions.ExtensionHandlerResponse{}
+ err = proto.Unmarshal(output, response)
+ if err != nil || !response.Handled {
+ return nil, err
+ }
+ if len(response.Errors) != 0 {
+ return nil, fmt.Errorf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Errors, ","))
+ }
+ return response.Value, nil
+ }
+ return nil, nil
+}
diff --git a/vendor/github.com/googleapis/gnostic/compiler/helpers.go b/vendor/github.com/googleapis/gnostic/compiler/helpers.go
index 76df635ff..48f02f395 100644
--- a/vendor/github.com/googleapis/gnostic/compiler/helpers.go
+++ b/vendor/github.com/googleapis/gnostic/compiler/helpers.go
@@ -1,4 +1,4 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
+// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -16,56 +16,63 @@ package compiler
import (
"fmt"
- "gopkg.in/yaml.v2"
"regexp"
"sort"
"strconv"
+
+ "github.com/googleapis/gnostic/jsonschema"
+ "gopkg.in/yaml.v3"
)
// compiler helper functions, usually called from generated code
-// UnpackMap gets a yaml.MapSlice if possible.
-func UnpackMap(in interface{}) (yaml.MapSlice, bool) {
- m, ok := in.(yaml.MapSlice)
- if ok {
- return m, true
- }
- // do we have an empty array?
- a, ok := in.([]interface{})
- if ok && len(a) == 0 {
- // if so, return an empty map
- return yaml.MapSlice{}, true
+// UnpackMap gets a *yaml.Node if possible.
+func UnpackMap(in *yaml.Node) (*yaml.Node, bool) {
+ if in == nil {
+ return nil, false
}
- return nil, false
+ return in, true
}
-// SortedKeysForMap returns the sorted keys of a yaml.MapSlice.
-func SortedKeysForMap(m yaml.MapSlice) []string {
+// SortedKeysForMap returns the sorted keys of a mapping *yaml.Node.
+func SortedKeysForMap(m *yaml.Node) []string {
keys := make([]string, 0)
- for _, item := range m {
- keys = append(keys, item.Key.(string))
+ if m.Kind == yaml.MappingNode {
+ for i := 0; i < len(m.Content); i += 2 {
+ keys = append(keys, m.Content[i].Value)
+ }
}
sort.Strings(keys)
return keys
}
-// MapHasKey returns true if a yaml.MapSlice contains a specified key.
-func MapHasKey(m yaml.MapSlice, key string) bool {
- for _, item := range m {
- itemKey, ok := item.Key.(string)
- if ok && key == itemKey {
- return true
+// MapHasKey returns true if a mapping *yaml.Node contains a specified key.
+func MapHasKey(m *yaml.Node, key string) bool {
+ if m == nil {
+ return false
+ }
+ if m.Kind == yaml.MappingNode {
+ for i := 0; i < len(m.Content); i += 2 {
+ itemKey := m.Content[i].Value
+ if key == itemKey {
+ return true
+ }
}
}
return false
}
// MapValueForKey gets the value of a map value for a specified key.
-func MapValueForKey(m yaml.MapSlice, key string) interface{} {
- for _, item := range m {
- itemKey, ok := item.Key.(string)
- if ok && key == itemKey {
- return item.Value
+func MapValueForKey(m *yaml.Node, key string) *yaml.Node {
+ if m == nil {
+ return nil
+ }
+ if m.Kind == yaml.MappingNode {
+ for i := 0; i < len(m.Content); i += 2 {
+ itemKey := m.Content[i].Value
+ if key == itemKey {
+ return m.Content[i+1]
+ }
}
}
return nil
@@ -83,8 +90,118 @@ func ConvertInterfaceArrayToStringArray(interfaceArray []interface{}) []string {
return stringArray
}
+// SequenceNodeForNode returns a node if it is a SequenceNode.
+func SequenceNodeForNode(node *yaml.Node) (*yaml.Node, bool) {
+ if node.Kind != yaml.SequenceNode {
+ return nil, false
+ }
+ return node, true
+}
+
+// BoolForScalarNode returns the bool value of a node.
+func BoolForScalarNode(node *yaml.Node) (bool, bool) {
+ if node == nil {
+ return false, false
+ }
+ if node.Kind == yaml.DocumentNode {
+ return BoolForScalarNode(node.Content[0])
+ }
+ if node.Kind != yaml.ScalarNode {
+ return false, false
+ }
+ if node.Tag != "!!bool" {
+ return false, false
+ }
+ v, err := strconv.ParseBool(node.Value)
+ if err != nil {
+ return false, false
+ }
+ return v, true
+}
+
+// IntForScalarNode returns the integer value of a node.
+func IntForScalarNode(node *yaml.Node) (int64, bool) {
+ if node == nil {
+ return 0, false
+ }
+ if node.Kind == yaml.DocumentNode {
+ return IntForScalarNode(node.Content[0])
+ }
+ if node.Kind != yaml.ScalarNode {
+ return 0, false
+ }
+ if node.Tag != "!!int" {
+ return 0, false
+ }
+ v, err := strconv.ParseInt(node.Value, 10, 64)
+ if err != nil {
+ return 0, false
+ }
+ return v, true
+}
+
+// FloatForScalarNode returns the float value of a node.
+func FloatForScalarNode(node *yaml.Node) (float64, bool) {
+ if node == nil {
+ return 0.0, false
+ }
+ if node.Kind == yaml.DocumentNode {
+ return FloatForScalarNode(node.Content[0])
+ }
+ if node.Kind != yaml.ScalarNode {
+ return 0.0, false
+ }
+ if (node.Tag != "!!int") && (node.Tag != "!!float") {
+ return 0.0, false
+ }
+ v, err := strconv.ParseFloat(node.Value, 64)
+ if err != nil {
+ return 0.0, false
+ }
+ return v, true
+}
+
+// StringForScalarNode returns the string value of a node.
+func StringForScalarNode(node *yaml.Node) (string, bool) {
+ if node == nil {
+ return "", false
+ }
+ if node.Kind == yaml.DocumentNode {
+ return StringForScalarNode(node.Content[0])
+ }
+ switch node.Kind {
+ case yaml.ScalarNode:
+ switch node.Tag {
+ case "!!int":
+ return node.Value, true
+ case "!!str":
+ return node.Value, true
+ case "!!timestamp":
+ return node.Value, true
+ case "!!null":
+ return "", true
+ default:
+ return "", false
+ }
+ default:
+ return "", false
+ }
+}
+
+// StringArrayForSequenceNode converts a sequence node to an array of strings, if possible.
+func StringArrayForSequenceNode(node *yaml.Node) []string {
+ stringArray := make([]string, 0)
+ for _, item := range node.Content {
+ v, ok := StringForScalarNode(item)
+ if ok {
+ stringArray = append(stringArray, v)
+ }
+ }
+ return stringArray
+}
+
// MissingKeysInMap identifies which keys from a list of required keys are not in a map.
-func MissingKeysInMap(m yaml.MapSlice, requiredKeys []string) []string {
+func MissingKeysInMap(m *yaml.Node, requiredKeys []string) []string {
missingKeys := make([]string, 0)
for _, k := range requiredKeys {
if !MapHasKey(m, k) {
@@ -95,64 +212,109 @@ func MissingKeysInMap(m yaml.MapSlice, requiredKeys []string) []string {
}
// InvalidKeysInMap returns keys in a map that don't match a list of allowed keys and patterns.
-func InvalidKeysInMap(m yaml.MapSlice, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string {
+func InvalidKeysInMap(m *yaml.Node, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string {
invalidKeys := make([]string, 0)
- for _, item := range m {
- itemKey, ok := item.Key.(string)
- if ok {
- key := itemKey
- found := false
- // does the key match an allowed key?
- for _, allowedKey := range allowedKeys {
- if key == allowedKey {
+ if m == nil || m.Kind != yaml.MappingNode {
+ return invalidKeys
+ }
+ for i := 0; i < len(m.Content); i += 2 {
+ key := m.Content[i].Value
+ found := false
+ // does the key match an allowed key?
+ for _, allowedKey := range allowedKeys {
+ if key == allowedKey {
+ found = true
+ break
+ }
+ }
+ if !found {
+ // does the key match an allowed pattern?
+ for _, allowedPattern := range allowedPatterns {
+ if allowedPattern.MatchString(key) {
found = true
break
}
}
if !found {
- // does the key match an allowed pattern?
- for _, allowedPattern := range allowedPatterns {
- if allowedPattern.MatchString(key) {
- found = true
- break
- }
- }
- if !found {
- invalidKeys = append(invalidKeys, key)
- }
+ invalidKeys = append(invalidKeys, key)
}
}
}
return invalidKeys
}
-// DescribeMap describes a map (for debugging purposes).
-func DescribeMap(in interface{}, indent string) string {
- description := ""
- m, ok := in.(map[string]interface{})
- if ok {
- keys := make([]string, 0)
- for k := range m {
- keys = append(keys, k)
- }
- sort.Strings(keys)
- for _, k := range keys {
- v := m[k]
- description += fmt.Sprintf("%s%s:\n", indent, k)
- description += DescribeMap(v, indent+" ")
- }
- return description
+// NewNullNode creates a new Null node.
+func NewNullNode() *yaml.Node {
+ node := &yaml.Node{
+ Kind: yaml.ScalarNode,
+ Tag: "!!null",
}
- a, ok := in.([]interface{})
- if ok {
- for i, v := range a {
- description += fmt.Sprintf("%s%d:\n", indent, i)
- description += DescribeMap(v, indent+" ")
- }
- return description
+ return node
+}
+
+// NewMappingNode creates a new Mapping node.
+func NewMappingNode() *yaml.Node {
+ return &yaml.Node{
+ Kind: yaml.MappingNode,
+ Content: make([]*yaml.Node, 0),
+ }
+}
+
+// NewSequenceNode creates a new Sequence node.
+func NewSequenceNode() *yaml.Node {
+ node := &yaml.Node{
+ Kind: yaml.SequenceNode,
+ Content: make([]*yaml.Node, 0),
+ }
+ return node
+}
+
+// NewScalarNodeForString creates a new node to hold a string.
+func NewScalarNodeForString(s string) *yaml.Node {
+ return &yaml.Node{
+ Kind: yaml.ScalarNode,
+ Tag: "!!str",
+ Value: s,
+ }
+}
+
+// NewSequenceNodeForStringArray creates a new node to hold an array of strings.
+func NewSequenceNodeForStringArray(strings []string) *yaml.Node {
+ node := &yaml.Node{
+ Kind: yaml.SequenceNode,
+ Content: make([]*yaml.Node, 0),
+ }
+ for _, s := range strings {
+ node.Content = append(node.Content, NewScalarNodeForString(s))
+ }
+ return node
+}
+
+// NewScalarNodeForBool creates a new node to hold a bool.
+func NewScalarNodeForBool(b bool) *yaml.Node {
+ return &yaml.Node{
+ Kind: yaml.ScalarNode,
+ Tag: "!!bool",
+ Value: fmt.Sprintf("%t", b),
+ }
+}
+
+// NewScalarNodeForFloat creates a new node to hold a float.
+func NewScalarNodeForFloat(f float64) *yaml.Node {
+ return &yaml.Node{
+ Kind: yaml.ScalarNode,
+ Tag: "!!float",
+ Value: fmt.Sprintf("%g", f),
+ }
+}
+
+// NewScalarNodeForInt creates a new node to hold an integer.
+func NewScalarNodeForInt(i int64) *yaml.Node {
+ return &yaml.Node{
+ Kind: yaml.ScalarNode,
+ Tag: "!!int",
+ Value: fmt.Sprintf("%d", i),
}
- description += fmt.Sprintf("%s%+v\n", indent, in)
- return description
}
// PluralProperties returns the string "properties" pluralized.
@@ -195,3 +357,40 @@ func StringValue(item interface{}) (value string, ok bool) {
}
return "", false
}
+
+// Description returns a human-readable representation of an item.
+func Description(item interface{}) string {
+ value, ok := item.(*yaml.Node)
+ if ok {
+ return jsonschema.Render(value)
+ }
+ return fmt.Sprintf("%+v", item)
+}
+
+// Display returns a description of a node for use in error messages.
+func Display(node *yaml.Node) string {
+ switch node.Kind {
+ case yaml.ScalarNode:
+ switch node.Tag {
+ case "!!str":
+ return fmt.Sprintf("%s (string)", node.Value)
+ }
+ }
+ return fmt.Sprintf("%+v (%T)", node, node)
+}
+
+// Marshal creates a yaml version of a structure in our preferred style
+func Marshal(in *yaml.Node) []byte {
+ clearStyle(in)
+ //bytes, _ := yaml.Marshal(&yaml.Node{Kind: yaml.DocumentNode, Content: []*yaml.Node{in}})
+ bytes, _ := yaml.Marshal(in)
+
+ return bytes
+}
+
+func clearStyle(node *yaml.Node) {
+ node.Style = 0
+ for _, c := range node.Content {
+ clearStyle(c)
+ }
+}
diff --git a/vendor/github.com/googleapis/gnostic/compiler/main.go b/vendor/github.com/googleapis/gnostic/compiler/main.go
index 9713a21cc..ce9fcc456 100644
--- a/vendor/github.com/googleapis/gnostic/compiler/main.go
+++ b/vendor/github.com/googleapis/gnostic/compiler/main.go
@@ -1,4 +1,4 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
+// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/googleapis/gnostic/compiler/reader.go b/vendor/github.com/googleapis/gnostic/compiler/reader.go
index 955b985b8..be0e8b40c 100644
--- a/vendor/github.com/googleapis/gnostic/compiler/reader.go
+++ b/vendor/github.com/googleapis/gnostic/compiler/reader.go
@@ -1,4 +1,4 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
+// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,7 +15,6 @@
package compiler
import (
- "errors"
"fmt"
"io/ioutil"
"log"
@@ -23,18 +22,30 @@ import (
"net/url"
"path/filepath"
"strings"
+ "sync"
- yaml "gopkg.in/yaml.v2"
+ yaml "gopkg.in/yaml.v3"
)
+var verboseReader = false
+
var fileCache map[string][]byte
-var infoCache map[string]interface{}
-var count int64
+var infoCache map[string]*yaml.Node
-var verboseReader = false
var fileCacheEnable = true
var infoCacheEnable = true
+// These locks are used to synchronize accesses to the fileCache and infoCache
+// maps (above). They are global state and can throw thread-related errors
+// when modified from separate goroutines. The general strategy is to protect
+// all public functions in this file with mutex Lock() calls. As a result, to
+// avoid deadlock, these public functions should not call other public
+// functions, so some public functions have private equivalents.
+// In the future, we might consider replacing the maps with sync.Map and
+// eliminating these mutexes.
+var fileCacheMutex sync.Mutex
+var infoCacheMutex sync.Mutex
+
func initializeFileCache() {
if fileCache == nil {
fileCache = make(map[string][]byte, 0)
@@ -43,27 +54,42 @@ func initializeFileCache() {
func initializeInfoCache() {
if infoCache == nil {
- infoCache = make(map[string]interface{}, 0)
+ infoCache = make(map[string]*yaml.Node, 0)
}
}
+// EnableFileCache turns on file caching.
func EnableFileCache() {
+ fileCacheMutex.Lock()
+ defer fileCacheMutex.Unlock()
fileCacheEnable = true
}
+// EnableInfoCache turns on parsed info caching.
func EnableInfoCache() {
+ infoCacheMutex.Lock()
+ defer infoCacheMutex.Unlock()
infoCacheEnable = true
}
+// DisableFileCache turns off file caching.
func DisableFileCache() {
+ fileCacheMutex.Lock()
+ defer fileCacheMutex.Unlock()
fileCacheEnable = false
}
+// DisableInfoCache turns off parsed info caching.
func DisableInfoCache() {
+ infoCacheMutex.Lock()
+ defer infoCacheMutex.Unlock()
infoCacheEnable = false
}
+// RemoveFromFileCache removes an entry from the file cache.
func RemoveFromFileCache(fileurl string) {
+ fileCacheMutex.Lock()
+ defer fileCacheMutex.Unlock()
if !fileCacheEnable {
return
}
@@ -71,7 +97,10 @@ func RemoveFromFileCache(fileurl string) {
delete(fileCache, fileurl)
}
+// RemoveFromInfoCache removes an entry from the info cache.
func RemoveFromInfoCache(filename string) {
+ infoCacheMutex.Lock()
+ defer infoCacheMutex.Unlock()
if !infoCacheEnable {
return
}
@@ -79,21 +108,31 @@ func RemoveFromInfoCache(filename string) {
delete(infoCache, filename)
}
-func GetInfoCache() map[string]interface{} {
+// GetInfoCache returns the info cache map.
+func GetInfoCache() map[string]*yaml.Node {
+ infoCacheMutex.Lock()
+ defer infoCacheMutex.Unlock()
if infoCache == nil {
initializeInfoCache()
}
return infoCache
}
+// ClearFileCache clears the file cache.
func ClearFileCache() {
+ fileCacheMutex.Lock()
+ defer fileCacheMutex.Unlock()
fileCache = make(map[string][]byte, 0)
}
+// ClearInfoCache clears the info cache.
func ClearInfoCache() {
- infoCache = make(map[string]interface{})
+ infoCacheMutex.Lock()
+ defer infoCacheMutex.Unlock()
+ infoCache = make(map[string]*yaml.Node)
}
+// ClearCaches clears all caches.
func ClearCaches() {
ClearFileCache()
ClearInfoCache()
@@ -101,6 +140,12 @@ func ClearCaches() {
// FetchFile gets a specified file from the local filesystem or a remote location.
func FetchFile(fileurl string) ([]byte, error) {
+ fileCacheMutex.Lock()
+ defer fileCacheMutex.Unlock()
+ return fetchFile(fileurl)
+}
+
+func fetchFile(fileurl string) ([]byte, error) {
var bytes []byte
initializeFileCache()
if fileCacheEnable {
@@ -121,7 +166,7 @@ func FetchFile(fileurl string) ([]byte, error) {
}
defer response.Body.Close()
if response.StatusCode != 200 {
- return nil, errors.New(fmt.Sprintf("Error downloading %s: %s", fileurl, response.Status))
+ return nil, fmt.Errorf("Error downloading %s: %s", fileurl, response.Status)
}
bytes, err = ioutil.ReadAll(response.Body)
if fileCacheEnable && err == nil {
@@ -132,11 +177,17 @@ func FetchFile(fileurl string) ([]byte, error) {
// ReadBytesForFile reads the bytes of a file.
func ReadBytesForFile(filename string) ([]byte, error) {
+ fileCacheMutex.Lock()
+ defer fileCacheMutex.Unlock()
+ return readBytesForFile(filename)
+}
+
+func readBytesForFile(filename string) ([]byte, error) {
// is the filename a url?
fileurl, _ := url.Parse(filename)
if fileurl.Scheme != "" {
// yes, fetch it
- bytes, err := FetchFile(filename)
+ bytes, err := fetchFile(filename)
if err != nil {
return nil, err
}
@@ -150,8 +201,14 @@ func ReadBytesForFile(filename string) ([]byte, error) {
return bytes, nil
}
-// ReadInfoFromBytes unmarshals a file as a yaml.MapSlice.
-func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) {
+// ReadInfoFromBytes unmarshals a file as a *yaml.Node.
+func ReadInfoFromBytes(filename string, bytes []byte) (*yaml.Node, error) {
+ infoCacheMutex.Lock()
+ defer infoCacheMutex.Unlock()
+ return readInfoFromBytes(filename, bytes)
+}
+
+func readInfoFromBytes(filename string, bytes []byte) (*yaml.Node, error) {
initializeInfoCache()
if infoCacheEnable {
cachedInfo, ok := infoCache[filename]
@@ -165,19 +222,23 @@ func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) {
log.Printf("Reading info for file %s", filename)
}
}
- var info yaml.MapSlice
+ var info yaml.Node
err := yaml.Unmarshal(bytes, &info)
if err != nil {
return nil, err
}
if infoCacheEnable && len(filename) > 0 {
- infoCache[filename] = info
+ infoCache[filename] = &info
}
- return info, nil
+ return &info, nil
}
// ReadInfoForRef reads a file and return the fragment needed to resolve a $ref.
-func ReadInfoForRef(basefile string, ref string) (interface{}, error) {
+func ReadInfoForRef(basefile string, ref string) (*yaml.Node, error) {
+ fileCacheMutex.Lock()
+ defer fileCacheMutex.Unlock()
+ infoCacheMutex.Lock()
+ defer infoCacheMutex.Unlock()
initializeInfoCache()
if infoCacheEnable {
info, ok := infoCache[ref]
@@ -191,7 +252,6 @@ func ReadInfoForRef(basefile string, ref string) (interface{}, error) {
log.Printf("Reading info for ref %s#%s", basefile, ref)
}
}
- count = count + 1
basedir, _ := filepath.Split(basefile)
parts := strings.Split(ref, "#")
var filename string
@@ -204,24 +264,30 @@ func ReadInfoForRef(basefile string, ref string) (interface{}, error) {
} else {
filename = basefile
}
- bytes, err := ReadBytesForFile(filename)
+ bytes, err := readBytesForFile(filename)
if err != nil {
return nil, err
}
- info, err := ReadInfoFromBytes(filename, bytes)
+ info, err := readInfoFromBytes(filename, bytes)
+ if info != nil && info.Kind == yaml.DocumentNode {
+ info = info.Content[0]
+ }
if err != nil {
log.Printf("File error: %v\n", err)
} else {
+ if info == nil {
+ return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref))
+ }
if len(parts) > 1 {
path := strings.Split(parts[1], "/")
for i, key := range path {
if i > 0 {
- m, ok := info.(yaml.MapSlice)
- if ok {
+ m := info
+ if true {
found := false
- for _, section := range m {
- if section.Key == key {
- info = section.Value
+ for i := 0; i < len(m.Content); i += 2 {
+ if m.Content[i].Value == key {
+ info = m.Content[i+1]
found = true
}
}
diff --git a/vendor/github.com/googleapis/gnostic/extensions/README.md b/vendor/github.com/googleapis/gnostic/extensions/README.md
index ff1c2eb1e..4b5d63e58 100644
--- a/vendor/github.com/googleapis/gnostic/extensions/README.md
+++ b/vendor/github.com/googleapis/gnostic/extensions/README.md
@@ -1,5 +1,13 @@
# Extensions
-This directory contains support code for building Gnostic extensions and associated examples.
+**Extension Support is experimental.**
-Extensions are used to compile vendor or specification extensions into protocol buffer structures.
+This directory contains support code for building Gnostic extension handlers and
+associated examples.
+
+Extension handlers can be used to compile vendor or specification extensions
+into protocol buffer structures.
+
+Like plugins, extension handlers are built as separate executables. Extension
+bodies are written to extension handlers as serialized
+ExtensionHandlerRequests.
diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go
index 432dc06e6..5aab58ebf 100644
--- a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go
+++ b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go
@@ -1,148 +1,181 @@
+// Copyright 2017 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.15.5
// source: extensions/extension.proto
-package openapiextension_v1
+package gnostic_extension_v1
import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- any "github.com/golang/protobuf/ptypes/any"
- math "math"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ reflect "reflect"
+ sync "sync"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
-// The version number of OpenAPI compiler.
+// The version number of Gnostic.
type Version struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
Major int32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"`
Minor int32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"`
Patch int32 `protobuf:"varint,3,opt,name=patch,proto3" json:"patch,omitempty"`
// A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
// be empty for mainline stable releases.
- Suffix string `protobuf:"bytes,4,opt,name=suffix,proto3" json:"suffix,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Suffix string `protobuf:"bytes,4,opt,name=suffix,proto3" json:"suffix,omitempty"`
}
-func (m *Version) Reset() { *m = Version{} }
-func (m *Version) String() string { return proto.CompactTextString(m) }
-func (*Version) ProtoMessage() {}
-func (*Version) Descriptor() ([]byte, []int) {
- return fileDescriptor_661e47e790f76671, []int{0}
+func (x *Version) Reset() {
+ *x = Version{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_extensions_extension_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Version) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Version.Unmarshal(m, b)
-}
-func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Version.Marshal(b, m, deterministic)
+func (x *Version) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Version) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Version.Merge(m, src)
-}
-func (m *Version) XXX_Size() int {
- return xxx_messageInfo_Version.Size(m)
-}
-func (m *Version) XXX_DiscardUnknown() {
- xxx_messageInfo_Version.DiscardUnknown(m)
+
+func (*Version) ProtoMessage() {}
+
+func (x *Version) ProtoReflect() protoreflect.Message {
+ mi := &file_extensions_extension_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Version proto.InternalMessageInfo
+// Deprecated: Use Version.ProtoReflect.Descriptor instead.
+func (*Version) Descriptor() ([]byte, []int) {
+ return file_extensions_extension_proto_rawDescGZIP(), []int{0}
+}
-func (m *Version) GetMajor() int32 {
- if m != nil {
- return m.Major
+func (x *Version) GetMajor() int32 {
+ if x != nil {
+ return x.Major
}
return 0
}
-func (m *Version) GetMinor() int32 {
- if m != nil {
- return m.Minor
+func (x *Version) GetMinor() int32 {
+ if x != nil {
+ return x.Minor
}
return 0
}
-func (m *Version) GetPatch() int32 {
- if m != nil {
- return m.Patch
+func (x *Version) GetPatch() int32 {
+ if x != nil {
+ return x.Patch
}
return 0
}
-func (m *Version) GetSuffix() string {
- if m != nil {
- return m.Suffix
+func (x *Version) GetSuffix() string {
+ if x != nil {
+ return x.Suffix
}
return ""
}
// An encoded Request is written to the ExtensionHandler's stdin.
type ExtensionHandlerRequest struct {
- // The OpenAPI descriptions that were explicitly listed on the command line.
- // The specifications will appear in the order they are specified to gnostic.
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The extension to process.
Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper,proto3" json:"wrapper,omitempty"`
- // The version number of openapi compiler.
- CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion,proto3" json:"compiler_version,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ // The version number of Gnostic.
+ CompilerVersion *Version `protobuf:"bytes,2,opt,name=compiler_version,json=compilerVersion,proto3" json:"compiler_version,omitempty"`
}
-func (m *ExtensionHandlerRequest) Reset() { *m = ExtensionHandlerRequest{} }
-func (m *ExtensionHandlerRequest) String() string { return proto.CompactTextString(m) }
-func (*ExtensionHandlerRequest) ProtoMessage() {}
-func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_661e47e790f76671, []int{1}
+func (x *ExtensionHandlerRequest) Reset() {
+ *x = ExtensionHandlerRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_extensions_extension_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *ExtensionHandlerRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ExtensionHandlerRequest.Unmarshal(m, b)
-}
-func (m *ExtensionHandlerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ExtensionHandlerRequest.Marshal(b, m, deterministic)
-}
-func (m *ExtensionHandlerRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExtensionHandlerRequest.Merge(m, src)
-}
-func (m *ExtensionHandlerRequest) XXX_Size() int {
- return xxx_messageInfo_ExtensionHandlerRequest.Size(m)
+func (x *ExtensionHandlerRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *ExtensionHandlerRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_ExtensionHandlerRequest.DiscardUnknown(m)
+
+func (*ExtensionHandlerRequest) ProtoMessage() {}
+
+func (x *ExtensionHandlerRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_extensions_extension_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_ExtensionHandlerRequest proto.InternalMessageInfo
+// Deprecated: Use ExtensionHandlerRequest.ProtoReflect.Descriptor instead.
+func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) {
+ return file_extensions_extension_proto_rawDescGZIP(), []int{1}
+}
-func (m *ExtensionHandlerRequest) GetWrapper() *Wrapper {
- if m != nil {
- return m.Wrapper
+func (x *ExtensionHandlerRequest) GetWrapper() *Wrapper {
+ if x != nil {
+ return x.Wrapper
}
return nil
}
-func (m *ExtensionHandlerRequest) GetCompilerVersion() *Version {
- if m != nil {
- return m.CompilerVersion
+func (x *ExtensionHandlerRequest) GetCompilerVersion() *Version {
+ if x != nil {
+ return x.CompilerVersion
}
return nil
}
// The extensions writes an encoded ExtensionHandlerResponse to stdout.
type ExtensionHandlerResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
// true if the extension is handled by the extension handler; false otherwise
Handled bool `protobuf:"varint,1,opt,name=handled,proto3" json:"handled,omitempty"`
- // Error message. If non-empty, the extension handling failed.
+ // Error message(s). If non-empty, the extension handling failed.
// The extension handler process should exit with status code zero
// even if it reports an error in this way.
//
@@ -151,150 +184,278 @@ type ExtensionHandlerResponse struct {
// itself -- such as the input Document being unparseable -- should be
// reported by writing a message to stderr and exiting with a non-zero
// status code.
- Error []string `protobuf:"bytes,2,rep,name=error,proto3" json:"error,omitempty"`
+ Errors []string `protobuf:"bytes,2,rep,name=errors,proto3" json:"errors,omitempty"`
// text output
- Value *any.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Value *anypb.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
}
-func (m *ExtensionHandlerResponse) Reset() { *m = ExtensionHandlerResponse{} }
-func (m *ExtensionHandlerResponse) String() string { return proto.CompactTextString(m) }
-func (*ExtensionHandlerResponse) ProtoMessage() {}
-func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_661e47e790f76671, []int{2}
+func (x *ExtensionHandlerResponse) Reset() {
+ *x = ExtensionHandlerResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_extensions_extension_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *ExtensionHandlerResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ExtensionHandlerResponse.Unmarshal(m, b)
-}
-func (m *ExtensionHandlerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ExtensionHandlerResponse.Marshal(b, m, deterministic)
-}
-func (m *ExtensionHandlerResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExtensionHandlerResponse.Merge(m, src)
+func (x *ExtensionHandlerResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *ExtensionHandlerResponse) XXX_Size() int {
- return xxx_messageInfo_ExtensionHandlerResponse.Size(m)
-}
-func (m *ExtensionHandlerResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_ExtensionHandlerResponse.DiscardUnknown(m)
+
+func (*ExtensionHandlerResponse) ProtoMessage() {}
+
+func (x *ExtensionHandlerResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_extensions_extension_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_ExtensionHandlerResponse proto.InternalMessageInfo
+// Deprecated: Use ExtensionHandlerResponse.ProtoReflect.Descriptor instead.
+func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) {
+ return file_extensions_extension_proto_rawDescGZIP(), []int{2}
+}
-func (m *ExtensionHandlerResponse) GetHandled() bool {
- if m != nil {
- return m.Handled
+func (x *ExtensionHandlerResponse) GetHandled() bool {
+ if x != nil {
+ return x.Handled
}
return false
}
-func (m *ExtensionHandlerResponse) GetError() []string {
- if m != nil {
- return m.Error
+func (x *ExtensionHandlerResponse) GetErrors() []string {
+ if x != nil {
+ return x.Errors
}
return nil
}
-func (m *ExtensionHandlerResponse) GetValue() *any.Any {
- if m != nil {
- return m.Value
+func (x *ExtensionHandlerResponse) GetValue() *anypb.Any {
+ if x != nil {
+ return x.Value
}
return nil
}
type Wrapper struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
// version of the OpenAPI specification in which this extension was written.
Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
- // Name of the extension
+ // Name of the extension.
ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName,proto3" json:"extension_name,omitempty"`
- // Must be a valid yaml for the proto
- Yaml string `protobuf:"bytes,3,opt,name=yaml,proto3" json:"yaml,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ // YAML-formatted extension value.
+ Yaml string `protobuf:"bytes,3,opt,name=yaml,proto3" json:"yaml,omitempty"`
}
-func (m *Wrapper) Reset() { *m = Wrapper{} }
-func (m *Wrapper) String() string { return proto.CompactTextString(m) }
-func (*Wrapper) ProtoMessage() {}
-func (*Wrapper) Descriptor() ([]byte, []int) {
- return fileDescriptor_661e47e790f76671, []int{3}
+func (x *Wrapper) Reset() {
+ *x = Wrapper{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_extensions_extension_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Wrapper) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Wrapper.Unmarshal(m, b)
+func (x *Wrapper) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Wrapper) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Wrapper.Marshal(b, m, deterministic)
-}
-func (m *Wrapper) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Wrapper.Merge(m, src)
-}
-func (m *Wrapper) XXX_Size() int {
- return xxx_messageInfo_Wrapper.Size(m)
-}
-func (m *Wrapper) XXX_DiscardUnknown() {
- xxx_messageInfo_Wrapper.DiscardUnknown(m)
+
+func (*Wrapper) ProtoMessage() {}
+
+func (x *Wrapper) ProtoReflect() protoreflect.Message {
+ mi := &file_extensions_extension_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Wrapper proto.InternalMessageInfo
+// Deprecated: Use Wrapper.ProtoReflect.Descriptor instead.
+func (*Wrapper) Descriptor() ([]byte, []int) {
+ return file_extensions_extension_proto_rawDescGZIP(), []int{3}
+}
-func (m *Wrapper) GetVersion() string {
- if m != nil {
- return m.Version
+func (x *Wrapper) GetVersion() string {
+ if x != nil {
+ return x.Version
}
return ""
}
-func (m *Wrapper) GetExtensionName() string {
- if m != nil {
- return m.ExtensionName
+func (x *Wrapper) GetExtensionName() string {
+ if x != nil {
+ return x.ExtensionName
}
return ""
}
-func (m *Wrapper) GetYaml() string {
- if m != nil {
- return m.Yaml
+func (x *Wrapper) GetYaml() string {
+ if x != nil {
+ return x.Yaml
}
return ""
}
-func init() {
- proto.RegisterType((*Version)(nil), "openapiextension.v1.Version")
- proto.RegisterType((*ExtensionHandlerRequest)(nil), "openapiextension.v1.ExtensionHandlerRequest")
- proto.RegisterType((*ExtensionHandlerResponse)(nil), "openapiextension.v1.ExtensionHandlerResponse")
- proto.RegisterType((*Wrapper)(nil), "openapiextension.v1.Wrapper")
-}
-
-func init() { proto.RegisterFile("extensions/extension.proto", fileDescriptor_661e47e790f76671) }
-
-var fileDescriptor_661e47e790f76671 = []byte{
- // 362 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xeb, 0x40,
- 0x18, 0x85, 0x49, 0xbf, 0x72, 0x33, 0x97, 0xdb, 0x2b, 0x63, 0xd1, 0x58, 0x5c, 0x94, 0x80, 0x50,
- 0x44, 0xa6, 0x54, 0xc1, 0x7d, 0x0b, 0x45, 0xdd, 0xd8, 0x32, 0x8b, 0xba, 0xb3, 0x4c, 0xd3, 0xb7,
- 0x69, 0x24, 0x99, 0x19, 0x27, 0x1f, 0xb6, 0x7f, 0xc5, 0xa5, 0xbf, 0x54, 0x32, 0x93, 0xc4, 0x85,
- 0xba, 0x9b, 0xf3, 0x70, 0xda, 0xf7, 0x9c, 0x13, 0xd4, 0x87, 0x7d, 0x0a, 0x3c, 0x09, 0x05, 0x4f,
- 0x46, 0xf5, 0x93, 0x48, 0x25, 0x52, 0x81, 0x8f, 0x85, 0x04, 0xce, 0x64, 0xf8, 0xc5, 0xf3, 0x71,
- 0xff, 0x2c, 0x10, 0x22, 0x88, 0x60, 0xa4, 0x2d, 0xeb, 0x6c, 0x3b, 0x62, 0xfc, 0x60, 0xfc, 0x9e,
- 0x8f, 0xec, 0x25, 0xa8, 0xc2, 0x88, 0x7b, 0xa8, 0x1d, 0xb3, 0x17, 0xa1, 0x5c, 0x6b, 0x60, 0x0d,
- 0xdb, 0xd4, 0x08, 0x4d, 0x43, 0x2e, 0x94, 0xdb, 0x28, 0x69, 0x21, 0x0a, 0x2a, 0x59, 0xea, 0xef,
- 0xdc, 0xa6, 0xa1, 0x5a, 0xe0, 0x13, 0xd4, 0x49, 0xb2, 0xed, 0x36, 0xdc, 0xbb, 0xad, 0x81, 0x35,
- 0x74, 0x68, 0xa9, 0xbc, 0x77, 0x0b, 0x9d, 0xce, 0xaa, 0x40, 0xf7, 0x8c, 0x6f, 0x22, 0x50, 0x14,
- 0x5e, 0x33, 0x48, 0x52, 0x7c, 0x8b, 0xec, 0x37, 0xc5, 0xa4, 0x04, 0x73, 0xf7, 0xef, 0xf5, 0x39,
- 0xf9, 0xa1, 0x02, 0x79, 0x32, 0x1e, 0x5a, 0x99, 0xf1, 0x1d, 0x3a, 0xf2, 0x45, 0x2c, 0xc3, 0x08,
- 0xd4, 0x2a, 0x37, 0x0d, 0x74, 0x98, 0xdf, 0xfe, 0xa0, 0x6c, 0x49, 0xff, 0x57, 0xbf, 0x2a, 0x81,
- 0x97, 0x23, 0xf7, 0x7b, 0xb6, 0x44, 0x0a, 0x9e, 0x00, 0x76, 0x91, 0xbd, 0xd3, 0x68, 0xa3, 0xc3,
- 0xfd, 0xa1, 0x95, 0x2c, 0x06, 0x00, 0xa5, 0xf4, 0x2c, 0xcd, 0xa1, 0x43, 0x8d, 0xc0, 0x97, 0xa8,
- 0x9d, 0xb3, 0x28, 0x83, 0x32, 0x49, 0x8f, 0x98, 0xe1, 0x49, 0x35, 0x3c, 0x99, 0xf0, 0x03, 0x35,
- 0x16, 0xef, 0x19, 0xd9, 0x65, 0xa9, 0xe2, 0x4c, 0x55, 0xc1, 0xd2, 0xc3, 0x55, 0x12, 0x5f, 0xa0,
- 0x6e, 0xdd, 0x62, 0xc5, 0x59, 0x0c, 0xfa, 0x33, 0x38, 0xf4, 0x5f, 0x4d, 0x1f, 0x59, 0x0c, 0x18,
- 0xa3, 0xd6, 0x81, 0xc5, 0x91, 0x3e, 0xeb, 0x50, 0xfd, 0x9e, 0x5e, 0xa1, 0xae, 0x50, 0x01, 0x09,
- 0xb8, 0x48, 0xd2, 0xd0, 0x27, 0xf9, 0x78, 0x8a, 0xe7, 0x12, 0xf8, 0x64, 0xf1, 0x50, 0xd7, 0x5d,
- 0x8e, 0x17, 0xd6, 0x47, 0xa3, 0x39, 0x9f, 0xcc, 0xd6, 0x1d, 0x1d, 0xf1, 0xe6, 0x33, 0x00, 0x00,
- 0xff, 0xff, 0xeb, 0xf3, 0xfa, 0x65, 0x5c, 0x02, 0x00, 0x00,
+var File_extensions_extension_proto protoreflect.FileDescriptor
+
+var file_extensions_extension_proto_rawDesc = []byte{
+ 0x0a, 0x1a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x65, 0x78, 0x74,
+ 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6e,
+ 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e,
+ 0x76, 0x31, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a,
+ 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f,
+ 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14,
+ 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d,
+ 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x05, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x75,
+ 0x66, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x75, 0x66, 0x66,
+ 0x69, 0x78, 0x22, 0x9c, 0x01, 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x37,
+ 0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x1d, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73,
+ 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x52, 0x07,
+ 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x48, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x69,
+ 0x6c, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65,
+ 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
+ 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x22, 0x78, 0x0a, 0x18, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x61,
+ 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a,
+ 0x07, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07,
+ 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72,
+ 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12,
+ 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x5e, 0x0a, 0x07, 0x57,
+ 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
+ 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73,
+ 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x42, 0x4d, 0x0a, 0x0e, 0x6f,
+ 0x72, 0x67, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x76, 0x31, 0x42, 0x10, 0x47,
+ 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x50,
+ 0x01, 0x5a, 0x21, 0x2e, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3b,
+ 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x47, 0x4e, 0x58, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
+}
+
+var (
+ file_extensions_extension_proto_rawDescOnce sync.Once
+ file_extensions_extension_proto_rawDescData = file_extensions_extension_proto_rawDesc
+)
+
+func file_extensions_extension_proto_rawDescGZIP() []byte {
+ file_extensions_extension_proto_rawDescOnce.Do(func() {
+ file_extensions_extension_proto_rawDescData = protoimpl.X.CompressGZIP(file_extensions_extension_proto_rawDescData)
+ })
+ return file_extensions_extension_proto_rawDescData
+}
+
+var file_extensions_extension_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
+var file_extensions_extension_proto_goTypes = []interface{}{
+ (*Version)(nil), // 0: gnostic.extension.v1.Version
+ (*ExtensionHandlerRequest)(nil), // 1: gnostic.extension.v1.ExtensionHandlerRequest
+ (*ExtensionHandlerResponse)(nil), // 2: gnostic.extension.v1.ExtensionHandlerResponse
+ (*Wrapper)(nil), // 3: gnostic.extension.v1.Wrapper
+ (*anypb.Any)(nil), // 4: google.protobuf.Any
+}
+var file_extensions_extension_proto_depIdxs = []int32{
+ 3, // 0: gnostic.extension.v1.ExtensionHandlerRequest.wrapper:type_name -> gnostic.extension.v1.Wrapper
+ 0, // 1: gnostic.extension.v1.ExtensionHandlerRequest.compiler_version:type_name -> gnostic.extension.v1.Version
+ 4, // 2: gnostic.extension.v1.ExtensionHandlerResponse.value:type_name -> google.protobuf.Any
+ 3, // [3:3] is the sub-list for method output_type
+ 3, // [3:3] is the sub-list for method input_type
+ 3, // [3:3] is the sub-list for extension type_name
+ 3, // [3:3] is the sub-list for extension extendee
+ 0, // [0:3] is the sub-list for field type_name
+}
+
+func init() { file_extensions_extension_proto_init() }
+func file_extensions_extension_proto_init() {
+ if File_extensions_extension_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_extensions_extension_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Version); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_extensions_extension_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ExtensionHandlerRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_extensions_extension_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ExtensionHandlerResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_extensions_extension_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Wrapper); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_extensions_extension_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 4,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_extensions_extension_proto_goTypes,
+ DependencyIndexes: file_extensions_extension_proto_depIdxs,
+ MessageInfos: file_extensions_extension_proto_msgTypes,
+ }.Build()
+ File_extensions_extension_proto = out.File
+ file_extensions_extension_proto_rawDesc = nil
+ file_extensions_extension_proto_goTypes = nil
+ file_extensions_extension_proto_depIdxs = nil
}
diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.proto b/vendor/github.com/googleapis/gnostic/extensions/extension.proto
index 04856f913..875137c1a 100644
--- a/vendor/github.com/googleapis/gnostic/extensions/extension.proto
+++ b/vendor/github.com/googleapis/gnostic/extensions/extension.proto
@@ -1,4 +1,4 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
+// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,8 +14,9 @@
syntax = "proto3";
+package gnostic.extension.v1;
+
import "google/protobuf/any.proto";
-package openapiextension.v1;
// This option lets the proto compiler generate Java code inside the package
// name (see below) instead of inside an outer class. It creates a simpler
@@ -26,7 +27,7 @@ option java_multiple_files = true;
// The Java outer classname should be the filename in UpperCamelCase. This
// class is only used to hold proto descriptor, so developers don't need to
// work with it directly.
-option java_outer_classname = "OpenAPIExtensionV1";
+option java_outer_classname = "GnosticExtension";
// The Java package name must be proto package name with proper prefix.
option java_package = "org.gnostic.v1";
@@ -37,9 +38,13 @@ option java_package = "org.gnostic.v1";
// hopefully unique enough to not conflict with things that may come along in
// the future. 'GPB' is reserved for the protocol buffer implementation itself.
//
-option objc_class_prefix = "OAE"; // "OpenAPI Extension"
+// "Gnostic Extension"
+option objc_class_prefix = "GNX";
+
+// The Go package name.
+option go_package = "./extensions;gnostic_extension_v1";
-// The version number of OpenAPI compiler.
+// The version number of Gnostic.
message Version {
int32 major = 1;
int32 minor = 2;
@@ -52,12 +57,11 @@ message Version {
// An encoded Request is written to the ExtensionHandler's stdin.
message ExtensionHandlerRequest {
- // The OpenAPI descriptions that were explicitly listed on the command line.
- // The specifications will appear in the order they are specified to gnostic.
+ // The extension to process.
Wrapper wrapper = 1;
- // The version number of openapi compiler.
- Version compiler_version = 3;
+ // The version number of Gnostic.
+ Version compiler_version = 2;
}
// The extensions writes an encoded ExtensionHandlerResponse to stdout.
@@ -66,7 +70,7 @@ message ExtensionHandlerResponse {
// true if the extension is handled by the extension handler; false otherwise
bool handled = 1;
- // Error message. If non-empty, the extension handling failed.
+ // Error message(s). If non-empty, the extension handling failed.
// The extension handler process should exit with status code zero
// even if it reports an error in this way.
//
@@ -75,7 +79,7 @@ message ExtensionHandlerResponse {
// itself -- such as the input Document being unparseable -- should be
// reported by writing a message to stderr and exiting with a non-zero
// status code.
- repeated string error = 2;
+ repeated string errors = 2;
// text output
google.protobuf.Any value = 3;
@@ -85,9 +89,9 @@ message Wrapper {
// version of the OpenAPI specification in which this extension was written.
string version = 1;
- // Name of the extension
+ // Name of the extension.
string extension_name = 2;
- // Must be a valid yaml for the proto
+ // YAML-formatted extension value.
string yaml = 3;
}
diff --git a/vendor/github.com/googleapis/gnostic/extensions/extensions.go b/vendor/github.com/googleapis/gnostic/extensions/extensions.go
index 94a8e62a7..ec8afd009 100644
--- a/vendor/github.com/googleapis/gnostic/extensions/extensions.go
+++ b/vendor/github.com/googleapis/gnostic/extensions/extensions.go
@@ -1,4 +1,4 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
+// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -12,71 +12,53 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package openapiextension_v1
+package gnostic_extension_v1
import (
- "fmt"
"io/ioutil"
+ "log"
"os"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
)
-type documentHandler func(version string, extensionName string, document string)
type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error)
-func forInputYamlFromOpenapic(handler documentHandler) {
+// Main implements the main program of an extension handler.
+func Main(handler extensionHandler) {
+ // unpack the request
data, err := ioutil.ReadAll(os.Stdin)
if err != nil {
- fmt.Println("File error:", err.Error())
+ log.Println("File error:", err.Error())
os.Exit(1)
}
if len(data) == 0 {
- fmt.Println("No input data.")
+ log.Println("No input data.")
os.Exit(1)
}
request := &ExtensionHandlerRequest{}
err = proto.Unmarshal(data, request)
if err != nil {
- fmt.Println("Input error:", err.Error())
+ log.Println("Input error:", err.Error())
os.Exit(1)
}
- handler(request.Wrapper.Version, request.Wrapper.ExtensionName, request.Wrapper.Yaml)
-}
-
-// ProcessExtension calles the handler for a specified extension.
-func ProcessExtension(handleExtension extensionHandler) {
- response := &ExtensionHandlerResponse{}
- forInputYamlFromOpenapic(
- func(version string, extensionName string, yamlInput string) {
- var newObject proto.Message
- var err error
-
- handled, newObject, err := handleExtension(extensionName, yamlInput)
- if !handled {
- responseBytes, _ := proto.Marshal(response)
- os.Stdout.Write(responseBytes)
- os.Exit(0)
- }
-
- // If we reach here, then the extension is handled
- response.Handled = true
- if err != nil {
- response.Error = append(response.Error, err.Error())
- responseBytes, _ := proto.Marshal(response)
- os.Stdout.Write(responseBytes)
- os.Exit(0)
- }
- response.Value, err = ptypes.MarshalAny(newObject)
- if err != nil {
- response.Error = append(response.Error, err.Error())
- responseBytes, _ := proto.Marshal(response)
- os.Stdout.Write(responseBytes)
- os.Exit(0)
- }
- })
-
+ // call the handler
+ handled, output, err := handler(request.Wrapper.ExtensionName, request.Wrapper.Yaml)
+ // respond with the output of the handler
+ response := &ExtensionHandlerResponse{
+ Handled: false, // default assumption
+ Errors: make([]string, 0),
+ }
+ if err != nil {
+ response.Errors = append(response.Errors, err.Error())
+ } else if handled {
+ response.Handled = true
+ response.Value, err = ptypes.MarshalAny(output)
+ if err != nil {
+ response.Errors = append(response.Errors, err.Error())
+ }
+ }
responseBytes, _ := proto.Marshal(response)
os.Stdout.Write(responseBytes)
}
diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/README.md b/vendor/github.com/googleapis/gnostic/jsonschema/README.md
new file mode 100644
index 000000000..6793c5179
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/jsonschema/README.md
@@ -0,0 +1,4 @@
+# jsonschema
+
+This directory contains code for reading, writing, and manipulating JSON
+schemas.
diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/base.go b/vendor/github.com/googleapis/gnostic/jsonschema/base.go
new file mode 100644
index 000000000..0af8b148b
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/jsonschema/base.go
@@ -0,0 +1,84 @@
+
+// THIS FILE IS AUTOMATICALLY GENERATED.
+
+package jsonschema
+
+import (
+ "encoding/base64"
+)
+
+func baseSchemaBytes() ([]byte, error){
+ return base64.StdEncoding.DecodeString(
+`ewogICAgImlkIjogImh0dHA6Ly9qc29uLXNjaGVtYS5vcmcvZHJhZnQtMDQvc2NoZW1hIyIsCiAgICAi
+JHNjaGVtYSI6ICJodHRwOi8vanNvbi1zY2hlbWEub3JnL2RyYWZ0LTA0L3NjaGVtYSMiLAogICAgImRl
+c2NyaXB0aW9uIjogIkNvcmUgc2NoZW1hIG1ldGEtc2NoZW1hIiwKICAgICJkZWZpbml0aW9ucyI6IHsK
+ICAgICAgICAic2NoZW1hQXJyYXkiOiB7CiAgICAgICAgICAgICJ0eXBlIjogImFycmF5IiwKICAgICAg
+ICAgICAgIm1pbkl0ZW1zIjogMSwKICAgICAgICAgICAgIml0ZW1zIjogeyAiJHJlZiI6ICIjIiB9CiAg
+ICAgICAgfSwKICAgICAgICAicG9zaXRpdmVJbnRlZ2VyIjogewogICAgICAgICAgICAidHlwZSI6ICJp
+bnRlZ2VyIiwKICAgICAgICAgICAgIm1pbmltdW0iOiAwCiAgICAgICAgfSwKICAgICAgICAicG9zaXRp
+dmVJbnRlZ2VyRGVmYXVsdDAiOiB7CiAgICAgICAgICAgICJhbGxPZiI6IFsgeyAiJHJlZiI6ICIjL2Rl
+ZmluaXRpb25zL3Bvc2l0aXZlSW50ZWdlciIgfSwgeyAiZGVmYXVsdCI6IDAgfSBdCiAgICAgICAgfSwK
+ICAgICAgICAic2ltcGxlVHlwZXMiOiB7CiAgICAgICAgICAgICJlbnVtIjogWyAiYXJyYXkiLCAiYm9v
+bGVhbiIsICJpbnRlZ2VyIiwgIm51bGwiLCAibnVtYmVyIiwgIm9iamVjdCIsICJzdHJpbmciIF0KICAg
+ICAgICB9LAogICAgICAgICJzdHJpbmdBcnJheSI6IHsKICAgICAgICAgICAgInR5cGUiOiAiYXJyYXki
+LAogICAgICAgICAgICAiaXRlbXMiOiB7ICJ0eXBlIjogInN0cmluZyIgfSwKICAgICAgICAgICAgIm1p
+bkl0ZW1zIjogMSwKICAgICAgICAgICAgInVuaXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgIH0KICAgIH0s
+CiAgICAidHlwZSI6ICJvYmplY3QiLAogICAgInByb3BlcnRpZXMiOiB7CiAgICAgICAgImlkIjogewog
+ICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciLAogICAgICAgICAgICAiZm9ybWF0IjogInVyaSIKICAg
+ICAgICB9LAogICAgICAgICIkc2NoZW1hIjogewogICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciLAog
+ICAgICAgICAgICAiZm9ybWF0IjogInVyaSIKICAgICAgICB9LAogICAgICAgICJ0aXRsZSI6IHsKICAg
+ICAgICAgICAgInR5cGUiOiAic3RyaW5nIgogICAgICAgIH0sCiAgICAgICAgImRlc2NyaXB0aW9uIjog
+ewogICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciCiAgICAgICAgfSwKICAgICAgICAiZGVmYXVsdCI6
+IHt9LAogICAgICAgICJtdWx0aXBsZU9mIjogewogICAgICAgICAgICAidHlwZSI6ICJudW1iZXIiLAog
+ICAgICAgICAgICAibWluaW11bSI6IDAsCiAgICAgICAgICAgICJleGNsdXNpdmVNaW5pbXVtIjogdHJ1
+ZQogICAgICAgIH0sCiAgICAgICAgIm1heGltdW0iOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm51bWJl
+ciIKICAgICAgICB9LAogICAgICAgICJleGNsdXNpdmVNYXhpbXVtIjogewogICAgICAgICAgICAidHlw
+ZSI6ICJib29sZWFuIiwKICAgICAgICAgICAgImRlZmF1bHQiOiBmYWxzZQogICAgICAgIH0sCiAgICAg
+ICAgIm1pbmltdW0iOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm51bWJlciIKICAgICAgICB9LAogICAg
+ICAgICJleGNsdXNpdmVNaW5pbXVtIjogewogICAgICAgICAgICAidHlwZSI6ICJib29sZWFuIiwKICAg
+ICAgICAgICAgImRlZmF1bHQiOiBmYWxzZQogICAgICAgIH0sCiAgICAgICAgIm1heExlbmd0aCI6IHsg
+IiRyZWYiOiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXIiIH0sCiAgICAgICAgIm1pbkxlbmd0
+aCI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXJEZWZhdWx0MCIgfSwKICAg
+ICAgICAicGF0dGVybiI6IHsKICAgICAgICAgICAgInR5cGUiOiAic3RyaW5nIiwKICAgICAgICAgICAg
+ImZvcm1hdCI6ICJyZWdleCIKICAgICAgICB9LAogICAgICAgICJhZGRpdGlvbmFsSXRlbXMiOiB7CiAg
+ICAgICAgICAgICJhbnlPZiI6IFsKICAgICAgICAgICAgICAgIHsgInR5cGUiOiAiYm9vbGVhbiIgfSwK
+ICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfQogICAgICAgICAgICBdLAogICAgICAgICAgICAi
+ZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAiaXRlbXMiOiB7CiAgICAgICAgICAgICJhbnlP
+ZiI6IFsKICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAgICAgIHsgIiRy
+ZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIgfQogICAgICAgICAgICBdLAogICAgICAgICAg
+ICAiZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAibWF4SXRlbXMiOiB7ICIkcmVmIjogIiMv
+ZGVmaW5pdGlvbnMvcG9zaXRpdmVJbnRlZ2VyIiB9LAogICAgICAgICJtaW5JdGVtcyI6IHsgIiRyZWYi
+OiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXJEZWZhdWx0MCIgfSwKICAgICAgICAidW5pcXVl
+SXRlbXMiOiB7CiAgICAgICAgICAgICJ0eXBlIjogImJvb2xlYW4iLAogICAgICAgICAgICAiZGVmYXVs
+dCI6IGZhbHNlCiAgICAgICAgfSwKICAgICAgICAibWF4UHJvcGVydGllcyI6IHsgIiRyZWYiOiAiIy9k
+ZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXIiIH0sCiAgICAgICAgIm1pblByb3BlcnRpZXMiOiB7ICIk
+cmVmIjogIiMvZGVmaW5pdGlvbnMvcG9zaXRpdmVJbnRlZ2VyRGVmYXVsdDAiIH0sCiAgICAgICAgInJl
+cXVpcmVkIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3N0cmluZ0FycmF5IiB9LAogICAgICAgICJh
+ZGRpdGlvbmFsUHJvcGVydGllcyI6IHsKICAgICAgICAgICAgImFueU9mIjogWwogICAgICAgICAgICAg
+ICAgeyAidHlwZSI6ICJib29sZWFuIiB9LAogICAgICAgICAgICAgICAgeyAiJHJlZiI6ICIjIiB9CiAg
+ICAgICAgICAgIF0sCiAgICAgICAgICAgICJkZWZhdWx0Ijoge30KICAgICAgICB9LAogICAgICAgICJk
+ZWZpbml0aW9ucyI6IHsKICAgICAgICAgICAgInR5cGUiOiAib2JqZWN0IiwKICAgICAgICAgICAgImFk
+ZGl0aW9uYWxQcm9wZXJ0aWVzIjogeyAiJHJlZiI6ICIjIiB9LAogICAgICAgICAgICAiZGVmYXVsdCI6
+IHt9CiAgICAgICAgfSwKICAgICAgICAicHJvcGVydGllcyI6IHsKICAgICAgICAgICAgInR5cGUiOiAi
+b2JqZWN0IiwKICAgICAgICAgICAgImFkZGl0aW9uYWxQcm9wZXJ0aWVzIjogeyAiJHJlZiI6ICIjIiB9
+LAogICAgICAgICAgICAiZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAicGF0dGVyblByb3Bl
+cnRpZXMiOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm9iamVjdCIsCiAgICAgICAgICAgICJhZGRpdGlv
+bmFsUHJvcGVydGllcyI6IHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAgImRlZmF1bHQiOiB7fQog
+ICAgICAgIH0sCiAgICAgICAgImRlcGVuZGVuY2llcyI6IHsKICAgICAgICAgICAgInR5cGUiOiAib2Jq
+ZWN0IiwKICAgICAgICAgICAgImFkZGl0aW9uYWxQcm9wZXJ0aWVzIjogewogICAgICAgICAgICAgICAg
+ImFueU9mIjogWwogICAgICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAg
+ICAgICAgICB7ICIkcmVmIjogIiMvZGVmaW5pdGlvbnMvc3RyaW5nQXJyYXkiIH0KICAgICAgICAgICAg
+ICAgIF0KICAgICAgICAgICAgfQogICAgICAgIH0sCiAgICAgICAgImVudW0iOiB7CiAgICAgICAgICAg
+ICJ0eXBlIjogImFycmF5IiwKICAgICAgICAgICAgIm1pbkl0ZW1zIjogMSwKICAgICAgICAgICAgInVu
+aXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgIH0sCiAgICAgICAgInR5cGUiOiB7CiAgICAgICAgICAgICJh
+bnlPZiI6IFsKICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zaW1wbGVUeXBl
+cyIgfSwKICAgICAgICAgICAgICAgIHsKICAgICAgICAgICAgICAgICAgICAidHlwZSI6ICJhcnJheSIs
+CiAgICAgICAgICAgICAgICAgICAgIml0ZW1zIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NpbXBs
+ZVR5cGVzIiB9LAogICAgICAgICAgICAgICAgICAgICJtaW5JdGVtcyI6IDEsCiAgICAgICAgICAgICAg
+ICAgICAgInVuaXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgICAgICAgICAgfQogICAgICAgICAgICBdCiAg
+ICAgICAgfSwKICAgICAgICAiYWxsT2YiOiB7ICIkcmVmIjogIiMvZGVmaW5pdGlvbnMvc2NoZW1hQXJy
+YXkiIH0sCiAgICAgICAgImFueU9mIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NjaGVtYUFycmF5
+IiB9LAogICAgICAgICJvbmVPZiI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIg
+fSwKICAgICAgICAibm90IjogeyAiJHJlZiI6ICIjIiB9CiAgICB9LAogICAgImRlcGVuZGVuY2llcyI6
+IHsKICAgICAgICAiZXhjbHVzaXZlTWF4aW11bSI6IFsgIm1heGltdW0iIF0sCiAgICAgICAgImV4Y2x1
+c2l2ZU1pbmltdW0iOiBbICJtaW5pbXVtIiBdCiAgICB9LAogICAgImRlZmF1bHQiOiB7fQp9Cg==`)}
\ No newline at end of file
diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/display.go b/vendor/github.com/googleapis/gnostic/jsonschema/display.go
new file mode 100644
index 000000000..028a760a9
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/jsonschema/display.go
@@ -0,0 +1,229 @@
+// Copyright 2017 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jsonschema
+
+import (
+ "fmt"
+ "strings"
+)
+
+//
+// DISPLAY
+// The following methods display Schemas.
+//
+
+// Description returns a string representation of a string or string array.
+func (s *StringOrStringArray) Description() string {
+ if s.String != nil {
+ return *s.String
+ }
+ if s.StringArray != nil {
+ return strings.Join(*s.StringArray, ", ")
+ }
+ return ""
+}
+
+// Returns a string representation of a Schema.
+func (schema *Schema) String() string {
+ return schema.describeSchema("")
+}
+
+// Helper: Returns a string representation of a Schema indented by a specified string.
+func (schema *Schema) describeSchema(indent string) string {
+ result := ""
+ if schema.Schema != nil {
+ result += indent + "$schema: " + *(schema.Schema) + "\n"
+ }
+ if schema.ID != nil {
+ result += indent + "id: " + *(schema.ID) + "\n"
+ }
+ if schema.MultipleOf != nil {
+ result += indent + fmt.Sprintf("multipleOf: %+v\n", *(schema.MultipleOf))
+ }
+ if schema.Maximum != nil {
+ result += indent + fmt.Sprintf("maximum: %+v\n", *(schema.Maximum))
+ }
+ if schema.ExclusiveMaximum != nil {
+ result += indent + fmt.Sprintf("exclusiveMaximum: %+v\n", *(schema.ExclusiveMaximum))
+ }
+ if schema.Minimum != nil {
+ result += indent + fmt.Sprintf("minimum: %+v\n", *(schema.Minimum))
+ }
+ if schema.ExclusiveMinimum != nil {
+ result += indent + fmt.Sprintf("exclusiveMinimum: %+v\n", *(schema.ExclusiveMinimum))
+ }
+ if schema.MaxLength != nil {
+ result += indent + fmt.Sprintf("maxLength: %+v\n", *(schema.MaxLength))
+ }
+ if schema.MinLength != nil {
+ result += indent + fmt.Sprintf("minLength: %+v\n", *(schema.MinLength))
+ }
+ if schema.Pattern != nil {
+ result += indent + fmt.Sprintf("pattern: %+v\n", *(schema.Pattern))
+ }
+ if schema.AdditionalItems != nil {
+ s := schema.AdditionalItems.Schema
+ if s != nil {
+ result += indent + "additionalItems:\n"
+ result += s.describeSchema(indent + " ")
+ } else {
+ b := *(schema.AdditionalItems.Boolean)
+ result += indent + fmt.Sprintf("additionalItems: %+v\n", b)
+ }
+ }
+ if schema.Items != nil {
+ result += indent + "items:\n"
+ items := schema.Items
+ if items.SchemaArray != nil {
+ for i, s := range *(items.SchemaArray) {
+ result += indent + " " + fmt.Sprintf("%d", i) + ":\n"
+ result += s.describeSchema(indent + " " + " ")
+ }
+ } else if items.Schema != nil {
+ result += items.Schema.describeSchema(indent + " " + " ")
+ }
+ }
+ if schema.MaxItems != nil {
+ result += indent + fmt.Sprintf("maxItems: %+v\n", *(schema.MaxItems))
+ }
+ if schema.MinItems != nil {
+ result += indent + fmt.Sprintf("minItems: %+v\n", *(schema.MinItems))
+ }
+ if schema.UniqueItems != nil {
+ result += indent + fmt.Sprintf("uniqueItems: %+v\n", *(schema.UniqueItems))
+ }
+ if schema.MaxProperties != nil {
+ result += indent + fmt.Sprintf("maxProperties: %+v\n", *(schema.MaxProperties))
+ }
+ if schema.MinProperties != nil {
+ result += indent + fmt.Sprintf("minProperties: %+v\n", *(schema.MinProperties))
+ }
+ if schema.Required != nil {
+ result += indent + fmt.Sprintf("required: %+v\n", *(schema.Required))
+ }
+ if schema.AdditionalProperties != nil {
+ s := schema.AdditionalProperties.Schema
+ if s != nil {
+ result += indent + "additionalProperties:\n"
+ result += s.describeSchema(indent + " ")
+ } else {
+ b := *(schema.AdditionalProperties.Boolean)
+ result += indent + fmt.Sprintf("additionalProperties: %+v\n", b)
+ }
+ }
+ if schema.Properties != nil {
+ result += indent + "properties:\n"
+ for _, pair := range *(schema.Properties) {
+ name := pair.Name
+ s := pair.Value
+ result += indent + " " + name + ":\n"
+ result += s.describeSchema(indent + " " + " ")
+ }
+ }
+ if schema.PatternProperties != nil {
+ result += indent + "patternProperties:\n"
+ for _, pair := range *(schema.PatternProperties) {
+ name := pair.Name
+ s := pair.Value
+ result += indent + " " + name + ":\n"
+ result += s.describeSchema(indent + " " + " ")
+ }
+ }
+ if schema.Dependencies != nil {
+ result += indent + "dependencies:\n"
+ for _, pair := range *(schema.Dependencies) {
+ name := pair.Name
+ schemaOrStringArray := pair.Value
+ s := schemaOrStringArray.Schema
+ if s != nil {
+ result += indent + " " + name + ":\n"
+ result += s.describeSchema(indent + " " + " ")
+ } else {
+ a := schemaOrStringArray.StringArray
+ if a != nil {
+ result += indent + " " + name + ":\n"
+ for _, s2 := range *a {
+ result += indent + " " + " " + s2 + "\n"
+ }
+ }
+ }
+
+ }
+ }
+ if schema.Enumeration != nil {
+ result += indent + "enumeration:\n"
+ for _, value := range *(schema.Enumeration) {
+ if value.String != nil {
+ result += indent + " " + fmt.Sprintf("%+v\n", *value.String)
+ } else {
+ result += indent + " " + fmt.Sprintf("%+v\n", *value.Bool)
+ }
+ }
+ }
+ if schema.Type != nil {
+ result += indent + fmt.Sprintf("type: %+v\n", schema.Type.Description())
+ }
+ if schema.AllOf != nil {
+ result += indent + "allOf:\n"
+ for _, s := range *(schema.AllOf) {
+ result += s.describeSchema(indent + " ")
+ result += indent + "-\n"
+ }
+ }
+ if schema.AnyOf != nil {
+ result += indent + "anyOf:\n"
+ for _, s := range *(schema.AnyOf) {
+ result += s.describeSchema(indent + " ")
+ result += indent + "-\n"
+ }
+ }
+ if schema.OneOf != nil {
+ result += indent + "oneOf:\n"
+ for _, s := range *(schema.OneOf) {
+ result += s.describeSchema(indent + " ")
+ result += indent + "-\n"
+ }
+ }
+ if schema.Not != nil {
+ result += indent + "not:\n"
+ result += schema.Not.describeSchema(indent + " ")
+ }
+ if schema.Definitions != nil {
+ result += indent + "definitions:\n"
+ for _, pair := range *(schema.Definitions) {
+ name := pair.Name
+ s := pair.Value
+ result += indent + " " + name + ":\n"
+ result += s.describeSchema(indent + " " + " ")
+ }
+ }
+ if schema.Title != nil {
+ result += indent + "title: " + *(schema.Title) + "\n"
+ }
+ if schema.Description != nil {
+ result += indent + "description: " + *(schema.Description) + "\n"
+ }
+ if schema.Default != nil {
+ result += indent + "default:\n"
+ result += indent + fmt.Sprintf(" %+v\n", *(schema.Default))
+ }
+ if schema.Format != nil {
+ result += indent + "format: " + *(schema.Format) + "\n"
+ }
+ if schema.Ref != nil {
+ result += indent + "$ref: " + *(schema.Ref) + "\n"
+ }
+ return result
+}
diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/models.go b/vendor/github.com/googleapis/gnostic/jsonschema/models.go
new file mode 100644
index 000000000..4781bdc5f
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/jsonschema/models.go
@@ -0,0 +1,228 @@
+// Copyright 2017 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package jsonschema supports the reading, writing, and manipulation
+// of JSON Schemas.
+package jsonschema
+
+import "gopkg.in/yaml.v3"
+
+// The Schema struct models a JSON Schema and, because schemas are
+// defined hierarchically, contains many references to itself.
+// All fields are pointers and are nil if the associated values
+// are not specified.
+type Schema struct {
+ Schema *string // $schema
+ ID *string // id keyword used for $ref resolution scope
+ Ref *string // $ref, i.e. JSON Pointers
+
+ // http://json-schema.org/latest/json-schema-validation.html
+ // 5.1. Validation keywords for numeric instances (number and integer)
+ MultipleOf *SchemaNumber
+ Maximum *SchemaNumber
+ ExclusiveMaximum *bool
+ Minimum *SchemaNumber
+ ExclusiveMinimum *bool
+
+ // 5.2. Validation keywords for strings
+ MaxLength *int64
+ MinLength *int64
+ Pattern *string
+
+ // 5.3. Validation keywords for arrays
+ AdditionalItems *SchemaOrBoolean
+ Items *SchemaOrSchemaArray
+ MaxItems *int64
+ MinItems *int64
+ UniqueItems *bool
+
+ // 5.4. Validation keywords for objects
+ MaxProperties *int64
+ MinProperties *int64
+ Required *[]string
+ AdditionalProperties *SchemaOrBoolean
+ Properties *[]*NamedSchema
+ PatternProperties *[]*NamedSchema
+ Dependencies *[]*NamedSchemaOrStringArray
+
+ // 5.5. Validation keywords for any instance type
+ Enumeration *[]SchemaEnumValue
+ Type *StringOrStringArray
+ AllOf *[]*Schema
+ AnyOf *[]*Schema
+ OneOf *[]*Schema
+ Not *Schema
+ Definitions *[]*NamedSchema
+
+ // 6. Metadata keywords
+ Title *string
+ Description *string
+ Default *yaml.Node
+
+ // 7. Semantic validation with "format"
+ Format *string
+}
+
+// These helper structs represent "combination" types that generally can
+// have values of one type or another. All are used to represent parts
+// of Schemas.
+
+// SchemaNumber represents a value that can be either an Integer or a Float.
+type SchemaNumber struct {
+ Integer *int64
+ Float *float64
+}
+
+// NewSchemaNumberWithInteger creates and returns a new object
+func NewSchemaNumberWithInteger(i int64) *SchemaNumber {
+ result := &SchemaNumber{}
+ result.Integer = &i
+ return result
+}
+
+// NewSchemaNumberWithFloat creates and returns a new object
+func NewSchemaNumberWithFloat(f float64) *SchemaNumber {
+ result := &SchemaNumber{}
+ result.Float = &f
+ return result
+}
+
+// SchemaOrBoolean represents a value that can be either a Schema or a Boolean.
+type SchemaOrBoolean struct {
+ Schema *Schema
+ Boolean *bool
+}
+
+// NewSchemaOrBooleanWithSchema creates and returns a new object
+func NewSchemaOrBooleanWithSchema(s *Schema) *SchemaOrBoolean {
+ result := &SchemaOrBoolean{}
+ result.Schema = s
+ return result
+}
+
+// NewSchemaOrBooleanWithBoolean creates and returns a new object
+func NewSchemaOrBooleanWithBoolean(b bool) *SchemaOrBoolean {
+ result := &SchemaOrBoolean{}
+ result.Boolean = &b
+ return result
+}
+
+// StringOrStringArray represents a value that can be either
+// a String or an Array of Strings.
+type StringOrStringArray struct {
+ String *string
+ StringArray *[]string
+}
+
+// NewStringOrStringArrayWithString creates and returns a new object
+func NewStringOrStringArrayWithString(s string) *StringOrStringArray {
+ result := &StringOrStringArray{}
+ result.String = &s
+ return result
+}
+
+// NewStringOrStringArrayWithStringArray creates and returns a new object
+func NewStringOrStringArrayWithStringArray(a []string) *StringOrStringArray {
+ result := &StringOrStringArray{}
+ result.StringArray = &a
+ return result
+}
+
+// SchemaOrStringArray represents a value that can be either
+// a Schema or an Array of Strings.
+type SchemaOrStringArray struct {
+ Schema *Schema
+ StringArray *[]string
+}
+
+// SchemaOrSchemaArray represents a value that can be either
+// a Schema or an Array of Schemas.
+type SchemaOrSchemaArray struct {
+ Schema *Schema
+ SchemaArray *[]*Schema
+}
+
+// NewSchemaOrSchemaArrayWithSchema creates and returns a new object
+func NewSchemaOrSchemaArrayWithSchema(s *Schema) *SchemaOrSchemaArray {
+ result := &SchemaOrSchemaArray{}
+ result.Schema = s
+ return result
+}
+
+// NewSchemaOrSchemaArrayWithSchemaArray creates and returns a new object
+func NewSchemaOrSchemaArrayWithSchemaArray(a []*Schema) *SchemaOrSchemaArray {
+ result := &SchemaOrSchemaArray{}
+ result.SchemaArray = &a
+ return result
+}
+
+// SchemaEnumValue represents a value that can be part of an
+// enumeration in a Schema.
+type SchemaEnumValue struct {
+ String *string
+ Bool *bool
+}
+
+// NamedSchema is a name-value pair that is used to emulate maps
+// with ordered keys.
+type NamedSchema struct {
+ Name string
+ Value *Schema
+}
+
+// NewNamedSchema creates and returns a new object
+func NewNamedSchema(name string, value *Schema) *NamedSchema {
+ return &NamedSchema{Name: name, Value: value}
+}
+
+// NamedSchemaOrStringArray is a name-value pair that is used
+// to emulate maps with ordered keys.
+type NamedSchemaOrStringArray struct {
+ Name string
+ Value *SchemaOrStringArray
+}
+
+// Access named subschemas by name
+
+func namedSchemaArrayElementWithName(array *[]*NamedSchema, name string) *Schema {
+ if array == nil {
+ return nil
+ }
+ for _, pair := range *array {
+ if pair.Name == name {
+ return pair.Value
+ }
+ }
+ return nil
+}
+
+// PropertyWithName returns the selected element.
+func (s *Schema) PropertyWithName(name string) *Schema {
+ return namedSchemaArrayElementWithName(s.Properties, name)
+}
+
+// PatternPropertyWithName returns the selected element.
+func (s *Schema) PatternPropertyWithName(name string) *Schema {
+ return namedSchemaArrayElementWithName(s.PatternProperties, name)
+}
+
+// DefinitionWithName returns the selected element.
+func (s *Schema) DefinitionWithName(name string) *Schema {
+ return namedSchemaArrayElementWithName(s.Definitions, name)
+}
+
+// AddProperty adds a named property.
+func (s *Schema) AddProperty(name string, property *Schema) {
+ *s.Properties = append(*s.Properties, NewNamedSchema(name, property))
+}
diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/operations.go b/vendor/github.com/googleapis/gnostic/jsonschema/operations.go
new file mode 100644
index 000000000..ba8dd4a91
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/jsonschema/operations.go
@@ -0,0 +1,394 @@
+// Copyright 2017 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jsonschema
+
+import (
+ "fmt"
+ "log"
+ "strings"
+)
+
+//
+// OPERATIONS
+// The following methods perform operations on Schemas.
+//
+
+// IsEmpty returns true if no members of the Schema are specified.
+func (schema *Schema) IsEmpty() bool {
+ return (schema.Schema == nil) &&
+ (schema.ID == nil) &&
+ (schema.MultipleOf == nil) &&
+ (schema.Maximum == nil) &&
+ (schema.ExclusiveMaximum == nil) &&
+ (schema.Minimum == nil) &&
+ (schema.ExclusiveMinimum == nil) &&
+ (schema.MaxLength == nil) &&
+ (schema.MinLength == nil) &&
+ (schema.Pattern == nil) &&
+ (schema.AdditionalItems == nil) &&
+ (schema.Items == nil) &&
+ (schema.MaxItems == nil) &&
+ (schema.MinItems == nil) &&
+ (schema.UniqueItems == nil) &&
+ (schema.MaxProperties == nil) &&
+ (schema.MinProperties == nil) &&
+ (schema.Required == nil) &&
+ (schema.AdditionalProperties == nil) &&
+ (schema.Properties == nil) &&
+ (schema.PatternProperties == nil) &&
+ (schema.Dependencies == nil) &&
+ (schema.Enumeration == nil) &&
+ (schema.Type == nil) &&
+ (schema.AllOf == nil) &&
+ (schema.AnyOf == nil) &&
+ (schema.OneOf == nil) &&
+ (schema.Not == nil) &&
+ (schema.Definitions == nil) &&
+ (schema.Title == nil) &&
+ (schema.Description == nil) &&
+ (schema.Default == nil) &&
+ (schema.Format == nil) &&
+ (schema.Ref == nil)
+}
+
+// IsEqual returns true if two schemas are equal.
+func (schema *Schema) IsEqual(schema2 *Schema) bool {
+ return schema.String() == schema2.String()
+}
+
+// SchemaOperation represents a function that can be applied to a Schema.
+type SchemaOperation func(schema *Schema, context string)
+
+// Applies a specified function to a Schema and all of the Schemas that it contains.
+func (schema *Schema) applyToSchemas(operation SchemaOperation, context string) {
+
+ if schema.AdditionalItems != nil {
+ s := schema.AdditionalItems.Schema
+ if s != nil {
+ s.applyToSchemas(operation, "AdditionalItems")
+ }
+ }
+
+ if schema.Items != nil {
+ if schema.Items.SchemaArray != nil {
+ for _, s := range *(schema.Items.SchemaArray) {
+ s.applyToSchemas(operation, "Items.SchemaArray")
+ }
+ } else if schema.Items.Schema != nil {
+ schema.Items.Schema.applyToSchemas(operation, "Items.Schema")
+ }
+ }
+
+ if schema.AdditionalProperties != nil {
+ s := schema.AdditionalProperties.Schema
+ if s != nil {
+ s.applyToSchemas(operation, "AdditionalProperties")
+ }
+ }
+
+ if schema.Properties != nil {
+ for _, pair := range *(schema.Properties) {
+ s := pair.Value
+ s.applyToSchemas(operation, "Properties")
+ }
+ }
+ if schema.PatternProperties != nil {
+ for _, pair := range *(schema.PatternProperties) {
+ s := pair.Value
+ s.applyToSchemas(operation, "PatternProperties")
+ }
+ }
+
+ if schema.Dependencies != nil {
+ for _, pair := range *(schema.Dependencies) {
+ schemaOrStringArray := pair.Value
+ s := schemaOrStringArray.Schema
+ if s != nil {
+ s.applyToSchemas(operation, "Dependencies")
+ }
+ }
+ }
+
+ if schema.AllOf != nil {
+ for _, s := range *(schema.AllOf) {
+ s.applyToSchemas(operation, "AllOf")
+ }
+ }
+ if schema.AnyOf != nil {
+ for _, s := range *(schema.AnyOf) {
+ s.applyToSchemas(operation, "AnyOf")
+ }
+ }
+ if schema.OneOf != nil {
+ for _, s := range *(schema.OneOf) {
+ s.applyToSchemas(operation, "OneOf")
+ }
+ }
+ if schema.Not != nil {
+ schema.Not.applyToSchemas(operation, "Not")
+ }
+
+ if schema.Definitions != nil {
+ for _, pair := range *(schema.Definitions) {
+ s := pair.Value
+ s.applyToSchemas(operation, "Definitions")
+ }
+ }
+
+ operation(schema, context)
+}
+
+// CopyProperties copies all non-nil properties from the source Schema to the schema Schema.
+func (schema *Schema) CopyProperties(source *Schema) {
+ if source.Schema != nil {
+ schema.Schema = source.Schema
+ }
+ if source.ID != nil {
+ schema.ID = source.ID
+ }
+ if source.MultipleOf != nil {
+ schema.MultipleOf = source.MultipleOf
+ }
+ if source.Maximum != nil {
+ schema.Maximum = source.Maximum
+ }
+ if source.ExclusiveMaximum != nil {
+ schema.ExclusiveMaximum = source.ExclusiveMaximum
+ }
+ if source.Minimum != nil {
+ schema.Minimum = source.Minimum
+ }
+ if source.ExclusiveMinimum != nil {
+ schema.ExclusiveMinimum = source.ExclusiveMinimum
+ }
+ if source.MaxLength != nil {
+ schema.MaxLength = source.MaxLength
+ }
+ if source.MinLength != nil {
+ schema.MinLength = source.MinLength
+ }
+ if source.Pattern != nil {
+ schema.Pattern = source.Pattern
+ }
+ if source.AdditionalItems != nil {
+ schema.AdditionalItems = source.AdditionalItems
+ }
+ if source.Items != nil {
+ schema.Items = source.Items
+ }
+ if source.MaxItems != nil {
+ schema.MaxItems = source.MaxItems
+ }
+ if source.MinItems != nil {
+ schema.MinItems = source.MinItems
+ }
+ if source.UniqueItems != nil {
+ schema.UniqueItems = source.UniqueItems
+ }
+ if source.MaxProperties != nil {
+ schema.MaxProperties = source.MaxProperties
+ }
+ if source.MinProperties != nil {
+ schema.MinProperties = source.MinProperties
+ }
+ if source.Required != nil {
+ schema.Required = source.Required
+ }
+ if source.AdditionalProperties != nil {
+ schema.AdditionalProperties = source.AdditionalProperties
+ }
+ if source.Properties != nil {
+ schema.Properties = source.Properties
+ }
+ if source.PatternProperties != nil {
+ schema.PatternProperties = source.PatternProperties
+ }
+ if source.Dependencies != nil {
+ schema.Dependencies = source.Dependencies
+ }
+ if source.Enumeration != nil {
+ schema.Enumeration = source.Enumeration
+ }
+ if source.Type != nil {
+ schema.Type = source.Type
+ }
+ if source.AllOf != nil {
+ schema.AllOf = source.AllOf
+ }
+ if source.AnyOf != nil {
+ schema.AnyOf = source.AnyOf
+ }
+ if source.OneOf != nil {
+ schema.OneOf = source.OneOf
+ }
+ if source.Not != nil {
+ schema.Not = source.Not
+ }
+ if source.Definitions != nil {
+ schema.Definitions = source.Definitions
+ }
+ if source.Title != nil {
+ schema.Title = source.Title
+ }
+ if source.Description != nil {
+ schema.Description = source.Description
+ }
+ if source.Default != nil {
+ schema.Default = source.Default
+ }
+ if source.Format != nil {
+ schema.Format = source.Format
+ }
+ if source.Ref != nil {
+ schema.Ref = source.Ref
+ }
+}
+
+// TypeIs returns true if the Type of a Schema includes the specified type
+func (schema *Schema) TypeIs(typeName string) bool {
+ if schema.Type != nil {
+ // the schema Type is either a string or an array of strings
+ if schema.Type.String != nil {
+ return (*(schema.Type.String) == typeName)
+ } else if schema.Type.StringArray != nil {
+ for _, n := range *(schema.Type.StringArray) {
+ if n == typeName {
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
+
+// ResolveRefs resolves "$ref" elements in a Schema and its children.
+// But if a reference refers to an object type, is inside a oneOf, or contains a oneOf,
+// the reference is kept and we expect downstream tools to separately model these
+// referenced schemas.
+func (schema *Schema) ResolveRefs() {
+ rootSchema := schema
+ count := 1
+ for count > 0 {
+ count = 0
+ schema.applyToSchemas(
+ func(schema *Schema, context string) {
+ if schema.Ref != nil {
+ resolvedRef, err := rootSchema.resolveJSONPointer(*(schema.Ref))
+ if err != nil {
+ log.Printf("%+v", err)
+ } else if resolvedRef.TypeIs("object") {
+ // don't substitute for objects, we'll model the referenced schema with a class
+ } else if context == "OneOf" {
+ // don't substitute for references inside oneOf declarations
+ } else if resolvedRef.OneOf != nil {
+ // don't substitute for references that contain oneOf declarations
+ } else if resolvedRef.AdditionalProperties != nil {
+ // don't substitute for references that look like objects
+ } else {
+ schema.Ref = nil
+ schema.CopyProperties(resolvedRef)
+ count++
+ }
+ }
+ }, "")
+ }
+}
+
+// resolveJSONPointer resolves JSON pointers.
+// This current implementation is very crude and custom for OpenAPI 2.0 schemas.
+// It panics for any pointer that it is unable to resolve.
+func (schema *Schema) resolveJSONPointer(ref string) (result *Schema, err error) {
+ parts := strings.Split(ref, "#")
+ if len(parts) == 2 {
+ documentName := parts[0] + "#"
+ if documentName == "#" && schema.ID != nil {
+ documentName = *(schema.ID)
+ }
+ path := parts[1]
+ document := schemas[documentName]
+ pathParts := strings.Split(path, "/")
+
+ // we currently do a very limited (hard-coded) resolution of certain paths and log errors for missed cases
+ if len(pathParts) == 1 {
+ return document, nil
+ } else if len(pathParts) == 3 {
+ switch pathParts[1] {
+ case "definitions":
+ dictionary := document.Definitions
+ for _, pair := range *dictionary {
+ if pair.Name == pathParts[2] {
+ result = pair.Value
+ }
+ }
+ case "properties":
+ dictionary := document.Properties
+ for _, pair := range *dictionary {
+ if pair.Name == pathParts[2] {
+ result = pair.Value
+ }
+ }
+ default:
+ break
+ }
+ }
+ }
+ if result == nil {
+ return nil, fmt.Errorf("unresolved pointer: %+v", ref)
+ }
+ return result, nil
+}
+
+// ResolveAllOfs replaces "allOf" elements by merging their properties into the parent Schema.
+func (schema *Schema) ResolveAllOfs() {
+ schema.applyToSchemas(
+ func(schema *Schema, context string) {
+ if schema.AllOf != nil {
+ for _, allOf := range *(schema.AllOf) {
+ schema.CopyProperties(allOf)
+ }
+ schema.AllOf = nil
+ }
+ }, "resolveAllOfs")
+}
+
+// ResolveAnyOfs replaces all "anyOf" elements with "oneOf".
+func (schema *Schema) ResolveAnyOfs() {
+ schema.applyToSchemas(
+ func(schema *Schema, context string) {
+ if schema.AnyOf != nil {
+ schema.OneOf = schema.AnyOf
+ schema.AnyOf = nil
+ }
+ }, "resolveAnyOfs")
+}
+
+// return a pointer to a copy of a passed-in string
+func stringptr(input string) (output *string) {
+ return &input
+}
+
+// CopyOfficialSchemaProperty copies a named property from the official JSON Schema definition
+func (schema *Schema) CopyOfficialSchemaProperty(name string) {
+ *schema.Properties = append(*schema.Properties,
+ NewNamedSchema(name,
+ &Schema{Ref: stringptr("http://json-schema.org/draft-04/schema#/properties/" + name)}))
+}
+
+// CopyOfficialSchemaProperties copies named properties from the official JSON Schema definition
+func (schema *Schema) CopyOfficialSchemaProperties(names []string) {
+ for _, name := range names {
+ schema.CopyOfficialSchemaProperty(name)
+ }
+}
diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/reader.go b/vendor/github.com/googleapis/gnostic/jsonschema/reader.go
new file mode 100644
index 000000000..b8583d466
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/jsonschema/reader.go
@@ -0,0 +1,442 @@
+// Copyright 2017 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:generate go run generate-base.go
+
+package jsonschema
+
+import (
+ "fmt"
+ "io/ioutil"
+ "strconv"
+
+ "gopkg.in/yaml.v3"
+)
+
+// This is a global map of all known Schemas.
+// It is initialized when the first Schema is created and inserted.
+var schemas map[string]*Schema
+
+// NewBaseSchema builds a schema object from an embedded json representation.
+func NewBaseSchema() (schema *Schema, err error) {
+ b, err := baseSchemaBytes()
+ if err != nil {
+ return nil, err
+ }
+ var node yaml.Node
+ err = yaml.Unmarshal(b, &node)
+ if err != nil {
+ return nil, err
+ }
+ return NewSchemaFromObject(&node), nil
+}
+
+// NewSchemaFromFile reads a schema from a file.
+// Currently this assumes that schemas are stored in the source distribution of this project.
+func NewSchemaFromFile(filename string) (schema *Schema, err error) {
+ file, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ var node yaml.Node
+ err = yaml.Unmarshal(file, &node)
+ if err != nil {
+ return nil, err
+ }
+ return NewSchemaFromObject(&node), nil
+}
+
+// NewSchemaFromObject constructs a schema from a parsed JSON object.
+// Due to the complexity of the schema representation, this is a
+// custom reader and not the standard Go JSON reader (encoding/json).
+func NewSchemaFromObject(jsonData *yaml.Node) *Schema {
+ switch jsonData.Kind {
+ case yaml.DocumentNode:
+ return NewSchemaFromObject(jsonData.Content[0])
+ case yaml.MappingNode:
+ schema := &Schema{}
+
+ for i := 0; i < len(jsonData.Content); i += 2 {
+ k := jsonData.Content[i].Value
+ v := jsonData.Content[i+1]
+
+ switch k {
+ case "$schema":
+ schema.Schema = schema.stringValue(v)
+ case "id":
+ schema.ID = schema.stringValue(v)
+
+ case "multipleOf":
+ schema.MultipleOf = schema.numberValue(v)
+ case "maximum":
+ schema.Maximum = schema.numberValue(v)
+ case "exclusiveMaximum":
+ schema.ExclusiveMaximum = schema.boolValue(v)
+ case "minimum":
+ schema.Minimum = schema.numberValue(v)
+ case "exclusiveMinimum":
+ schema.ExclusiveMinimum = schema.boolValue(v)
+
+ case "maxLength":
+ schema.MaxLength = schema.intValue(v)
+ case "minLength":
+ schema.MinLength = schema.intValue(v)
+ case "pattern":
+ schema.Pattern = schema.stringValue(v)
+
+ case "additionalItems":
+ schema.AdditionalItems = schema.schemaOrBooleanValue(v)
+ case "items":
+ schema.Items = schema.schemaOrSchemaArrayValue(v)
+ case "maxItems":
+ schema.MaxItems = schema.intValue(v)
+ case "minItems":
+ schema.MinItems = schema.intValue(v)
+ case "uniqueItems":
+ schema.UniqueItems = schema.boolValue(v)
+
+ case "maxProperties":
+ schema.MaxProperties = schema.intValue(v)
+ case "minProperties":
+ schema.MinProperties = schema.intValue(v)
+ case "required":
+ schema.Required = schema.arrayOfStringsValue(v)
+ case "additionalProperties":
+ schema.AdditionalProperties = schema.schemaOrBooleanValue(v)
+ case "properties":
+ schema.Properties = schema.mapOfSchemasValue(v)
+ case "patternProperties":
+ schema.PatternProperties = schema.mapOfSchemasValue(v)
+ case "dependencies":
+ schema.Dependencies = schema.mapOfSchemasOrStringArraysValue(v)
+
+ case "enum":
+ schema.Enumeration = schema.arrayOfEnumValuesValue(v)
+
+ case "type":
+ schema.Type = schema.stringOrStringArrayValue(v)
+ case "allOf":
+ schema.AllOf = schema.arrayOfSchemasValue(v)
+ case "anyOf":
+ schema.AnyOf = schema.arrayOfSchemasValue(v)
+ case "oneOf":
+ schema.OneOf = schema.arrayOfSchemasValue(v)
+ case "not":
+ schema.Not = NewSchemaFromObject(v)
+ case "definitions":
+ schema.Definitions = schema.mapOfSchemasValue(v)
+
+ case "title":
+ schema.Title = schema.stringValue(v)
+ case "description":
+ schema.Description = schema.stringValue(v)
+
+ case "default":
+ schema.Default = v
+
+ case "format":
+ schema.Format = schema.stringValue(v)
+ case "$ref":
+ schema.Ref = schema.stringValue(v)
+ default:
+ fmt.Printf("UNSUPPORTED (%s)\n", k)
+ }
+ }
+
+ // insert schema in global map
+ if schema.ID != nil {
+ if schemas == nil {
+ schemas = make(map[string]*Schema, 0)
+ }
+ schemas[*(schema.ID)] = schema
+ }
+ return schema
+
+ default:
+ fmt.Printf("schemaValue: unexpected node %+v\n", jsonData)
+ return nil
+ }
+
+ return nil
+}
+
+//
+// BUILDERS
+// The following methods build elements of Schemas from interface{} values.
+// Each returns nil if it is unable to build the desired element.
+//
+
+// Gets the string value of an interface{} value if possible.
+func (schema *Schema) stringValue(v *yaml.Node) *string {
+ switch v.Kind {
+ case yaml.ScalarNode:
+ return &v.Value
+ default:
+ fmt.Printf("stringValue: unexpected node %+v\n", v)
+ }
+ return nil
+}
+
+// Gets the numeric value of an interface{} value if possible.
+func (schema *Schema) numberValue(v *yaml.Node) *SchemaNumber {
+ number := &SchemaNumber{}
+ switch v.Kind {
+ case yaml.ScalarNode:
+ switch v.Tag {
+ case "!!float":
+ v2, _ := strconv.ParseFloat(v.Value, 64)
+ number.Float = &v2
+ return number
+ case "!!int":
+ v2, _ := strconv.ParseInt(v.Value, 10, 64)
+ number.Integer = &v2
+ return number
+ default:
+ fmt.Printf("stringValue: unexpected node %+v\n", v)
+ }
+ default:
+ fmt.Printf("stringValue: unexpected node %+v\n", v)
+ }
+ return nil
+}
+
+// Gets the integer value of an interface{} value if possible.
+func (schema *Schema) intValue(v *yaml.Node) *int64 {
+ switch v.Kind {
+ case yaml.ScalarNode:
+ switch v.Tag {
+ case "!!float":
+ v2, _ := strconv.ParseFloat(v.Value, 64)
+ v3 := int64(v2)
+ return &v3
+ case "!!int":
+ v2, _ := strconv.ParseInt(v.Value, 10, 64)
+ return &v2
+ default:
+ fmt.Printf("intValue: unexpected node %+v\n", v)
+ }
+ default:
+ fmt.Printf("intValue: unexpected node %+v\n", v)
+ }
+ return nil
+}
+
+// Gets the bool value of an interface{} value if possible.
+func (schema *Schema) boolValue(v *yaml.Node) *bool {
+ switch v.Kind {
+ case yaml.ScalarNode:
+ switch v.Tag {
+ case "!!bool":
+ v2, _ := strconv.ParseBool(v.Value)
+ return &v2
+ default:
+ fmt.Printf("boolValue: unexpected node %+v\n", v)
+ }
+ default:
+ fmt.Printf("boolValue: unexpected node %+v\n", v)
+ }
+ return nil
+}
+
+// Gets a map of Schemas from an interface{} value if possible.
+func (schema *Schema) mapOfSchemasValue(v *yaml.Node) *[]*NamedSchema {
+ switch v.Kind {
+ case yaml.MappingNode:
+ m := make([]*NamedSchema, 0)
+ for i := 0; i < len(v.Content); i += 2 {
+ k2 := v.Content[i].Value
+ v2 := v.Content[i+1]
+ pair := &NamedSchema{Name: k2, Value: NewSchemaFromObject(v2)}
+ m = append(m, pair)
+ }
+ return &m
+ default:
+ fmt.Printf("mapOfSchemasValue: unexpected node %+v\n", v)
+ }
+ return nil
+}
+
+// Gets an array of Schemas from an interface{} value if possible.
+func (schema *Schema) arrayOfSchemasValue(v *yaml.Node) *[]*Schema {
+ switch v.Kind {
+ case yaml.SequenceNode:
+ m := make([]*Schema, 0)
+ for _, v2 := range v.Content {
+ switch v2.Kind {
+ case yaml.MappingNode:
+ s := NewSchemaFromObject(v2)
+ m = append(m, s)
+ default:
+ fmt.Printf("arrayOfSchemasValue: unexpected node %+v\n", v2)
+ }
+ }
+ return &m
+ case yaml.MappingNode:
+ m := make([]*Schema, 0)
+ s := NewSchemaFromObject(v)
+ m = append(m, s)
+ return &m
+ default:
+ fmt.Printf("arrayOfSchemasValue: unexpected node %+v\n", v)
+ }
+ return nil
+}
+
+// Gets a Schema or an array of Schemas from an interface{} value if possible.
+func (schema *Schema) schemaOrSchemaArrayValue(v *yaml.Node) *SchemaOrSchemaArray {
+ switch v.Kind {
+ case yaml.SequenceNode:
+ m := make([]*Schema, 0)
+ for _, v2 := range v.Content {
+ switch v2.Kind {
+ case yaml.MappingNode:
+ s := NewSchemaFromObject(v2)
+ m = append(m, s)
+ default:
+ fmt.Printf("schemaOrSchemaArrayValue: unexpected node %+v\n", v2)
+ }
+ }
+ return &SchemaOrSchemaArray{SchemaArray: &m}
+ case yaml.MappingNode:
+ s := NewSchemaFromObject(v)
+ return &SchemaOrSchemaArray{Schema: s}
+ default:
+ fmt.Printf("schemaOrSchemaArrayValue: unexpected node %+v\n", v)
+ }
+ return nil
+}
+
+// Gets an array of strings from an interface{} value if possible.
+func (schema *Schema) arrayOfStringsValue(v *yaml.Node) *[]string {
+ switch v.Kind {
+ case yaml.ScalarNode:
+ a := []string{v.Value}
+ return &a
+ case yaml.SequenceNode:
+ a := make([]string, 0)
+ for _, v2 := range v.Content {
+ switch v2.Kind {
+ case yaml.ScalarNode:
+ a = append(a, v2.Value)
+ default:
+ fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v2)
+ }
+ }
+ return &a
+ default:
+ fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v)
+ }
+ return nil
+}
+
+// Gets a string or an array of strings from an interface{} value if possible.
+func (schema *Schema) stringOrStringArrayValue(v *yaml.Node) *StringOrStringArray {
+ switch v.Kind {
+ case yaml.ScalarNode:
+ s := &StringOrStringArray{}
+ s.String = &v.Value
+ return s
+ case yaml.SequenceNode:
+ a := make([]string, 0)
+ for _, v2 := range v.Content {
+ switch v2.Kind {
+ case yaml.ScalarNode:
+ a = append(a, v2.Value)
+ default:
+ fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v2)
+ }
+ }
+ s := &StringOrStringArray{}
+ s.StringArray = &a
+ return s
+ default:
+ fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v)
+ }
+ return nil
+}
+
+// Gets an array of enum values from an interface{} value if possible.
+func (schema *Schema) arrayOfEnumValuesValue(v *yaml.Node) *[]SchemaEnumValue {
+ a := make([]SchemaEnumValue, 0)
+ switch v.Kind {
+ case yaml.SequenceNode:
+ for _, v2 := range v.Content {
+ switch v2.Kind {
+ case yaml.ScalarNode:
+ switch v2.Tag {
+ case "!!str":
+ a = append(a, SchemaEnumValue{String: &v2.Value})
+ case "!!bool":
+ v3, _ := strconv.ParseBool(v2.Value)
+ a = append(a, SchemaEnumValue{Bool: &v3})
+ default:
+ fmt.Printf("arrayOfEnumValuesValue: unexpected type %s\n", v2.Tag)
+ }
+ default:
+ fmt.Printf("arrayOfEnumValuesValue: unexpected node %+v\n", v2)
+ }
+ }
+ default:
+ fmt.Printf("arrayOfEnumValuesValue: unexpected node %+v\n", v)
+ }
+ return &a
+}
+
+// Gets a map of schemas or string arrays from an interface{} value if possible.
+func (schema *Schema) mapOfSchemasOrStringArraysValue(v *yaml.Node) *[]*NamedSchemaOrStringArray {
+ m := make([]*NamedSchemaOrStringArray, 0)
+ switch v.Kind {
+ case yaml.MappingNode:
+ for i := 0; i < len(v.Content); i += 2 {
+ k2 := v.Content[i].Value
+ v2 := v.Content[i+1]
+ switch v2.Kind {
+ case yaml.SequenceNode:
+ a := make([]string, 0)
+ for _, v3 := range v2.Content {
+ switch v3.Kind {
+ case yaml.ScalarNode:
+ a = append(a, v3.Value)
+ default:
+ fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v3)
+ }
+ }
+ s := &SchemaOrStringArray{}
+ s.StringArray = &a
+ pair := &NamedSchemaOrStringArray{Name: k2, Value: s}
+ m = append(m, pair)
+ default:
+ fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v2)
+ }
+ }
+ default:
+ fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v)
+ }
+ return &m
+}
+
+// Gets a schema or a boolean value from an interface{} value if possible.
+func (schema *Schema) schemaOrBooleanValue(v *yaml.Node) *SchemaOrBoolean {
+ schemaOrBoolean := &SchemaOrBoolean{}
+ switch v.Kind {
+ case yaml.ScalarNode:
+ v2, _ := strconv.ParseBool(v.Value)
+ schemaOrBoolean.Boolean = &v2
+ case yaml.MappingNode:
+ schemaOrBoolean.Schema = NewSchemaFromObject(v)
+ default:
+ fmt.Printf("schemaOrBooleanValue: unexpected node %+v\n", v)
+ }
+ return schemaOrBoolean
+}
diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/schema.json b/vendor/github.com/googleapis/gnostic/jsonschema/schema.json
new file mode 100644
index 000000000..85eb502a6
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/jsonschema/schema.json
@@ -0,0 +1,150 @@
+{
+ "id": "http://json-schema.org/draft-04/schema#",
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "description": "Core schema meta-schema",
+ "definitions": {
+ "schemaArray": {
+ "type": "array",
+ "minItems": 1,
+ "items": { "$ref": "#" }
+ },
+ "positiveInteger": {
+ "type": "integer",
+ "minimum": 0
+ },
+ "positiveIntegerDefault0": {
+ "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ]
+ },
+ "simpleTypes": {
+ "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ]
+ },
+ "stringArray": {
+ "type": "array",
+ "items": { "type": "string" },
+ "minItems": 1,
+ "uniqueItems": true
+ }
+ },
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string",
+ "format": "uri"
+ },
+ "$schema": {
+ "type": "string",
+ "format": "uri"
+ },
+ "title": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "default": {},
+ "multipleOf": {
+ "type": "number",
+ "minimum": 0,
+ "exclusiveMinimum": true
+ },
+ "maximum": {
+ "type": "number"
+ },
+ "exclusiveMaximum": {
+ "type": "boolean",
+ "default": false
+ },
+ "minimum": {
+ "type": "number"
+ },
+ "exclusiveMinimum": {
+ "type": "boolean",
+ "default": false
+ },
+ "maxLength": { "$ref": "#/definitions/positiveInteger" },
+ "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" },
+ "pattern": {
+ "type": "string",
+ "format": "regex"
+ },
+ "additionalItems": {
+ "anyOf": [
+ { "type": "boolean" },
+ { "$ref": "#" }
+ ],
+ "default": {}
+ },
+ "items": {
+ "anyOf": [
+ { "$ref": "#" },
+ { "$ref": "#/definitions/schemaArray" }
+ ],
+ "default": {}
+ },
+ "maxItems": { "$ref": "#/definitions/positiveInteger" },
+ "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" },
+ "uniqueItems": {
+ "type": "boolean",
+ "default": false
+ },
+ "maxProperties": { "$ref": "#/definitions/positiveInteger" },
+ "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" },
+ "required": { "$ref": "#/definitions/stringArray" },
+ "additionalProperties": {
+ "anyOf": [
+ { "type": "boolean" },
+ { "$ref": "#" }
+ ],
+ "default": {}
+ },
+ "definitions": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "properties": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "patternProperties": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "dependencies": {
+ "type": "object",
+ "additionalProperties": {
+ "anyOf": [
+ { "$ref": "#" },
+ { "$ref": "#/definitions/stringArray" }
+ ]
+ }
+ },
+ "enum": {
+ "type": "array",
+ "minItems": 1,
+ "uniqueItems": true
+ },
+ "type": {
+ "anyOf": [
+ { "$ref": "#/definitions/simpleTypes" },
+ {
+ "type": "array",
+ "items": { "$ref": "#/definitions/simpleTypes" },
+ "minItems": 1,
+ "uniqueItems": true
+ }
+ ]
+ },
+ "allOf": { "$ref": "#/definitions/schemaArray" },
+ "anyOf": { "$ref": "#/definitions/schemaArray" },
+ "oneOf": { "$ref": "#/definitions/schemaArray" },
+ "not": { "$ref": "#" }
+ },
+ "dependencies": {
+ "exclusiveMaximum": [ "maximum" ],
+ "exclusiveMinimum": [ "minimum" ]
+ },
+ "default": {}
+}
diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/writer.go b/vendor/github.com/googleapis/gnostic/jsonschema/writer.go
new file mode 100644
index 000000000..340dc5f93
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/jsonschema/writer.go
@@ -0,0 +1,369 @@
+// Copyright 2017 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jsonschema
+
+import (
+ "fmt"
+
+ "gopkg.in/yaml.v3"
+)
+
+const indentation = " "
+
+func renderMappingNode(node *yaml.Node, indent string) (result string) {
+ result = "{\n"
+ innerIndent := indent + indentation
+ for i := 0; i < len(node.Content); i += 2 {
+ // first print the key
+ key := node.Content[i].Value
+ result += fmt.Sprintf("%s\"%+v\": ", innerIndent, key)
+ // then the value
+ value := node.Content[i+1]
+ switch value.Kind {
+ case yaml.ScalarNode:
+ result += "\"" + value.Value + "\""
+ case yaml.MappingNode:
+ result += renderMappingNode(value, innerIndent)
+ case yaml.SequenceNode:
+ result += renderSequenceNode(value, innerIndent)
+ default:
+ result += fmt.Sprintf("???MapItem(Key:%+v, Value:%T)", value, value)
+ }
+ if i < len(node.Content)-2 {
+ result += ","
+ }
+ result += "\n"
+ }
+
+ result += indent + "}"
+ return result
+}
+
+func renderSequenceNode(node *yaml.Node, indent string) (result string) {
+ result = "[\n"
+ innerIndent := indent + indentation
+ for i := 0; i < len(node.Content); i++ {
+ item := node.Content[i]
+ switch item.Kind {
+ case yaml.ScalarNode:
+ result += innerIndent + "\"" + item.Value + "\""
+ case yaml.MappingNode:
+ result += innerIndent + renderMappingNode(item, innerIndent) + ""
+ default:
+ result += innerIndent + fmt.Sprintf("???ArrayItem(%+v)", item)
+ }
+ if i < len(node.Content)-1 {
+ result += ","
+ }
+ result += "\n"
+ }
+ result += indent + "]"
+ return result
+}
+
+func renderStringArray(array []string, indent string) (result string) {
+ result = "[\n"
+ innerIndent := indent + indentation
+ for i, item := range array {
+ result += innerIndent + "\"" + item + "\""
+ if i < len(array)-1 {
+ result += ","
+ }
+ result += "\n"
+ }
+ result += indent + "]"
+ return result
+}
+
+// Render renders a yaml.Node as JSON
+func Render(node *yaml.Node) string {
+ if node.Kind == yaml.DocumentNode {
+ if len(node.Content) == 1 {
+ return Render(node.Content[0])
+ }
+ } else if node.Kind == yaml.MappingNode {
+ return renderMappingNode(node, "") + "\n"
+ } else if node.Kind == yaml.SequenceNode {
+ return renderSequenceNode(node, "") + "\n"
+ }
+ return ""
+}
+
+func (object *SchemaNumber) nodeValue() *yaml.Node {
+ if object.Integer != nil {
+ return nodeForInt64(*object.Integer)
+ } else if object.Float != nil {
+ return nodeForFloat64(*object.Float)
+ } else {
+ return nil
+ }
+}
+
+func (object *SchemaOrBoolean) nodeValue() *yaml.Node {
+ if object.Schema != nil {
+ return object.Schema.nodeValue()
+ } else if object.Boolean != nil {
+ return nodeForBoolean(*object.Boolean)
+ } else {
+ return nil
+ }
+}
+
+func nodeForStringArray(array []string) *yaml.Node {
+ content := make([]*yaml.Node, 0)
+ for _, item := range array {
+ content = append(content, nodeForString(item))
+ }
+ return nodeForSequence(content)
+}
+
+func nodeForSchemaArray(array []*Schema) *yaml.Node {
+ content := make([]*yaml.Node, 0)
+ for _, item := range array {
+ content = append(content, item.nodeValue())
+ }
+ return nodeForSequence(content)
+}
+
+func (object *StringOrStringArray) nodeValue() *yaml.Node {
+ if object.String != nil {
+ return nodeForString(*object.String)
+ } else if object.StringArray != nil {
+ return nodeForStringArray(*(object.StringArray))
+ } else {
+ return nil
+ }
+}
+
+func (object *SchemaOrStringArray) nodeValue() *yaml.Node {
+ if object.Schema != nil {
+ return object.Schema.nodeValue()
+ } else if object.StringArray != nil {
+ return nodeForStringArray(*(object.StringArray))
+ } else {
+ return nil
+ }
+}
+
+func (object *SchemaOrSchemaArray) nodeValue() *yaml.Node {
+ if object.Schema != nil {
+ return object.Schema.nodeValue()
+ } else if object.SchemaArray != nil {
+ return nodeForSchemaArray(*(object.SchemaArray))
+ } else {
+ return nil
+ }
+}
+
+func (object *SchemaEnumValue) nodeValue() *yaml.Node {
+ if object.String != nil {
+ return nodeForString(*object.String)
+ } else if object.Bool != nil {
+ return nodeForBoolean(*object.Bool)
+ } else {
+ return nil
+ }
+}
+
+func nodeForNamedSchemaArray(array *[]*NamedSchema) *yaml.Node {
+ content := make([]*yaml.Node, 0)
+ for _, pair := range *(array) {
+ content = appendPair(content, pair.Name, pair.Value.nodeValue())
+ }
+ return nodeForMapping(content)
+}
+
+func nodeForNamedSchemaOrStringArray(array *[]*NamedSchemaOrStringArray) *yaml.Node {
+ content := make([]*yaml.Node, 0)
+ for _, pair := range *(array) {
+ content = appendPair(content, pair.Name, pair.Value.nodeValue())
+ }
+ return nodeForMapping(content)
+}
+
+func nodeForSchemaEnumArray(array *[]SchemaEnumValue) *yaml.Node {
+ content := make([]*yaml.Node, 0)
+ for _, item := range *array {
+ content = append(content, item.nodeValue())
+ }
+ return nodeForSequence(content)
+}
+
+func nodeForMapping(content []*yaml.Node) *yaml.Node {
+ return &yaml.Node{
+ Kind: yaml.MappingNode,
+ Content: content,
+ }
+}
+
+func nodeForSequence(content []*yaml.Node) *yaml.Node {
+ return &yaml.Node{
+ Kind: yaml.SequenceNode,
+ Content: content,
+ }
+}
+
+func nodeForString(value string) *yaml.Node {
+ return &yaml.Node{
+ Kind: yaml.ScalarNode,
+ Tag: "!!str",
+ Value: value,
+ }
+}
+
+func nodeForBoolean(value bool) *yaml.Node {
+ return &yaml.Node{
+ Kind: yaml.ScalarNode,
+ Tag: "!!bool",
+ Value: fmt.Sprintf("%t", value),
+ }
+}
+
+func nodeForInt64(value int64) *yaml.Node {
+ return &yaml.Node{
+ Kind: yaml.ScalarNode,
+ Tag: "!!int",
+ Value: fmt.Sprintf("%d", value),
+ }
+}
+
+func nodeForFloat64(value float64) *yaml.Node {
+ return &yaml.Node{
+ Kind: yaml.ScalarNode,
+ Tag: "!!float",
+ Value: fmt.Sprintf("%f", value),
+ }
+}
+
+func appendPair(nodes []*yaml.Node, name string, value *yaml.Node) []*yaml.Node {
+ nodes = append(nodes, nodeForString(name))
+ nodes = append(nodes, value)
+ return nodes
+}
+
+func (schema *Schema) nodeValue() *yaml.Node {
+ n := &yaml.Node{Kind: yaml.MappingNode}
+ content := make([]*yaml.Node, 0)
+ if schema.Title != nil {
+ content = appendPair(content, "title", nodeForString(*schema.Title))
+ }
+ if schema.ID != nil {
+ content = appendPair(content, "id", nodeForString(*schema.ID))
+ }
+ if schema.Schema != nil {
+ content = appendPair(content, "$schema", nodeForString(*schema.Schema))
+ }
+ if schema.Type != nil {
+ content = appendPair(content, "type", schema.Type.nodeValue())
+ }
+ if schema.Items != nil {
+ content = appendPair(content, "items", schema.Items.nodeValue())
+ }
+ if schema.Description != nil {
+ content = appendPair(content, "description", nodeForString(*schema.Description))
+ }
+ if schema.Required != nil {
+ content = appendPair(content, "required", nodeForStringArray(*schema.Required))
+ }
+ if schema.AdditionalProperties != nil {
+ content = appendPair(content, "additionalProperties", schema.AdditionalProperties.nodeValue())
+ }
+ if schema.PatternProperties != nil {
+ content = appendPair(content, "patternProperties", nodeForNamedSchemaArray(schema.PatternProperties))
+ }
+ if schema.Properties != nil {
+ content = appendPair(content, "properties", nodeForNamedSchemaArray(schema.Properties))
+ }
+ if schema.Dependencies != nil {
+ content = appendPair(content, "dependencies", nodeForNamedSchemaOrStringArray(schema.Dependencies))
+ }
+ if schema.Ref != nil {
+ content = appendPair(content, "$ref", nodeForString(*schema.Ref))
+ }
+ if schema.MultipleOf != nil {
+ content = appendPair(content, "multipleOf", schema.MultipleOf.nodeValue())
+ }
+ if schema.Maximum != nil {
+ content = appendPair(content, "maximum", schema.Maximum.nodeValue())
+ }
+ if schema.ExclusiveMaximum != nil {
+ content = appendPair(content, "exclusiveMaximum", nodeForBoolean(*schema.ExclusiveMaximum))
+ }
+ if schema.Minimum != nil {
+ content = appendPair(content, "minimum", schema.Minimum.nodeValue())
+ }
+ if schema.ExclusiveMinimum != nil {
+ content = appendPair(content, "exclusiveMinimum", nodeForBoolean(*schema.ExclusiveMinimum))
+ }
+ if schema.MaxLength != nil {
+ content = appendPair(content, "maxLength", nodeForInt64(*schema.MaxLength))
+ }
+ if schema.MinLength != nil {
+ content = appendPair(content, "minLength", nodeForInt64(*schema.MinLength))
+ }
+ if schema.Pattern != nil {
+ content = appendPair(content, "pattern", nodeForString(*schema.Pattern))
+ }
+ if schema.AdditionalItems != nil {
+ content = appendPair(content, "additionalItems", schema.AdditionalItems.nodeValue())
+ }
+ if schema.MaxItems != nil {
+ content = appendPair(content, "maxItems", nodeForInt64(*schema.MaxItems))
+ }
+ if schema.MinItems != nil {
+ content = appendPair(content, "minItems", nodeForInt64(*schema.MinItems))
+ }
+ if schema.UniqueItems != nil {
+ content = appendPair(content, "uniqueItems", nodeForBoolean(*schema.UniqueItems))
+ }
+ if schema.MaxProperties != nil {
+ content = appendPair(content, "maxProperties", nodeForInt64(*schema.MaxProperties))
+ }
+ if schema.MinProperties != nil {
+ content = appendPair(content, "minProperties", nodeForInt64(*schema.MinProperties))
+ }
+ if schema.Enumeration != nil {
+ content = appendPair(content, "enum", nodeForSchemaEnumArray(schema.Enumeration))
+ }
+ if schema.AllOf != nil {
+ content = appendPair(content, "allOf", nodeForSchemaArray(*schema.AllOf))
+ }
+ if schema.AnyOf != nil {
+ content = appendPair(content, "anyOf", nodeForSchemaArray(*schema.AnyOf))
+ }
+ if schema.OneOf != nil {
+ content = appendPair(content, "oneOf", nodeForSchemaArray(*schema.OneOf))
+ }
+ if schema.Not != nil {
+ content = appendPair(content, "not", schema.Not.nodeValue())
+ }
+ if schema.Definitions != nil {
+ content = appendPair(content, "definitions", nodeForNamedSchemaArray(schema.Definitions))
+ }
+ if schema.Default != nil {
+ // m = append(m, yaml.MapItem{Key: "default", Value: *schema.Default})
+ }
+ if schema.Format != nil {
+ content = appendPair(content, "format", nodeForString(*schema.Format))
+ }
+ n.Content = content
+ return n
+}
+
+// JSONString returns a json representation of a schema.
+func (schema *Schema) JSONString() string {
+ node := schema.nodeValue()
+ return Render(node)
+}
diff --git a/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.go b/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.go
index 4fd44c45e..727d7f4ad 100644
--- a/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.go
+++ b/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.go
@@ -1,4 +1,4 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
+// Copyright 2020 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@ package openapi_v2
import (
"fmt"
"github.com/googleapis/gnostic/compiler"
- "gopkg.in/yaml.v2"
+ "gopkg.in/yaml.v3"
"regexp"
"strings"
)
@@ -30,7 +30,7 @@ func Version() string {
}
// NewAdditionalPropertiesItem creates an object of type AdditionalPropertiesItem if possible, returning an error if not.
-func NewAdditionalPropertiesItem(in interface{}, context *compiler.Context) (*AdditionalPropertiesItem, error) {
+func NewAdditionalPropertiesItem(in *yaml.Node, context *compiler.Context) (*AdditionalPropertiesItem, error) {
errors := make([]error, 0)
x := &AdditionalPropertiesItem{}
matched := false
@@ -39,7 +39,7 @@ func NewAdditionalPropertiesItem(in interface{}, context *compiler.Context) (*Ad
m, ok := compiler.UnpackMap(in)
if ok {
// errors might be ok here, they mean we just don't have the right subtype
- t, matchingError := NewSchema(m, compiler.NewContext("schema", context))
+ t, matchingError := NewSchema(m, compiler.NewContext("schema", m, context))
if matchingError == nil {
x.Oneof = &AdditionalPropertiesItem_Schema{Schema: t}
matched = true
@@ -49,28 +49,33 @@ func NewAdditionalPropertiesItem(in interface{}, context *compiler.Context) (*Ad
}
}
// bool boolean = 2;
- boolValue, ok := in.(bool)
+ boolValue, ok := compiler.BoolForScalarNode(in)
if ok {
x.Oneof = &AdditionalPropertiesItem_Boolean{Boolean: boolValue}
+ matched = true
}
if matched {
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
+ } else {
+ message := fmt.Sprintf("contains an invalid AdditionalPropertiesItem")
+ err := compiler.NewError(context, message)
+ errors = []error{err}
}
return x, compiler.NewErrorGroupOrNil(errors)
}
// NewAny creates an object of type Any if possible, returning an error if not.
-func NewAny(in interface{}, context *compiler.Context) (*Any, error) {
+func NewAny(in *yaml.Node, context *compiler.Context) (*Any, error) {
errors := make([]error, 0)
x := &Any{}
- bytes, _ := yaml.Marshal(in)
+ bytes := compiler.Marshal(in)
x.Yaml = string(bytes)
return x, compiler.NewErrorGroupOrNil(errors)
}
// NewApiKeySecurity creates an object of type ApiKeySecurity if possible, returning an error if not.
-func NewApiKeySecurity(in interface{}, context *compiler.Context) (*ApiKeySecurity, error) {
+func NewApiKeySecurity(in *yaml.Node, context *compiler.Context) (*ApiKeySecurity, error) {
errors := make([]error, 0)
x := &ApiKeySecurity{}
m, ok := compiler.UnpackMap(in)
@@ -94,74 +99,74 @@ func NewApiKeySecurity(in interface{}, context *compiler.Context) (*ApiKeySecuri
// string type = 1;
v1 := compiler.MapValueForKey(m, "type")
if v1 != nil {
- x.Type, ok = v1.(string)
+ x.Type, ok = compiler.StringForScalarNode(v1)
if !ok {
- message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
// check for valid enum values
// [apiKey]
if ok && !compiler.StringArrayContainsValue([]string{"apiKey"}, x.Type) {
- message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
// string name = 2;
v2 := compiler.MapValueForKey(m, "name")
if v2 != nil {
- x.Name, ok = v2.(string)
+ x.Name, ok = compiler.StringForScalarNode(v2)
if !ok {
- message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2)
+ message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v2))
errors = append(errors, compiler.NewError(context, message))
}
}
// string in = 3;
v3 := compiler.MapValueForKey(m, "in")
if v3 != nil {
- x.In, ok = v3.(string)
+ x.In, ok = compiler.StringForScalarNode(v3)
if !ok {
- message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3)
+ message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3))
errors = append(errors, compiler.NewError(context, message))
}
// check for valid enum values
// [header query]
if ok && !compiler.StringArrayContainsValue([]string{"header", "query"}, x.In) {
- message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3)
+ message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3))
errors = append(errors, compiler.NewError(context, message))
}
}
// string description = 4;
v4 := compiler.MapValueForKey(m, "description")
if v4 != nil {
- x.Description, ok = v4.(string)
+ x.Description, ok = compiler.StringForScalarNode(v4)
if !ok {
- message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4)
+ message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v4))
errors = append(errors, compiler.NewError(context, message))
}
}
// repeated NamedAny vendor_extension = 5;
// MAP: Any ^x-
x.VendorExtension = make([]*NamedAny, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
if strings.HasPrefix(k, "x-") {
pair := &NamedAny{}
pair.Name = k
result := &Any{}
- handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+ handled, resultFromExt, err := compiler.CallExtension(context, v, k)
if handled {
if err != nil {
errors = append(errors, err)
} else {
- bytes, _ := yaml.Marshal(v)
+ bytes := compiler.Marshal(v)
result.Yaml = string(bytes)
result.Value = resultFromExt
pair.Value = result
}
} else {
- pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+ pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -175,7 +180,7 @@ func NewApiKeySecurity(in interface{}, context *compiler.Context) (*ApiKeySecuri
}
// NewBasicAuthenticationSecurity creates an object of type BasicAuthenticationSecurity if possible, returning an error if not.
-func NewBasicAuthenticationSecurity(in interface{}, context *compiler.Context) (*BasicAuthenticationSecurity, error) {
+func NewBasicAuthenticationSecurity(in *yaml.Node, context *compiler.Context) (*BasicAuthenticationSecurity, error) {
errors := make([]error, 0)
x := &BasicAuthenticationSecurity{}
m, ok := compiler.UnpackMap(in)
@@ -199,50 +204,50 @@ func NewBasicAuthenticationSecurity(in interface{}, context *compiler.Context) (
// string type = 1;
v1 := compiler.MapValueForKey(m, "type")
if v1 != nil {
- x.Type, ok = v1.(string)
+ x.Type, ok = compiler.StringForScalarNode(v1)
if !ok {
- message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
// check for valid enum values
// [basic]
if ok && !compiler.StringArrayContainsValue([]string{"basic"}, x.Type) {
- message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
// string description = 2;
v2 := compiler.MapValueForKey(m, "description")
if v2 != nil {
- x.Description, ok = v2.(string)
+ x.Description, ok = compiler.StringForScalarNode(v2)
if !ok {
- message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2)
+ message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2))
errors = append(errors, compiler.NewError(context, message))
}
}
// repeated NamedAny vendor_extension = 3;
// MAP: Any ^x-
x.VendorExtension = make([]*NamedAny, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
if strings.HasPrefix(k, "x-") {
pair := &NamedAny{}
pair.Name = k
result := &Any{}
- handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+ handled, resultFromExt, err := compiler.CallExtension(context, v, k)
if handled {
if err != nil {
errors = append(errors, err)
} else {
- bytes, _ := yaml.Marshal(v)
+ bytes := compiler.Marshal(v)
result.Yaml = string(bytes)
result.Value = resultFromExt
pair.Value = result
}
} else {
- pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+ pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -256,7 +261,7 @@ func NewBasicAuthenticationSecurity(in interface{}, context *compiler.Context) (
}
// NewBodyParameter creates an object of type BodyParameter if possible, returning an error if not.
-func NewBodyParameter(in interface{}, context *compiler.Context) (*BodyParameter, error) {
+func NewBodyParameter(in *yaml.Node, context *compiler.Context) (*BodyParameter, error) {
errors := make([]error, 0)
x := &BodyParameter{}
m, ok := compiler.UnpackMap(in)
@@ -280,42 +285,42 @@ func NewBodyParameter(in interface{}, context *compiler.Context) (*BodyParameter
// string description = 1;
v1 := compiler.MapValueForKey(m, "description")
if v1 != nil {
- x.Description, ok = v1.(string)
+ x.Description, ok = compiler.StringForScalarNode(v1)
if !ok {
- message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
// string name = 2;
v2 := compiler.MapValueForKey(m, "name")
if v2 != nil {
- x.Name, ok = v2.(string)
+ x.Name, ok = compiler.StringForScalarNode(v2)
if !ok {
- message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2)
+ message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v2))
errors = append(errors, compiler.NewError(context, message))
}
}
// string in = 3;
v3 := compiler.MapValueForKey(m, "in")
if v3 != nil {
- x.In, ok = v3.(string)
+ x.In, ok = compiler.StringForScalarNode(v3)
if !ok {
- message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3)
+ message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3))
errors = append(errors, compiler.NewError(context, message))
}
// check for valid enum values
// [body]
if ok && !compiler.StringArrayContainsValue([]string{"body"}, x.In) {
- message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3)
+ message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3))
errors = append(errors, compiler.NewError(context, message))
}
}
// bool required = 4;
v4 := compiler.MapValueForKey(m, "required")
if v4 != nil {
- x.Required, ok = v4.(bool)
+ x.Required, ok = compiler.BoolForScalarNode(v4)
if !ok {
- message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v4, v4)
+ message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v4))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -323,7 +328,7 @@ func NewBodyParameter(in interface{}, context *compiler.Context) (*BodyParameter
v5 := compiler.MapValueForKey(m, "schema")
if v5 != nil {
var err error
- x.Schema, err = NewSchema(v5, compiler.NewContext("schema", context))
+ x.Schema, err = NewSchema(v5, compiler.NewContext("schema", v5, context))
if err != nil {
errors = append(errors, err)
}
@@ -331,26 +336,26 @@ func NewBodyParameter(in interface{}, context *compiler.Context) (*BodyParameter
// repeated NamedAny vendor_extension = 6;
// MAP: Any ^x-
x.VendorExtension = make([]*NamedAny, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
if strings.HasPrefix(k, "x-") {
pair := &NamedAny{}
pair.Name = k
result := &Any{}
- handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+ handled, resultFromExt, err := compiler.CallExtension(context, v, k)
if handled {
if err != nil {
errors = append(errors, err)
} else {
- bytes, _ := yaml.Marshal(v)
+ bytes := compiler.Marshal(v)
result.Yaml = string(bytes)
result.Value = resultFromExt
pair.Value = result
}
} else {
- pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+ pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -364,7 +369,7 @@ func NewBodyParameter(in interface{}, context *compiler.Context) (*BodyParameter
}
// NewContact creates an object of type Contact if possible, returning an error if not.
-func NewContact(in interface{}, context *compiler.Context) (*Contact, error) {
+func NewContact(in *yaml.Node, context *compiler.Context) (*Contact, error) {
errors := make([]error, 0)
x := &Contact{}
m, ok := compiler.UnpackMap(in)
@@ -382,53 +387,53 @@ func NewContact(in interface{}, context *compiler.Context) (*Contact, error) {
// string name = 1;
v1 := compiler.MapValueForKey(m, "name")
if v1 != nil {
- x.Name, ok = v1.(string)
+ x.Name, ok = compiler.StringForScalarNode(v1)
if !ok {
- message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
// string url = 2;
v2 := compiler.MapValueForKey(m, "url")
if v2 != nil {
- x.Url, ok = v2.(string)
+ x.Url, ok = compiler.StringForScalarNode(v2)
if !ok {
- message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2)
+ message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2))
errors = append(errors, compiler.NewError(context, message))
}
}
// string email = 3;
v3 := compiler.MapValueForKey(m, "email")
if v3 != nil {
- x.Email, ok = v3.(string)
+ x.Email, ok = compiler.StringForScalarNode(v3)
if !ok {
- message := fmt.Sprintf("has unexpected value for email: %+v (%T)", v3, v3)
+ message := fmt.Sprintf("has unexpected value for email: %s", compiler.Display(v3))
errors = append(errors, compiler.NewError(context, message))
}
}
// repeated NamedAny vendor_extension = 4;
// MAP: Any ^x-
x.VendorExtension = make([]*NamedAny, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
if strings.HasPrefix(k, "x-") {
pair := &NamedAny{}
pair.Name = k
result := &Any{}
- handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+ handled, resultFromExt, err := compiler.CallExtension(context, v, k)
if handled {
if err != nil {
errors = append(errors, err)
} else {
- bytes, _ := yaml.Marshal(v)
+ bytes := compiler.Marshal(v)
result.Yaml = string(bytes)
result.Value = resultFromExt
pair.Value = result
}
} else {
- pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+ pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -442,7 +447,7 @@ func NewContact(in interface{}, context *compiler.Context) (*Contact, error) {
}
// NewDefault creates an object of type Default if possible, returning an error if not.
-func NewDefault(in interface{}, context *compiler.Context) (*Default, error) {
+func NewDefault(in *yaml.Node, context *compiler.Context) (*Default, error) {
errors := make([]error, 0)
x := &Default{}
m, ok := compiler.UnpackMap(in)
@@ -453,25 +458,25 @@ func NewDefault(in interface{}, context *compiler.Context) (*Default, error) {
// repeated NamedAny additional_properties = 1;
// MAP: Any
x.AdditionalProperties = make([]*NamedAny, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
pair := &NamedAny{}
pair.Name = k
result := &Any{}
- handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+ handled, resultFromExt, err := compiler.CallExtension(context, v, k)
if handled {
if err != nil {
errors = append(errors, err)
} else {
- bytes, _ := yaml.Marshal(v)
+ bytes := compiler.Marshal(v)
result.Yaml = string(bytes)
result.Value = resultFromExt
pair.Value = result
}
} else {
- pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+ pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -484,7 +489,7 @@ func NewDefault(in interface{}, context *compiler.Context) (*Default, error) {
}
// NewDefinitions creates an object of type Definitions if possible, returning an error if not.
-func NewDefinitions(in interface{}, context *compiler.Context) (*Definitions, error) {
+func NewDefinitions(in *yaml.Node, context *compiler.Context) (*Definitions, error) {
errors := make([]error, 0)
x := &Definitions{}
m, ok := compiler.UnpackMap(in)
@@ -495,14 +500,14 @@ func NewDefinitions(in interface{}, context *compiler.Context) (*Definitions, er
// repeated NamedSchema additional_properties = 1;
// MAP: Schema
x.AdditionalProperties = make([]*NamedSchema, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
pair := &NamedSchema{}
pair.Name = k
var err error
- pair.Value, err = NewSchema(v, compiler.NewContext(k, context))
+ pair.Value, err = NewSchema(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -514,7 +519,7 @@ func NewDefinitions(in interface{}, context *compiler.Context) (*Definitions, er
}
// NewDocument creates an object of type Document if possible, returning an error if not.
-func NewDocument(in interface{}, context *compiler.Context) (*Document, error) {
+func NewDocument(in *yaml.Node, context *compiler.Context) (*Document, error) {
errors := make([]error, 0)
x := &Document{}
m, ok := compiler.UnpackMap(in)
@@ -538,15 +543,15 @@ func NewDocument(in interface{}, context *compiler.Context) (*Document, error) {
// string swagger = 1;
v1 := compiler.MapValueForKey(m, "swagger")
if v1 != nil {
- x.Swagger, ok = v1.(string)
+ x.Swagger, ok = compiler.StringForScalarNode(v1)
if !ok {
- message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for swagger: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
// check for valid enum values
// [2.0]
if ok && !compiler.StringArrayContainsValue([]string{"2.0"}, x.Swagger) {
- message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for swagger: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -554,7 +559,7 @@ func NewDocument(in interface{}, context *compiler.Context) (*Document, error) {
v2 := compiler.MapValueForKey(m, "info")
if v2 != nil {
var err error
- x.Info, err = NewInfo(v2, compiler.NewContext("info", context))
+ x.Info, err = NewInfo(v2, compiler.NewContext("info", v2, context))
if err != nil {
errors = append(errors, err)
}
@@ -562,57 +567,57 @@ func NewDocument(in interface{}, context *compiler.Context) (*Document, error) {
// string host = 3;
v3 := compiler.MapValueForKey(m, "host")
if v3 != nil {
- x.Host, ok = v3.(string)
+ x.Host, ok = compiler.StringForScalarNode(v3)
if !ok {
- message := fmt.Sprintf("has unexpected value for host: %+v (%T)", v3, v3)
+ message := fmt.Sprintf("has unexpected value for host: %s", compiler.Display(v3))
errors = append(errors, compiler.NewError(context, message))
}
}
// string base_path = 4;
v4 := compiler.MapValueForKey(m, "basePath")
if v4 != nil {
- x.BasePath, ok = v4.(string)
+ x.BasePath, ok = compiler.StringForScalarNode(v4)
if !ok {
- message := fmt.Sprintf("has unexpected value for basePath: %+v (%T)", v4, v4)
+ message := fmt.Sprintf("has unexpected value for basePath: %s", compiler.Display(v4))
errors = append(errors, compiler.NewError(context, message))
}
}
// repeated string schemes = 5;
v5 := compiler.MapValueForKey(m, "schemes")
if v5 != nil {
- v, ok := v5.([]interface{})
+ v, ok := compiler.SequenceNodeForNode(v5)
if ok {
- x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v)
+ x.Schemes = compiler.StringArrayForSequenceNode(v)
} else {
- message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v5, v5)
+ message := fmt.Sprintf("has unexpected value for schemes: %s", compiler.Display(v5))
errors = append(errors, compiler.NewError(context, message))
}
// check for valid enum values
// [http https ws wss]
if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) {
- message := fmt.Sprintf("has unexpected value for schemes: %+v", v5)
+ message := fmt.Sprintf("has unexpected value for schemes: %s", compiler.Display(v5))
errors = append(errors, compiler.NewError(context, message))
}
}
// repeated string consumes = 6;
v6 := compiler.MapValueForKey(m, "consumes")
if v6 != nil {
- v, ok := v6.([]interface{})
+ v, ok := compiler.SequenceNodeForNode(v6)
if ok {
- x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v)
+ x.Consumes = compiler.StringArrayForSequenceNode(v)
} else {
- message := fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v6, v6)
+ message := fmt.Sprintf("has unexpected value for consumes: %s", compiler.Display(v6))
errors = append(errors, compiler.NewError(context, message))
}
}
// repeated string produces = 7;
v7 := compiler.MapValueForKey(m, "produces")
if v7 != nil {
- v, ok := v7.([]interface{})
+ v, ok := compiler.SequenceNodeForNode(v7)
if ok {
- x.Produces = compiler.ConvertInterfaceArrayToStringArray(v)
+ x.Produces = compiler.StringArrayForSequenceNode(v)
} else {
- message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v7, v7)
+ message := fmt.Sprintf("has unexpected value for produces: %s", compiler.Display(v7))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -620,7 +625,7 @@ func NewDocument(in interface{}, context *compiler.Context) (*Document, error) {
v8 := compiler.MapValueForKey(m, "paths")
if v8 != nil {
var err error
- x.Paths, err = NewPaths(v8, compiler.NewContext("paths", context))
+ x.Paths, err = NewPaths(v8, compiler.NewContext("paths", v8, context))
if err != nil {
errors = append(errors, err)
}
@@ -629,7 +634,7 @@ func NewDocument(in interface{}, context *compiler.Context) (*Document, error) {
v9 := compiler.MapValueForKey(m, "definitions")
if v9 != nil {
var err error
- x.Definitions, err = NewDefinitions(v9, compiler.NewContext("definitions", context))
+ x.Definitions, err = NewDefinitions(v9, compiler.NewContext("definitions", v9, context))
if err != nil {
errors = append(errors, err)
}
@@ -638,7 +643,7 @@ func NewDocument(in interface{}, context *compiler.Context) (*Document, error) {
v10 := compiler.MapValueForKey(m, "parameters")
if v10 != nil {
var err error
- x.Parameters, err = NewParameterDefinitions(v10, compiler.NewContext("parameters", context))
+ x.Parameters, err = NewParameterDefinitions(v10, compiler.NewContext("parameters", v10, context))
if err != nil {
errors = append(errors, err)
}
@@ -647,7 +652,7 @@ func NewDocument(in interface{}, context *compiler.Context) (*Document, error) {
v11 := compiler.MapValueForKey(m, "responses")
if v11 != nil {
var err error
- x.Responses, err = NewResponseDefinitions(v11, compiler.NewContext("responses", context))
+ x.Responses, err = NewResponseDefinitions(v11, compiler.NewContext("responses", v11, context))
if err != nil {
errors = append(errors, err)
}
@@ -657,10 +662,10 @@ func NewDocument(in interface{}, context *compiler.Context) (*Document, error) {
if v12 != nil {
// repeated SecurityRequirement
x.Security = make([]*SecurityRequirement, 0)
- a, ok := v12.([]interface{})
+ a, ok := compiler.SequenceNodeForNode(v12)
if ok {
- for _, item := range a {
- y, err := NewSecurityRequirement(item, compiler.NewContext("security", context))
+ for _, item := range a.Content {
+ y, err := NewSecurityRequirement(item, compiler.NewContext("security", item, context))
if err != nil {
errors = append(errors, err)
}
@@ -672,7 +677,7 @@ func NewDocument(in interface{}, context *compiler.Context) (*Document, error) {
v13 := compiler.MapValueForKey(m, "securityDefinitions")
if v13 != nil {
var err error
- x.SecurityDefinitions, err = NewSecurityDefinitions(v13, compiler.NewContext("securityDefinitions", context))
+ x.SecurityDefinitions, err = NewSecurityDefinitions(v13, compiler.NewContext("securityDefinitions", v13, context))
if err != nil {
errors = append(errors, err)
}
@@ -682,10 +687,10 @@ func NewDocument(in interface{}, context *compiler.Context) (*Document, error) {
if v14 != nil {
// repeated Tag
x.Tags = make([]*Tag, 0)
- a, ok := v14.([]interface{})
+ a, ok := compiler.SequenceNodeForNode(v14)
if ok {
- for _, item := range a {
- y, err := NewTag(item, compiler.NewContext("tags", context))
+ for _, item := range a.Content {
+ y, err := NewTag(item, compiler.NewContext("tags", item, context))
if err != nil {
errors = append(errors, err)
}
@@ -697,7 +702,7 @@ func NewDocument(in interface{}, context *compiler.Context) (*Document, error) {
v15 := compiler.MapValueForKey(m, "externalDocs")
if v15 != nil {
var err error
- x.ExternalDocs, err = NewExternalDocs(v15, compiler.NewContext("externalDocs", context))
+ x.ExternalDocs, err = NewExternalDocs(v15, compiler.NewContext("externalDocs", v15, context))
if err != nil {
errors = append(errors, err)
}
@@ -705,26 +710,26 @@ func NewDocument(in interface{}, context *compiler.Context) (*Document, error) {
// repeated NamedAny vendor_extension = 16;
// MAP: Any ^x-
x.VendorExtension = make([]*NamedAny, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
if strings.HasPrefix(k, "x-") {
pair := &NamedAny{}
pair.Name = k
result := &Any{}
- handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+ handled, resultFromExt, err := compiler.CallExtension(context, v, k)
if handled {
if err != nil {
errors = append(errors, err)
} else {
- bytes, _ := yaml.Marshal(v)
+ bytes := compiler.Marshal(v)
result.Yaml = string(bytes)
result.Value = resultFromExt
pair.Value = result
}
} else {
- pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+ pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -738,7 +743,7 @@ func NewDocument(in interface{}, context *compiler.Context) (*Document, error) {
}
// NewExamples creates an object of type Examples if possible, returning an error if not.
-func NewExamples(in interface{}, context *compiler.Context) (*Examples, error) {
+func NewExamples(in *yaml.Node, context *compiler.Context) (*Examples, error) {
errors := make([]error, 0)
x := &Examples{}
m, ok := compiler.UnpackMap(in)
@@ -749,25 +754,25 @@ func NewExamples(in interface{}, context *compiler.Context) (*Examples, error) {
// repeated NamedAny additional_properties = 1;
// MAP: Any
x.AdditionalProperties = make([]*NamedAny, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
pair := &NamedAny{}
pair.Name = k
result := &Any{}
- handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+ handled, resultFromExt, err := compiler.CallExtension(context, v, k)
if handled {
if err != nil {
errors = append(errors, err)
} else {
- bytes, _ := yaml.Marshal(v)
+ bytes := compiler.Marshal(v)
result.Yaml = string(bytes)
result.Value = resultFromExt
pair.Value = result
}
} else {
- pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+ pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -780,7 +785,7 @@ func NewExamples(in interface{}, context *compiler.Context) (*Examples, error) {
}
// NewExternalDocs creates an object of type ExternalDocs if possible, returning an error if not.
-func NewExternalDocs(in interface{}, context *compiler.Context) (*ExternalDocs, error) {
+func NewExternalDocs(in *yaml.Node, context *compiler.Context) (*ExternalDocs, error) {
errors := make([]error, 0)
x := &ExternalDocs{}
m, ok := compiler.UnpackMap(in)
@@ -804,44 +809,44 @@ func NewExternalDocs(in interface{}, context *compiler.Context) (*ExternalDocs,
// string description = 1;
v1 := compiler.MapValueForKey(m, "description")
if v1 != nil {
- x.Description, ok = v1.(string)
+ x.Description, ok = compiler.StringForScalarNode(v1)
if !ok {
- message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
// string url = 2;
v2 := compiler.MapValueForKey(m, "url")
if v2 != nil {
- x.Url, ok = v2.(string)
+ x.Url, ok = compiler.StringForScalarNode(v2)
if !ok {
- message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2)
+ message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2))
errors = append(errors, compiler.NewError(context, message))
}
}
// repeated NamedAny vendor_extension = 3;
// MAP: Any ^x-
x.VendorExtension = make([]*NamedAny, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
if strings.HasPrefix(k, "x-") {
pair := &NamedAny{}
pair.Name = k
result := &Any{}
- handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+ handled, resultFromExt, err := compiler.CallExtension(context, v, k)
if handled {
if err != nil {
errors = append(errors, err)
} else {
- bytes, _ := yaml.Marshal(v)
+ bytes := compiler.Marshal(v)
result.Yaml = string(bytes)
result.Value = resultFromExt
pair.Value = result
}
} else {
- pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+ pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -855,7 +860,7 @@ func NewExternalDocs(in interface{}, context *compiler.Context) (*ExternalDocs,
}
// NewFileSchema creates an object of type FileSchema if possible, returning an error if not.
-func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, error) {
+func NewFileSchema(in *yaml.Node, context *compiler.Context) (*FileSchema, error) {
errors := make([]error, 0)
x := &FileSchema{}
m, ok := compiler.UnpackMap(in)
@@ -879,27 +884,27 @@ func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, erro
// string format = 1;
v1 := compiler.MapValueForKey(m, "format")
if v1 != nil {
- x.Format, ok = v1.(string)
+ x.Format, ok = compiler.StringForScalarNode(v1)
if !ok {
- message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
// string title = 2;
v2 := compiler.MapValueForKey(m, "title")
if v2 != nil {
- x.Title, ok = v2.(string)
+ x.Title, ok = compiler.StringForScalarNode(v2)
if !ok {
- message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v2, v2)
+ message := fmt.Sprintf("has unexpected value for title: %s", compiler.Display(v2))
errors = append(errors, compiler.NewError(context, message))
}
}
// string description = 3;
v3 := compiler.MapValueForKey(m, "description")
if v3 != nil {
- x.Description, ok = v3.(string)
+ x.Description, ok = compiler.StringForScalarNode(v3)
if !ok {
- message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
+ message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -907,7 +912,7 @@ func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, erro
v4 := compiler.MapValueForKey(m, "default")
if v4 != nil {
var err error
- x.Default, err = NewAny(v4, compiler.NewContext("default", context))
+ x.Default, err = NewAny(v4, compiler.NewContext("default", v4, context))
if err != nil {
errors = append(errors, err)
}
@@ -915,35 +920,35 @@ func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, erro
// repeated string required = 5;
v5 := compiler.MapValueForKey(m, "required")
if v5 != nil {
- v, ok := v5.([]interface{})
+ v, ok := compiler.SequenceNodeForNode(v5)
if ok {
- x.Required = compiler.ConvertInterfaceArrayToStringArray(v)
+ x.Required = compiler.StringArrayForSequenceNode(v)
} else {
- message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v5, v5)
+ message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v5))
errors = append(errors, compiler.NewError(context, message))
}
}
// string type = 6;
v6 := compiler.MapValueForKey(m, "type")
if v6 != nil {
- x.Type, ok = v6.(string)
+ x.Type, ok = compiler.StringForScalarNode(v6)
if !ok {
- message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6)
+ message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6))
errors = append(errors, compiler.NewError(context, message))
}
// check for valid enum values
// [file]
if ok && !compiler.StringArrayContainsValue([]string{"file"}, x.Type) {
- message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6)
+ message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6))
errors = append(errors, compiler.NewError(context, message))
}
}
// bool read_only = 7;
v7 := compiler.MapValueForKey(m, "readOnly")
if v7 != nil {
- x.ReadOnly, ok = v7.(bool)
+ x.ReadOnly, ok = compiler.BoolForScalarNode(v7)
if !ok {
- message := fmt.Sprintf("has unexpected value for readOnly: %+v (%T)", v7, v7)
+ message := fmt.Sprintf("has unexpected value for readOnly: %s", compiler.Display(v7))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -951,7 +956,7 @@ func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, erro
v8 := compiler.MapValueForKey(m, "externalDocs")
if v8 != nil {
var err error
- x.ExternalDocs, err = NewExternalDocs(v8, compiler.NewContext("externalDocs", context))
+ x.ExternalDocs, err = NewExternalDocs(v8, compiler.NewContext("externalDocs", v8, context))
if err != nil {
errors = append(errors, err)
}
@@ -960,7 +965,7 @@ func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, erro
v9 := compiler.MapValueForKey(m, "example")
if v9 != nil {
var err error
- x.Example, err = NewAny(v9, compiler.NewContext("example", context))
+ x.Example, err = NewAny(v9, compiler.NewContext("example", v9, context))
if err != nil {
errors = append(errors, err)
}
@@ -968,26 +973,26 @@ func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, erro
// repeated NamedAny vendor_extension = 10;
// MAP: Any ^x-
x.VendorExtension = make([]*NamedAny, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
if strings.HasPrefix(k, "x-") {
pair := &NamedAny{}
pair.Name = k
result := &Any{}
- handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+ handled, resultFromExt, err := compiler.CallExtension(context, v, k)
if handled {
if err != nil {
errors = append(errors, err)
} else {
- bytes, _ := yaml.Marshal(v)
+ bytes := compiler.Marshal(v)
result.Yaml = string(bytes)
result.Value = resultFromExt
pair.Value = result
}
} else {
- pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+ pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -1001,7 +1006,7 @@ func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, erro
}
// NewFormDataParameterSubSchema creates an object of type FormDataParameterSubSchema if possible, returning an error if not.
-func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (*FormDataParameterSubSchema, error) {
+func NewFormDataParameterSubSchema(in *yaml.Node, context *compiler.Context) (*FormDataParameterSubSchema, error) {
errors := make([]error, 0)
x := &FormDataParameterSubSchema{}
m, ok := compiler.UnpackMap(in)
@@ -1019,75 +1024,75 @@ func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (*
// bool required = 1;
v1 := compiler.MapValueForKey(m, "required")
if v1 != nil {
- x.Required, ok = v1.(bool)
+ x.Required, ok = compiler.BoolForScalarNode(v1)
if !ok {
- message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
// string in = 2;
v2 := compiler.MapValueForKey(m, "in")
if v2 != nil {
- x.In, ok = v2.(string)
+ x.In, ok = compiler.StringForScalarNode(v2)
if !ok {
- message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+ message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2))
errors = append(errors, compiler.NewError(context, message))
}
// check for valid enum values
// [formData]
if ok && !compiler.StringArrayContainsValue([]string{"formData"}, x.In) {
- message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+ message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2))
errors = append(errors, compiler.NewError(context, message))
}
}
// string description = 3;
v3 := compiler.MapValueForKey(m, "description")
if v3 != nil {
- x.Description, ok = v3.(string)
+ x.Description, ok = compiler.StringForScalarNode(v3)
if !ok {
- message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
+ message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3))
errors = append(errors, compiler.NewError(context, message))
}
}
// string name = 4;
v4 := compiler.MapValueForKey(m, "name")
if v4 != nil {
- x.Name, ok = v4.(string)
+ x.Name, ok = compiler.StringForScalarNode(v4)
if !ok {
- message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4)
+ message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v4))
errors = append(errors, compiler.NewError(context, message))
}
}
// bool allow_empty_value = 5;
v5 := compiler.MapValueForKey(m, "allowEmptyValue")
if v5 != nil {
- x.AllowEmptyValue, ok = v5.(bool)
+ x.AllowEmptyValue, ok = compiler.BoolForScalarNode(v5)
if !ok {
- message := fmt.Sprintf("has unexpected value for allowEmptyValue: %+v (%T)", v5, v5)
+ message := fmt.Sprintf("has unexpected value for allowEmptyValue: %s", compiler.Display(v5))
errors = append(errors, compiler.NewError(context, message))
}
}
// string type = 6;
v6 := compiler.MapValueForKey(m, "type")
if v6 != nil {
- x.Type, ok = v6.(string)
+ x.Type, ok = compiler.StringForScalarNode(v6)
if !ok {
- message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6)
+ message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6))
errors = append(errors, compiler.NewError(context, message))
}
// check for valid enum values
// [string number boolean integer array file]
if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array", "file"}, x.Type) {
- message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6)
+ message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6))
errors = append(errors, compiler.NewError(context, message))
}
}
// string format = 7;
v7 := compiler.MapValueForKey(m, "format")
if v7 != nil {
- x.Format, ok = v7.(string)
+ x.Format, ok = compiler.StringForScalarNode(v7)
if !ok {
- message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v7, v7)
+ message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v7))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -1095,7 +1100,7 @@ func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (*
v8 := compiler.MapValueForKey(m, "items")
if v8 != nil {
var err error
- x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context))
+ x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", v8, context))
if err != nil {
errors = append(errors, err)
}
@@ -1103,15 +1108,15 @@ func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (*
// string collection_format = 9;
v9 := compiler.MapValueForKey(m, "collectionFormat")
if v9 != nil {
- x.CollectionFormat, ok = v9.(string)
+ x.CollectionFormat, ok = compiler.StringForScalarNode(v9)
if !ok {
- message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9)
+ message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v9))
errors = append(errors, compiler.NewError(context, message))
}
// check for valid enum values
// [csv ssv tsv pipes multi]
if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) {
- message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9)
+ message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v9))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -1119,7 +1124,7 @@ func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (*
v10 := compiler.MapValueForKey(m, "default")
if v10 != nil {
var err error
- x.Default, err = NewAny(v10, compiler.NewContext("default", context))
+ x.Default, err = NewAny(v10, compiler.NewContext("default", v10, context))
if err != nil {
errors = append(errors, err)
}
@@ -1127,126 +1132,102 @@ func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (*
// float maximum = 11;
v11 := compiler.MapValueForKey(m, "maximum")
if v11 != nil {
- switch v11 := v11.(type) {
- case float64:
- x.Maximum = v11
- case float32:
- x.Maximum = float64(v11)
- case uint64:
- x.Maximum = float64(v11)
- case uint32:
- x.Maximum = float64(v11)
- case int64:
- x.Maximum = float64(v11)
- case int32:
- x.Maximum = float64(v11)
- case int:
- x.Maximum = float64(v11)
- default:
- message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v11, v11)
+ v, ok := compiler.FloatForScalarNode(v11)
+ if ok {
+ x.Maximum = v
+ } else {
+ message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v11))
errors = append(errors, compiler.NewError(context, message))
}
}
// bool exclusive_maximum = 12;
v12 := compiler.MapValueForKey(m, "exclusiveMaximum")
if v12 != nil {
- x.ExclusiveMaximum, ok = v12.(bool)
+ x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v12)
if !ok {
- message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v12, v12)
+ message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v12))
errors = append(errors, compiler.NewError(context, message))
}
}
// float minimum = 13;
v13 := compiler.MapValueForKey(m, "minimum")
if v13 != nil {
- switch v13 := v13.(type) {
- case float64:
- x.Minimum = v13
- case float32:
- x.Minimum = float64(v13)
- case uint64:
- x.Minimum = float64(v13)
- case uint32:
- x.Minimum = float64(v13)
- case int64:
- x.Minimum = float64(v13)
- case int32:
- x.Minimum = float64(v13)
- case int:
- x.Minimum = float64(v13)
- default:
- message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v13, v13)
+ v, ok := compiler.FloatForScalarNode(v13)
+ if ok {
+ x.Minimum = v
+ } else {
+ message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v13))
errors = append(errors, compiler.NewError(context, message))
}
}
// bool exclusive_minimum = 14;
v14 := compiler.MapValueForKey(m, "exclusiveMinimum")
if v14 != nil {
- x.ExclusiveMinimum, ok = v14.(bool)
+ x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v14)
if !ok {
- message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v14, v14)
+ message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v14))
errors = append(errors, compiler.NewError(context, message))
}
}
// int64 max_length = 15;
v15 := compiler.MapValueForKey(m, "maxLength")
if v15 != nil {
- t, ok := v15.(int)
+ t, ok := compiler.IntForScalarNode(v15)
if ok {
x.MaxLength = int64(t)
} else {
- message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v15, v15)
+ message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v15))
errors = append(errors, compiler.NewError(context, message))
}
}
// int64 min_length = 16;
v16 := compiler.MapValueForKey(m, "minLength")
if v16 != nil {
- t, ok := v16.(int)
+ t, ok := compiler.IntForScalarNode(v16)
if ok {
x.MinLength = int64(t)
} else {
- message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v16, v16)
+ message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v16))
errors = append(errors, compiler.NewError(context, message))
}
}
// string pattern = 17;
v17 := compiler.MapValueForKey(m, "pattern")
if v17 != nil {
- x.Pattern, ok = v17.(string)
+ x.Pattern, ok = compiler.StringForScalarNode(v17)
if !ok {
- message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v17, v17)
+ message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v17))
errors = append(errors, compiler.NewError(context, message))
}
}
// int64 max_items = 18;
v18 := compiler.MapValueForKey(m, "maxItems")
if v18 != nil {
- t, ok := v18.(int)
+ t, ok := compiler.IntForScalarNode(v18)
if ok {
x.MaxItems = int64(t)
} else {
- message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v18, v18)
+ message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v18))
errors = append(errors, compiler.NewError(context, message))
}
}
// int64 min_items = 19;
v19 := compiler.MapValueForKey(m, "minItems")
if v19 != nil {
- t, ok := v19.(int)
+ t, ok := compiler.IntForScalarNode(v19)
if ok {
x.MinItems = int64(t)
} else {
- message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v19, v19)
+ message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v19))
errors = append(errors, compiler.NewError(context, message))
}
}
// bool unique_items = 20;
v20 := compiler.MapValueForKey(m, "uniqueItems")
if v20 != nil {
- x.UniqueItems, ok = v20.(bool)
+ x.UniqueItems, ok = compiler.BoolForScalarNode(v20)
if !ok {
- message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v20, v20)
+ message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v20))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -1255,10 +1236,10 @@ func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (*
if v21 != nil {
// repeated Any
x.Enum = make([]*Any, 0)
- a, ok := v21.([]interface{})
+ a, ok := compiler.SequenceNodeForNode(v21)
if ok {
- for _, item := range a {
- y, err := NewAny(item, compiler.NewContext("enum", context))
+ for _, item := range a.Content {
+ y, err := NewAny(item, compiler.NewContext("enum", item, context))
if err != nil {
errors = append(errors, err)
}
@@ -1269,49 +1250,37 @@ func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (*
// float multiple_of = 22;
v22 := compiler.MapValueForKey(m, "multipleOf")
if v22 != nil {
- switch v22 := v22.(type) {
- case float64:
- x.MultipleOf = v22
- case float32:
- x.MultipleOf = float64(v22)
- case uint64:
- x.MultipleOf = float64(v22)
- case uint32:
- x.MultipleOf = float64(v22)
- case int64:
- x.MultipleOf = float64(v22)
- case int32:
- x.MultipleOf = float64(v22)
- case int:
- x.MultipleOf = float64(v22)
- default:
- message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v22, v22)
+ v, ok := compiler.FloatForScalarNode(v22)
+ if ok {
+ x.MultipleOf = v
+ } else {
+ message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v22))
errors = append(errors, compiler.NewError(context, message))
}
}
// repeated NamedAny vendor_extension = 23;
// MAP: Any ^x-
x.VendorExtension = make([]*NamedAny, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
if strings.HasPrefix(k, "x-") {
pair := &NamedAny{}
pair.Name = k
result := &Any{}
- handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+ handled, resultFromExt, err := compiler.CallExtension(context, v, k)
if handled {
if err != nil {
errors = append(errors, err)
} else {
- bytes, _ := yaml.Marshal(v)
+ bytes := compiler.Marshal(v)
result.Yaml = string(bytes)
result.Value = resultFromExt
pair.Value = result
}
} else {
- pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+ pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -1325,7 +1294,7 @@ func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (*
}
// NewHeader creates an object of type Header if possible, returning an error if not.
-func NewHeader(in interface{}, context *compiler.Context) (*Header, error) {
+func NewHeader(in *yaml.Node, context *compiler.Context) (*Header, error) {
errors := make([]error, 0)
x := &Header{}
m, ok := compiler.UnpackMap(in)
@@ -1349,24 +1318,24 @@ func NewHeader(in interface{}, context *compiler.Context) (*Header, error) {
// string type = 1;
v1 := compiler.MapValueForKey(m, "type")
if v1 != nil {
- x.Type, ok = v1.(string)
+ x.Type, ok = compiler.StringForScalarNode(v1)
if !ok {
- message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
// check for valid enum values
// [string number integer boolean array]
if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) {
- message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
// string format = 2;
v2 := compiler.MapValueForKey(m, "format")
if v2 != nil {
- x.Format, ok = v2.(string)
+ x.Format, ok = compiler.StringForScalarNode(v2)
if !ok {
- message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2)
+ message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v2))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -1374,7 +1343,7 @@ func NewHeader(in interface{}, context *compiler.Context) (*Header, error) {
v3 := compiler.MapValueForKey(m, "items")
if v3 != nil {
var err error
- x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context))
+ x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", v3, context))
if err != nil {
errors = append(errors, err)
}
@@ -1382,15 +1351,15 @@ func NewHeader(in interface{}, context *compiler.Context) (*Header, error) {
// string collection_format = 4;
v4 := compiler.MapValueForKey(m, "collectionFormat")
if v4 != nil {
- x.CollectionFormat, ok = v4.(string)
+ x.CollectionFormat, ok = compiler.StringForScalarNode(v4)
if !ok {
- message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4)
+ message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v4))
errors = append(errors, compiler.NewError(context, message))
}
// check for valid enum values
// [csv ssv tsv pipes]
if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) {
- message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4)
+ message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v4))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -1398,7 +1367,7 @@ func NewHeader(in interface{}, context *compiler.Context) (*Header, error) {
v5 := compiler.MapValueForKey(m, "default")
if v5 != nil {
var err error
- x.Default, err = NewAny(v5, compiler.NewContext("default", context))
+ x.Default, err = NewAny(v5, compiler.NewContext("default", v5, context))
if err != nil {
errors = append(errors, err)
}
@@ -1406,126 +1375,102 @@ func NewHeader(in interface{}, context *compiler.Context) (*Header, error) {
// float maximum = 6;
v6 := compiler.MapValueForKey(m, "maximum")
if v6 != nil {
- switch v6 := v6.(type) {
- case float64:
- x.Maximum = v6
- case float32:
- x.Maximum = float64(v6)
- case uint64:
- x.Maximum = float64(v6)
- case uint32:
- x.Maximum = float64(v6)
- case int64:
- x.Maximum = float64(v6)
- case int32:
- x.Maximum = float64(v6)
- case int:
- x.Maximum = float64(v6)
- default:
- message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v6, v6)
+ v, ok := compiler.FloatForScalarNode(v6)
+ if ok {
+ x.Maximum = v
+ } else {
+ message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v6))
errors = append(errors, compiler.NewError(context, message))
}
}
// bool exclusive_maximum = 7;
v7 := compiler.MapValueForKey(m, "exclusiveMaximum")
if v7 != nil {
- x.ExclusiveMaximum, ok = v7.(bool)
+ x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v7)
if !ok {
- message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v7, v7)
+ message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v7))
errors = append(errors, compiler.NewError(context, message))
}
}
// float minimum = 8;
v8 := compiler.MapValueForKey(m, "minimum")
if v8 != nil {
- switch v8 := v8.(type) {
- case float64:
- x.Minimum = v8
- case float32:
- x.Minimum = float64(v8)
- case uint64:
- x.Minimum = float64(v8)
- case uint32:
- x.Minimum = float64(v8)
- case int64:
- x.Minimum = float64(v8)
- case int32:
- x.Minimum = float64(v8)
- case int:
- x.Minimum = float64(v8)
- default:
- message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v8, v8)
+ v, ok := compiler.FloatForScalarNode(v8)
+ if ok {
+ x.Minimum = v
+ } else {
+ message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v8))
errors = append(errors, compiler.NewError(context, message))
}
}
// bool exclusive_minimum = 9;
v9 := compiler.MapValueForKey(m, "exclusiveMinimum")
if v9 != nil {
- x.ExclusiveMinimum, ok = v9.(bool)
+ x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v9)
if !ok {
- message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v9, v9)
+ message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v9))
errors = append(errors, compiler.NewError(context, message))
}
}
// int64 max_length = 10;
v10 := compiler.MapValueForKey(m, "maxLength")
if v10 != nil {
- t, ok := v10.(int)
+ t, ok := compiler.IntForScalarNode(v10)
if ok {
x.MaxLength = int64(t)
} else {
- message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v10, v10)
+ message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v10))
errors = append(errors, compiler.NewError(context, message))
}
}
// int64 min_length = 11;
v11 := compiler.MapValueForKey(m, "minLength")
if v11 != nil {
- t, ok := v11.(int)
+ t, ok := compiler.IntForScalarNode(v11)
if ok {
x.MinLength = int64(t)
} else {
- message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v11, v11)
+ message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v11))
errors = append(errors, compiler.NewError(context, message))
}
}
// string pattern = 12;
v12 := compiler.MapValueForKey(m, "pattern")
if v12 != nil {
- x.Pattern, ok = v12.(string)
+ x.Pattern, ok = compiler.StringForScalarNode(v12)
if !ok {
- message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v12, v12)
+ message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v12))
errors = append(errors, compiler.NewError(context, message))
}
}
// int64 max_items = 13;
v13 := compiler.MapValueForKey(m, "maxItems")
if v13 != nil {
- t, ok := v13.(int)
+ t, ok := compiler.IntForScalarNode(v13)
if ok {
x.MaxItems = int64(t)
} else {
- message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v13, v13)
+ message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v13))
errors = append(errors, compiler.NewError(context, message))
}
}
// int64 min_items = 14;
v14 := compiler.MapValueForKey(m, "minItems")
if v14 != nil {
- t, ok := v14.(int)
+ t, ok := compiler.IntForScalarNode(v14)
if ok {
x.MinItems = int64(t)
} else {
- message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v14, v14)
+ message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v14))
errors = append(errors, compiler.NewError(context, message))
}
}
// bool unique_items = 15;
v15 := compiler.MapValueForKey(m, "uniqueItems")
if v15 != nil {
- x.UniqueItems, ok = v15.(bool)
+ x.UniqueItems, ok = compiler.BoolForScalarNode(v15)
if !ok {
- message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v15, v15)
+ message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v15))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -1534,10 +1479,10 @@ func NewHeader(in interface{}, context *compiler.Context) (*Header, error) {
if v16 != nil {
// repeated Any
x.Enum = make([]*Any, 0)
- a, ok := v16.([]interface{})
+ a, ok := compiler.SequenceNodeForNode(v16)
if ok {
- for _, item := range a {
- y, err := NewAny(item, compiler.NewContext("enum", context))
+ for _, item := range a.Content {
+ y, err := NewAny(item, compiler.NewContext("enum", item, context))
if err != nil {
errors = append(errors, err)
}
@@ -1548,58 +1493,46 @@ func NewHeader(in interface{}, context *compiler.Context) (*Header, error) {
// float multiple_of = 17;
v17 := compiler.MapValueForKey(m, "multipleOf")
if v17 != nil {
- switch v17 := v17.(type) {
- case float64:
- x.MultipleOf = v17
- case float32:
- x.MultipleOf = float64(v17)
- case uint64:
- x.MultipleOf = float64(v17)
- case uint32:
- x.MultipleOf = float64(v17)
- case int64:
- x.MultipleOf = float64(v17)
- case int32:
- x.MultipleOf = float64(v17)
- case int:
- x.MultipleOf = float64(v17)
- default:
- message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v17, v17)
+ v, ok := compiler.FloatForScalarNode(v17)
+ if ok {
+ x.MultipleOf = v
+ } else {
+ message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v17))
errors = append(errors, compiler.NewError(context, message))
}
}
// string description = 18;
v18 := compiler.MapValueForKey(m, "description")
if v18 != nil {
- x.Description, ok = v18.(string)
+ x.Description, ok = compiler.StringForScalarNode(v18)
if !ok {
- message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v18, v18)
+ message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v18))
errors = append(errors, compiler.NewError(context, message))
}
}
// repeated NamedAny vendor_extension = 19;
// MAP: Any ^x-
x.VendorExtension = make([]*NamedAny, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
if strings.HasPrefix(k, "x-") {
pair := &NamedAny{}
pair.Name = k
result := &Any{}
- handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+ handled, resultFromExt, err := compiler.CallExtension(context, v, k)
if handled {
if err != nil {
errors = append(errors, err)
} else {
- bytes, _ := yaml.Marshal(v)
+ bytes := compiler.Marshal(v)
result.Yaml = string(bytes)
result.Value = resultFromExt
pair.Value = result
}
} else {
- pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+ pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -1613,7 +1546,7 @@ func NewHeader(in interface{}, context *compiler.Context) (*Header, error) {
}
// NewHeaderParameterSubSchema creates an object of type HeaderParameterSubSchema if possible, returning an error if not.
-func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*HeaderParameterSubSchema, error) {
+func NewHeaderParameterSubSchema(in *yaml.Node, context *compiler.Context) (*HeaderParameterSubSchema, error) {
errors := make([]error, 0)
x := &HeaderParameterSubSchema{}
m, ok := compiler.UnpackMap(in)
@@ -1631,66 +1564,66 @@ func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*He
// bool required = 1;
v1 := compiler.MapValueForKey(m, "required")
if v1 != nil {
- x.Required, ok = v1.(bool)
+ x.Required, ok = compiler.BoolForScalarNode(v1)
if !ok {
- message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
// string in = 2;
v2 := compiler.MapValueForKey(m, "in")
if v2 != nil {
- x.In, ok = v2.(string)
+ x.In, ok = compiler.StringForScalarNode(v2)
if !ok {
- message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+ message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2))
errors = append(errors, compiler.NewError(context, message))
}
// check for valid enum values
// [header]
if ok && !compiler.StringArrayContainsValue([]string{"header"}, x.In) {
- message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+ message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2))
errors = append(errors, compiler.NewError(context, message))
}
}
// string description = 3;
v3 := compiler.MapValueForKey(m, "description")
if v3 != nil {
- x.Description, ok = v3.(string)
+ x.Description, ok = compiler.StringForScalarNode(v3)
if !ok {
- message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
+ message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3))
errors = append(errors, compiler.NewError(context, message))
}
}
// string name = 4;
v4 := compiler.MapValueForKey(m, "name")
if v4 != nil {
- x.Name, ok = v4.(string)
+ x.Name, ok = compiler.StringForScalarNode(v4)
if !ok {
- message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4)
+ message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v4))
errors = append(errors, compiler.NewError(context, message))
}
}
// string type = 5;
v5 := compiler.MapValueForKey(m, "type")
if v5 != nil {
- x.Type, ok = v5.(string)
+ x.Type, ok = compiler.StringForScalarNode(v5)
if !ok {
- message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5)
+ message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v5))
errors = append(errors, compiler.NewError(context, message))
}
// check for valid enum values
// [string number boolean integer array]
if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) {
- message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5)
+ message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v5))
errors = append(errors, compiler.NewError(context, message))
}
}
// string format = 6;
v6 := compiler.MapValueForKey(m, "format")
if v6 != nil {
- x.Format, ok = v6.(string)
+ x.Format, ok = compiler.StringForScalarNode(v6)
if !ok {
- message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v6, v6)
+ message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v6))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -1698,7 +1631,7 @@ func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*He
v7 := compiler.MapValueForKey(m, "items")
if v7 != nil {
var err error
- x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context))
+ x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", v7, context))
if err != nil {
errors = append(errors, err)
}
@@ -1706,15 +1639,15 @@ func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*He
// string collection_format = 8;
v8 := compiler.MapValueForKey(m, "collectionFormat")
if v8 != nil {
- x.CollectionFormat, ok = v8.(string)
+ x.CollectionFormat, ok = compiler.StringForScalarNode(v8)
if !ok {
- message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8)
+ message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v8))
errors = append(errors, compiler.NewError(context, message))
}
// check for valid enum values
// [csv ssv tsv pipes]
if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) {
- message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8)
+ message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v8))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -1722,7 +1655,7 @@ func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*He
v9 := compiler.MapValueForKey(m, "default")
if v9 != nil {
var err error
- x.Default, err = NewAny(v9, compiler.NewContext("default", context))
+ x.Default, err = NewAny(v9, compiler.NewContext("default", v9, context))
if err != nil {
errors = append(errors, err)
}
@@ -1730,126 +1663,102 @@ func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*He
// float maximum = 10;
v10 := compiler.MapValueForKey(m, "maximum")
if v10 != nil {
- switch v10 := v10.(type) {
- case float64:
- x.Maximum = v10
- case float32:
- x.Maximum = float64(v10)
- case uint64:
- x.Maximum = float64(v10)
- case uint32:
- x.Maximum = float64(v10)
- case int64:
- x.Maximum = float64(v10)
- case int32:
- x.Maximum = float64(v10)
- case int:
- x.Maximum = float64(v10)
- default:
- message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v10, v10)
+ v, ok := compiler.FloatForScalarNode(v10)
+ if ok {
+ x.Maximum = v
+ } else {
+ message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v10))
errors = append(errors, compiler.NewError(context, message))
}
}
// bool exclusive_maximum = 11;
v11 := compiler.MapValueForKey(m, "exclusiveMaximum")
if v11 != nil {
- x.ExclusiveMaximum, ok = v11.(bool)
+ x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v11)
if !ok {
- message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v11, v11)
+ message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v11))
errors = append(errors, compiler.NewError(context, message))
}
}
// float minimum = 12;
v12 := compiler.MapValueForKey(m, "minimum")
if v12 != nil {
- switch v12 := v12.(type) {
- case float64:
- x.Minimum = v12
- case float32:
- x.Minimum = float64(v12)
- case uint64:
- x.Minimum = float64(v12)
- case uint32:
- x.Minimum = float64(v12)
- case int64:
- x.Minimum = float64(v12)
- case int32:
- x.Minimum = float64(v12)
- case int:
- x.Minimum = float64(v12)
- default:
- message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v12, v12)
+ v, ok := compiler.FloatForScalarNode(v12)
+ if ok {
+ x.Minimum = v
+ } else {
+ message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v12))
errors = append(errors, compiler.NewError(context, message))
}
}
// bool exclusive_minimum = 13;
v13 := compiler.MapValueForKey(m, "exclusiveMinimum")
if v13 != nil {
- x.ExclusiveMinimum, ok = v13.(bool)
+ x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v13)
if !ok {
- message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v13, v13)
+ message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v13))
errors = append(errors, compiler.NewError(context, message))
}
}
// int64 max_length = 14;
v14 := compiler.MapValueForKey(m, "maxLength")
if v14 != nil {
- t, ok := v14.(int)
+ t, ok := compiler.IntForScalarNode(v14)
if ok {
x.MaxLength = int64(t)
} else {
- message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v14, v14)
+ message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v14))
errors = append(errors, compiler.NewError(context, message))
}
}
// int64 min_length = 15;
v15 := compiler.MapValueForKey(m, "minLength")
if v15 != nil {
- t, ok := v15.(int)
+ t, ok := compiler.IntForScalarNode(v15)
if ok {
x.MinLength = int64(t)
} else {
- message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v15, v15)
+ message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v15))
errors = append(errors, compiler.NewError(context, message))
}
}
// string pattern = 16;
v16 := compiler.MapValueForKey(m, "pattern")
if v16 != nil {
- x.Pattern, ok = v16.(string)
+ x.Pattern, ok = compiler.StringForScalarNode(v16)
if !ok {
- message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v16, v16)
+ message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v16))
errors = append(errors, compiler.NewError(context, message))
}
}
// int64 max_items = 17;
v17 := compiler.MapValueForKey(m, "maxItems")
if v17 != nil {
- t, ok := v17.(int)
+ t, ok := compiler.IntForScalarNode(v17)
if ok {
x.MaxItems = int64(t)
} else {
- message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v17, v17)
+ message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v17))
errors = append(errors, compiler.NewError(context, message))
}
}
// int64 min_items = 18;
v18 := compiler.MapValueForKey(m, "minItems")
if v18 != nil {
- t, ok := v18.(int)
+ t, ok := compiler.IntForScalarNode(v18)
if ok {
x.MinItems = int64(t)
} else {
- message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v18, v18)
+ message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v18))
errors = append(errors, compiler.NewError(context, message))
}
}
// bool unique_items = 19;
v19 := compiler.MapValueForKey(m, "uniqueItems")
if v19 != nil {
- x.UniqueItems, ok = v19.(bool)
+ x.UniqueItems, ok = compiler.BoolForScalarNode(v19)
if !ok {
- message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v19, v19)
+ message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v19))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -1858,10 +1767,10 @@ func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*He
if v20 != nil {
// repeated Any
x.Enum = make([]*Any, 0)
- a, ok := v20.([]interface{})
+ a, ok := compiler.SequenceNodeForNode(v20)
if ok {
- for _, item := range a {
- y, err := NewAny(item, compiler.NewContext("enum", context))
+ for _, item := range a.Content {
+ y, err := NewAny(item, compiler.NewContext("enum", item, context))
if err != nil {
errors = append(errors, err)
}
@@ -1872,49 +1781,37 @@ func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*He
// float multiple_of = 21;
v21 := compiler.MapValueForKey(m, "multipleOf")
if v21 != nil {
- switch v21 := v21.(type) {
- case float64:
- x.MultipleOf = v21
- case float32:
- x.MultipleOf = float64(v21)
- case uint64:
- x.MultipleOf = float64(v21)
- case uint32:
- x.MultipleOf = float64(v21)
- case int64:
- x.MultipleOf = float64(v21)
- case int32:
- x.MultipleOf = float64(v21)
- case int:
- x.MultipleOf = float64(v21)
- default:
- message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v21, v21)
+ v, ok := compiler.FloatForScalarNode(v21)
+ if ok {
+ x.MultipleOf = v
+ } else {
+ message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v21))
errors = append(errors, compiler.NewError(context, message))
}
}
// repeated NamedAny vendor_extension = 22;
// MAP: Any ^x-
x.VendorExtension = make([]*NamedAny, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
if strings.HasPrefix(k, "x-") {
pair := &NamedAny{}
pair.Name = k
result := &Any{}
- handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+ handled, resultFromExt, err := compiler.CallExtension(context, v, k)
if handled {
if err != nil {
errors = append(errors, err)
} else {
- bytes, _ := yaml.Marshal(v)
+ bytes := compiler.Marshal(v)
result.Yaml = string(bytes)
result.Value = resultFromExt
pair.Value = result
}
} else {
- pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+ pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -1928,7 +1825,7 @@ func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*He
}
// NewHeaders creates an object of type Headers if possible, returning an error if not.
-func NewHeaders(in interface{}, context *compiler.Context) (*Headers, error) {
+func NewHeaders(in *yaml.Node, context *compiler.Context) (*Headers, error) {
errors := make([]error, 0)
x := &Headers{}
m, ok := compiler.UnpackMap(in)
@@ -1939,14 +1836,14 @@ func NewHeaders(in interface{}, context *compiler.Context) (*Headers, error) {
// repeated NamedHeader additional_properties = 1;
// MAP: Header
x.AdditionalProperties = make([]*NamedHeader, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
pair := &NamedHeader{}
pair.Name = k
var err error
- pair.Value, err = NewHeader(v, compiler.NewContext(k, context))
+ pair.Value, err = NewHeader(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -1958,7 +1855,7 @@ func NewHeaders(in interface{}, context *compiler.Context) (*Headers, error) {
}
// NewInfo creates an object of type Info if possible, returning an error if not.
-func NewInfo(in interface{}, context *compiler.Context) (*Info, error) {
+func NewInfo(in *yaml.Node, context *compiler.Context) (*Info, error) {
errors := make([]error, 0)
x := &Info{}
m, ok := compiler.UnpackMap(in)
@@ -1982,36 +1879,36 @@ func NewInfo(in interface{}, context *compiler.Context) (*Info, error) {
// string title = 1;
v1 := compiler.MapValueForKey(m, "title")
if v1 != nil {
- x.Title, ok = v1.(string)
+ x.Title, ok = compiler.StringForScalarNode(v1)
if !ok {
- message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for title: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
// string version = 2;
v2 := compiler.MapValueForKey(m, "version")
if v2 != nil {
- x.Version, ok = v2.(string)
+ x.Version, ok = compiler.StringForScalarNode(v2)
if !ok {
- message := fmt.Sprintf("has unexpected value for version: %+v (%T)", v2, v2)
+ message := fmt.Sprintf("has unexpected value for version: %s", compiler.Display(v2))
errors = append(errors, compiler.NewError(context, message))
}
}
// string description = 3;
v3 := compiler.MapValueForKey(m, "description")
if v3 != nil {
- x.Description, ok = v3.(string)
+ x.Description, ok = compiler.StringForScalarNode(v3)
if !ok {
- message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
+ message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3))
errors = append(errors, compiler.NewError(context, message))
}
}
// string terms_of_service = 4;
v4 := compiler.MapValueForKey(m, "termsOfService")
if v4 != nil {
- x.TermsOfService, ok = v4.(string)
+ x.TermsOfService, ok = compiler.StringForScalarNode(v4)
if !ok {
- message := fmt.Sprintf("has unexpected value for termsOfService: %+v (%T)", v4, v4)
+ message := fmt.Sprintf("has unexpected value for termsOfService: %s", compiler.Display(v4))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -2019,7 +1916,7 @@ func NewInfo(in interface{}, context *compiler.Context) (*Info, error) {
v5 := compiler.MapValueForKey(m, "contact")
if v5 != nil {
var err error
- x.Contact, err = NewContact(v5, compiler.NewContext("contact", context))
+ x.Contact, err = NewContact(v5, compiler.NewContext("contact", v5, context))
if err != nil {
errors = append(errors, err)
}
@@ -2028,7 +1925,7 @@ func NewInfo(in interface{}, context *compiler.Context) (*Info, error) {
v6 := compiler.MapValueForKey(m, "license")
if v6 != nil {
var err error
- x.License, err = NewLicense(v6, compiler.NewContext("license", context))
+ x.License, err = NewLicense(v6, compiler.NewContext("license", v6, context))
if err != nil {
errors = append(errors, err)
}
@@ -2036,26 +1933,26 @@ func NewInfo(in interface{}, context *compiler.Context) (*Info, error) {
// repeated NamedAny vendor_extension = 7;
// MAP: Any ^x-
x.VendorExtension = make([]*NamedAny, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
if strings.HasPrefix(k, "x-") {
pair := &NamedAny{}
pair.Name = k
result := &Any{}
- handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+ handled, resultFromExt, err := compiler.CallExtension(context, v, k)
if handled {
if err != nil {
errors = append(errors, err)
} else {
- bytes, _ := yaml.Marshal(v)
+ bytes := compiler.Marshal(v)
result.Yaml = string(bytes)
result.Value = resultFromExt
pair.Value = result
}
} else {
- pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+ pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -2069,7 +1966,7 @@ func NewInfo(in interface{}, context *compiler.Context) (*Info, error) {
}
// NewItemsItem creates an object of type ItemsItem if possible, returning an error if not.
-func NewItemsItem(in interface{}, context *compiler.Context) (*ItemsItem, error) {
+func NewItemsItem(in *yaml.Node, context *compiler.Context) (*ItemsItem, error) {
errors := make([]error, 0)
x := &ItemsItem{}
m, ok := compiler.UnpackMap(in)
@@ -2078,7 +1975,7 @@ func NewItemsItem(in interface{}, context *compiler.Context) (*ItemsItem, error)
errors = append(errors, compiler.NewError(context, message))
} else {
x.Schema = make([]*Schema, 0)
- y, err := NewSchema(m, compiler.NewContext("", context))
+ y, err := NewSchema(m, compiler.NewContext("", m, context))
if err != nil {
return nil, err
}
@@ -2088,7 +1985,7 @@ func NewItemsItem(in interface{}, context *compiler.Context) (*ItemsItem, error)
}
// NewJsonReference creates an object of type JsonReference if possible, returning an error if not.
-func NewJsonReference(in interface{}, context *compiler.Context) (*JsonReference, error) {
+func NewJsonReference(in *yaml.Node, context *compiler.Context) (*JsonReference, error) {
errors := make([]error, 0)
x := &JsonReference{}
m, ok := compiler.UnpackMap(in)
@@ -2102,28 +1999,21 @@ func NewJsonReference(in interface{}, context *compiler.Context) (*JsonReference
message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
errors = append(errors, compiler.NewError(context, message))
}
- allowedKeys := []string{"$ref", "description"}
- var allowedPatterns []*regexp.Regexp
- invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
- if len(invalidKeys) > 0 {
- message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
- errors = append(errors, compiler.NewError(context, message))
- }
// string _ref = 1;
v1 := compiler.MapValueForKey(m, "$ref")
if v1 != nil {
- x.XRef, ok = v1.(string)
+ x.XRef, ok = compiler.StringForScalarNode(v1)
if !ok {
- message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for $ref: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
// string description = 2;
v2 := compiler.MapValueForKey(m, "description")
if v2 != nil {
- x.Description, ok = v2.(string)
+ x.Description, ok = compiler.StringForScalarNode(v2)
if !ok {
- message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2)
+ message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -2132,7 +2022,7 @@ func NewJsonReference(in interface{}, context *compiler.Context) (*JsonReference
}
// NewLicense creates an object of type License if possible, returning an error if not.
-func NewLicense(in interface{}, context *compiler.Context) (*License, error) {
+func NewLicense(in *yaml.Node, context *compiler.Context) (*License, error) {
errors := make([]error, 0)
x := &License{}
m, ok := compiler.UnpackMap(in)
@@ -2156,44 +2046,44 @@ func NewLicense(in interface{}, context *compiler.Context) (*License, error) {
// string name = 1;
v1 := compiler.MapValueForKey(m, "name")
if v1 != nil {
- x.Name, ok = v1.(string)
+ x.Name, ok = compiler.StringForScalarNode(v1)
if !ok {
- message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
// string url = 2;
v2 := compiler.MapValueForKey(m, "url")
if v2 != nil {
- x.Url, ok = v2.(string)
+ x.Url, ok = compiler.StringForScalarNode(v2)
if !ok {
- message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2)
+ message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2))
errors = append(errors, compiler.NewError(context, message))
}
}
// repeated NamedAny vendor_extension = 3;
// MAP: Any ^x-
x.VendorExtension = make([]*NamedAny, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
if strings.HasPrefix(k, "x-") {
pair := &NamedAny{}
pair.Name = k
result := &Any{}
- handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+ handled, resultFromExt, err := compiler.CallExtension(context, v, k)
if handled {
if err != nil {
errors = append(errors, err)
} else {
- bytes, _ := yaml.Marshal(v)
+ bytes := compiler.Marshal(v)
result.Yaml = string(bytes)
result.Value = resultFromExt
pair.Value = result
}
} else {
- pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+ pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -2207,7 +2097,7 @@ func NewLicense(in interface{}, context *compiler.Context) (*License, error) {
}
// NewNamedAny creates an object of type NamedAny if possible, returning an error if not.
-func NewNamedAny(in interface{}, context *compiler.Context) (*NamedAny, error) {
+func NewNamedAny(in *yaml.Node, context *compiler.Context) (*NamedAny, error) {
errors := make([]error, 0)
x := &NamedAny{}
m, ok := compiler.UnpackMap(in)
@@ -2225,9 +2115,9 @@ func NewNamedAny(in interface{}, context *compiler.Context) (*NamedAny, error) {
// string name = 1;
v1 := compiler.MapValueForKey(m, "name")
if v1 != nil {
- x.Name, ok = v1.(string)
+ x.Name, ok = compiler.StringForScalarNode(v1)
if !ok {
- message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -2235,7 +2125,7 @@ func NewNamedAny(in interface{}, context *compiler.Context) (*NamedAny, error) {
v2 := compiler.MapValueForKey(m, "value")
if v2 != nil {
var err error
- x.Value, err = NewAny(v2, compiler.NewContext("value", context))
+ x.Value, err = NewAny(v2, compiler.NewContext("value", v2, context))
if err != nil {
errors = append(errors, err)
}
@@ -2245,7 +2135,7 @@ func NewNamedAny(in interface{}, context *compiler.Context) (*NamedAny, error) {
}
// NewNamedHeader creates an object of type NamedHeader if possible, returning an error if not.
-func NewNamedHeader(in interface{}, context *compiler.Context) (*NamedHeader, error) {
+func NewNamedHeader(in *yaml.Node, context *compiler.Context) (*NamedHeader, error) {
errors := make([]error, 0)
x := &NamedHeader{}
m, ok := compiler.UnpackMap(in)
@@ -2263,9 +2153,9 @@ func NewNamedHeader(in interface{}, context *compiler.Context) (*NamedHeader, er
// string name = 1;
v1 := compiler.MapValueForKey(m, "name")
if v1 != nil {
- x.Name, ok = v1.(string)
+ x.Name, ok = compiler.StringForScalarNode(v1)
if !ok {
- message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -2273,7 +2163,7 @@ func NewNamedHeader(in interface{}, context *compiler.Context) (*NamedHeader, er
v2 := compiler.MapValueForKey(m, "value")
if v2 != nil {
var err error
- x.Value, err = NewHeader(v2, compiler.NewContext("value", context))
+ x.Value, err = NewHeader(v2, compiler.NewContext("value", v2, context))
if err != nil {
errors = append(errors, err)
}
@@ -2283,7 +2173,7 @@ func NewNamedHeader(in interface{}, context *compiler.Context) (*NamedHeader, er
}
// NewNamedParameter creates an object of type NamedParameter if possible, returning an error if not.
-func NewNamedParameter(in interface{}, context *compiler.Context) (*NamedParameter, error) {
+func NewNamedParameter(in *yaml.Node, context *compiler.Context) (*NamedParameter, error) {
errors := make([]error, 0)
x := &NamedParameter{}
m, ok := compiler.UnpackMap(in)
@@ -2301,9 +2191,9 @@ func NewNamedParameter(in interface{}, context *compiler.Context) (*NamedParamet
// string name = 1;
v1 := compiler.MapValueForKey(m, "name")
if v1 != nil {
- x.Name, ok = v1.(string)
+ x.Name, ok = compiler.StringForScalarNode(v1)
if !ok {
- message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -2311,7 +2201,7 @@ func NewNamedParameter(in interface{}, context *compiler.Context) (*NamedParamet
v2 := compiler.MapValueForKey(m, "value")
if v2 != nil {
var err error
- x.Value, err = NewParameter(v2, compiler.NewContext("value", context))
+ x.Value, err = NewParameter(v2, compiler.NewContext("value", v2, context))
if err != nil {
errors = append(errors, err)
}
@@ -2321,7 +2211,7 @@ func NewNamedParameter(in interface{}, context *compiler.Context) (*NamedParamet
}
// NewNamedPathItem creates an object of type NamedPathItem if possible, returning an error if not.
-func NewNamedPathItem(in interface{}, context *compiler.Context) (*NamedPathItem, error) {
+func NewNamedPathItem(in *yaml.Node, context *compiler.Context) (*NamedPathItem, error) {
errors := make([]error, 0)
x := &NamedPathItem{}
m, ok := compiler.UnpackMap(in)
@@ -2339,9 +2229,9 @@ func NewNamedPathItem(in interface{}, context *compiler.Context) (*NamedPathItem
// string name = 1;
v1 := compiler.MapValueForKey(m, "name")
if v1 != nil {
- x.Name, ok = v1.(string)
+ x.Name, ok = compiler.StringForScalarNode(v1)
if !ok {
- message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -2349,7 +2239,7 @@ func NewNamedPathItem(in interface{}, context *compiler.Context) (*NamedPathItem
v2 := compiler.MapValueForKey(m, "value")
if v2 != nil {
var err error
- x.Value, err = NewPathItem(v2, compiler.NewContext("value", context))
+ x.Value, err = NewPathItem(v2, compiler.NewContext("value", v2, context))
if err != nil {
errors = append(errors, err)
}
@@ -2359,7 +2249,7 @@ func NewNamedPathItem(in interface{}, context *compiler.Context) (*NamedPathItem
}
// NewNamedResponse creates an object of type NamedResponse if possible, returning an error if not.
-func NewNamedResponse(in interface{}, context *compiler.Context) (*NamedResponse, error) {
+func NewNamedResponse(in *yaml.Node, context *compiler.Context) (*NamedResponse, error) {
errors := make([]error, 0)
x := &NamedResponse{}
m, ok := compiler.UnpackMap(in)
@@ -2377,9 +2267,9 @@ func NewNamedResponse(in interface{}, context *compiler.Context) (*NamedResponse
// string name = 1;
v1 := compiler.MapValueForKey(m, "name")
if v1 != nil {
- x.Name, ok = v1.(string)
+ x.Name, ok = compiler.StringForScalarNode(v1)
if !ok {
- message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -2387,7 +2277,7 @@ func NewNamedResponse(in interface{}, context *compiler.Context) (*NamedResponse
v2 := compiler.MapValueForKey(m, "value")
if v2 != nil {
var err error
- x.Value, err = NewResponse(v2, compiler.NewContext("value", context))
+ x.Value, err = NewResponse(v2, compiler.NewContext("value", v2, context))
if err != nil {
errors = append(errors, err)
}
@@ -2397,7 +2287,7 @@ func NewNamedResponse(in interface{}, context *compiler.Context) (*NamedResponse
}
// NewNamedResponseValue creates an object of type NamedResponseValue if possible, returning an error if not.
-func NewNamedResponseValue(in interface{}, context *compiler.Context) (*NamedResponseValue, error) {
+func NewNamedResponseValue(in *yaml.Node, context *compiler.Context) (*NamedResponseValue, error) {
errors := make([]error, 0)
x := &NamedResponseValue{}
m, ok := compiler.UnpackMap(in)
@@ -2415,9 +2305,9 @@ func NewNamedResponseValue(in interface{}, context *compiler.Context) (*NamedRes
// string name = 1;
v1 := compiler.MapValueForKey(m, "name")
if v1 != nil {
- x.Name, ok = v1.(string)
+ x.Name, ok = compiler.StringForScalarNode(v1)
if !ok {
- message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -2425,7 +2315,7 @@ func NewNamedResponseValue(in interface{}, context *compiler.Context) (*NamedRes
v2 := compiler.MapValueForKey(m, "value")
if v2 != nil {
var err error
- x.Value, err = NewResponseValue(v2, compiler.NewContext("value", context))
+ x.Value, err = NewResponseValue(v2, compiler.NewContext("value", v2, context))
if err != nil {
errors = append(errors, err)
}
@@ -2435,7 +2325,7 @@ func NewNamedResponseValue(in interface{}, context *compiler.Context) (*NamedRes
}
// NewNamedSchema creates an object of type NamedSchema if possible, returning an error if not.
-func NewNamedSchema(in interface{}, context *compiler.Context) (*NamedSchema, error) {
+func NewNamedSchema(in *yaml.Node, context *compiler.Context) (*NamedSchema, error) {
errors := make([]error, 0)
x := &NamedSchema{}
m, ok := compiler.UnpackMap(in)
@@ -2453,9 +2343,9 @@ func NewNamedSchema(in interface{}, context *compiler.Context) (*NamedSchema, er
// string name = 1;
v1 := compiler.MapValueForKey(m, "name")
if v1 != nil {
- x.Name, ok = v1.(string)
+ x.Name, ok = compiler.StringForScalarNode(v1)
if !ok {
- message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -2463,7 +2353,7 @@ func NewNamedSchema(in interface{}, context *compiler.Context) (*NamedSchema, er
v2 := compiler.MapValueForKey(m, "value")
if v2 != nil {
var err error
- x.Value, err = NewSchema(v2, compiler.NewContext("value", context))
+ x.Value, err = NewSchema(v2, compiler.NewContext("value", v2, context))
if err != nil {
errors = append(errors, err)
}
@@ -2473,7 +2363,7 @@ func NewNamedSchema(in interface{}, context *compiler.Context) (*NamedSchema, er
}
// NewNamedSecurityDefinitionsItem creates an object of type NamedSecurityDefinitionsItem if possible, returning an error if not.
-func NewNamedSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*NamedSecurityDefinitionsItem, error) {
+func NewNamedSecurityDefinitionsItem(in *yaml.Node, context *compiler.Context) (*NamedSecurityDefinitionsItem, error) {
errors := make([]error, 0)
x := &NamedSecurityDefinitionsItem{}
m, ok := compiler.UnpackMap(in)
@@ -2491,9 +2381,9 @@ func NewNamedSecurityDefinitionsItem(in interface{}, context *compiler.Context)
// string name = 1;
v1 := compiler.MapValueForKey(m, "name")
if v1 != nil {
- x.Name, ok = v1.(string)
+ x.Name, ok = compiler.StringForScalarNode(v1)
if !ok {
- message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -2501,7 +2391,7 @@ func NewNamedSecurityDefinitionsItem(in interface{}, context *compiler.Context)
v2 := compiler.MapValueForKey(m, "value")
if v2 != nil {
var err error
- x.Value, err = NewSecurityDefinitionsItem(v2, compiler.NewContext("value", context))
+ x.Value, err = NewSecurityDefinitionsItem(v2, compiler.NewContext("value", v2, context))
if err != nil {
errors = append(errors, err)
}
@@ -2511,7 +2401,7 @@ func NewNamedSecurityDefinitionsItem(in interface{}, context *compiler.Context)
}
// NewNamedString creates an object of type NamedString if possible, returning an error if not.
-func NewNamedString(in interface{}, context *compiler.Context) (*NamedString, error) {
+func NewNamedString(in *yaml.Node, context *compiler.Context) (*NamedString, error) {
errors := make([]error, 0)
x := &NamedString{}
m, ok := compiler.UnpackMap(in)
@@ -2529,18 +2419,18 @@ func NewNamedString(in interface{}, context *compiler.Context) (*NamedString, er
// string name = 1;
v1 := compiler.MapValueForKey(m, "name")
if v1 != nil {
- x.Name, ok = v1.(string)
+ x.Name, ok = compiler.StringForScalarNode(v1)
if !ok {
- message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
// string value = 2;
v2 := compiler.MapValueForKey(m, "value")
if v2 != nil {
- x.Value, ok = v2.(string)
+ x.Value, ok = compiler.StringForScalarNode(v2)
if !ok {
- message := fmt.Sprintf("has unexpected value for value: %+v (%T)", v2, v2)
+ message := fmt.Sprintf("has unexpected value for value: %s", compiler.Display(v2))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -2549,7 +2439,7 @@ func NewNamedString(in interface{}, context *compiler.Context) (*NamedString, er
}
// NewNamedStringArray creates an object of type NamedStringArray if possible, returning an error if not.
-func NewNamedStringArray(in interface{}, context *compiler.Context) (*NamedStringArray, error) {
+func NewNamedStringArray(in *yaml.Node, context *compiler.Context) (*NamedStringArray, error) {
errors := make([]error, 0)
x := &NamedStringArray{}
m, ok := compiler.UnpackMap(in)
@@ -2567,9 +2457,9 @@ func NewNamedStringArray(in interface{}, context *compiler.Context) (*NamedStrin
// string name = 1;
v1 := compiler.MapValueForKey(m, "name")
if v1 != nil {
- x.Name, ok = v1.(string)
+ x.Name, ok = compiler.StringForScalarNode(v1)
if !ok {
- message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -2577,7 +2467,7 @@ func NewNamedStringArray(in interface{}, context *compiler.Context) (*NamedStrin
v2 := compiler.MapValueForKey(m, "value")
if v2 != nil {
var err error
- x.Value, err = NewStringArray(v2, compiler.NewContext("value", context))
+ x.Value, err = NewStringArray(v2, compiler.NewContext("value", v2, context))
if err != nil {
errors = append(errors, err)
}
@@ -2587,7 +2477,7 @@ func NewNamedStringArray(in interface{}, context *compiler.Context) (*NamedStrin
}
// NewNonBodyParameter creates an object of type NonBodyParameter if possible, returning an error if not.
-func NewNonBodyParameter(in interface{}, context *compiler.Context) (*NonBodyParameter, error) {
+func NewNonBodyParameter(in *yaml.Node, context *compiler.Context) (*NonBodyParameter, error) {
errors := make([]error, 0)
x := &NonBodyParameter{}
matched := false
@@ -2605,7 +2495,7 @@ func NewNonBodyParameter(in interface{}, context *compiler.Context) (*NonBodyPar
// HeaderParameterSubSchema header_parameter_sub_schema = 1;
{
// errors might be ok here, they mean we just don't have the right subtype
- t, matchingError := NewHeaderParameterSubSchema(m, compiler.NewContext("headerParameterSubSchema", context))
+ t, matchingError := NewHeaderParameterSubSchema(m, compiler.NewContext("headerParameterSubSchema", m, context))
if matchingError == nil {
x.Oneof = &NonBodyParameter_HeaderParameterSubSchema{HeaderParameterSubSchema: t}
matched = true
@@ -2616,7 +2506,7 @@ func NewNonBodyParameter(in interface{}, context *compiler.Context) (*NonBodyPar
// FormDataParameterSubSchema form_data_parameter_sub_schema = 2;
{
// errors might be ok here, they mean we just don't have the right subtype
- t, matchingError := NewFormDataParameterSubSchema(m, compiler.NewContext("formDataParameterSubSchema", context))
+ t, matchingError := NewFormDataParameterSubSchema(m, compiler.NewContext("formDataParameterSubSchema", m, context))
if matchingError == nil {
x.Oneof = &NonBodyParameter_FormDataParameterSubSchema{FormDataParameterSubSchema: t}
matched = true
@@ -2627,7 +2517,7 @@ func NewNonBodyParameter(in interface{}, context *compiler.Context) (*NonBodyPar
// QueryParameterSubSchema query_parameter_sub_schema = 3;
{
// errors might be ok here, they mean we just don't have the right subtype
- t, matchingError := NewQueryParameterSubSchema(m, compiler.NewContext("queryParameterSubSchema", context))
+ t, matchingError := NewQueryParameterSubSchema(m, compiler.NewContext("queryParameterSubSchema", m, context))
if matchingError == nil {
x.Oneof = &NonBodyParameter_QueryParameterSubSchema{QueryParameterSubSchema: t}
matched = true
@@ -2638,7 +2528,7 @@ func NewNonBodyParameter(in interface{}, context *compiler.Context) (*NonBodyPar
// PathParameterSubSchema path_parameter_sub_schema = 4;
{
// errors might be ok here, they mean we just don't have the right subtype
- t, matchingError := NewPathParameterSubSchema(m, compiler.NewContext("pathParameterSubSchema", context))
+ t, matchingError := NewPathParameterSubSchema(m, compiler.NewContext("pathParameterSubSchema", m, context))
if matchingError == nil {
x.Oneof = &NonBodyParameter_PathParameterSubSchema{PathParameterSubSchema: t}
matched = true
@@ -2650,12 +2540,16 @@ func NewNonBodyParameter(in interface{}, context *compiler.Context) (*NonBodyPar
if matched {
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
+ } else {
+ message := fmt.Sprintf("contains an invalid NonBodyParameter")
+ err := compiler.NewError(context, message)
+ errors = []error{err}
}
return x, compiler.NewErrorGroupOrNil(errors)
}
// NewOauth2AccessCodeSecurity creates an object of type Oauth2AccessCodeSecurity if possible, returning an error if not.
-func NewOauth2AccessCodeSecurity(in interface{}, context *compiler.Context) (*Oauth2AccessCodeSecurity, error) {
+func NewOauth2AccessCodeSecurity(in *yaml.Node, context *compiler.Context) (*Oauth2AccessCodeSecurity, error) {
errors := make([]error, 0)
x := &Oauth2AccessCodeSecurity{}
m, ok := compiler.UnpackMap(in)
@@ -2679,30 +2573,30 @@ func NewOauth2AccessCodeSecurity(in interface{}, context *compiler.Context) (*Oa
// string type = 1;
v1 := compiler.MapValueForKey(m, "type")
if v1 != nil {
- x.Type, ok = v1.(string)
+ x.Type, ok = compiler.StringForScalarNode(v1)
if !ok {
- message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
// check for valid enum values
// [oauth2]
if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) {
- message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
// string flow = 2;
v2 := compiler.MapValueForKey(m, "flow")
if v2 != nil {
- x.Flow, ok = v2.(string)
+ x.Flow, ok = compiler.StringForScalarNode(v2)
if !ok {
- message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
+ message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2))
errors = append(errors, compiler.NewError(context, message))
}
// check for valid enum values
// [accessCode]
if ok && !compiler.StringArrayContainsValue([]string{"accessCode"}, x.Flow) {
- message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
+ message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -2710,7 +2604,7 @@ func NewOauth2AccessCodeSecurity(in interface{}, context *compiler.Context) (*Oa
v3 := compiler.MapValueForKey(m, "scopes")
if v3 != nil {
var err error
- x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context))
+ x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", v3, context))
if err != nil {
errors = append(errors, err)
}
@@ -2718,53 +2612,53 @@ func NewOauth2AccessCodeSecurity(in interface{}, context *compiler.Context) (*Oa
// string authorization_url = 4;
v4 := compiler.MapValueForKey(m, "authorizationUrl")
if v4 != nil {
- x.AuthorizationUrl, ok = v4.(string)
+ x.AuthorizationUrl, ok = compiler.StringForScalarNode(v4)
if !ok {
- message := fmt.Sprintf("has unexpected value for authorizationUrl: %+v (%T)", v4, v4)
+ message := fmt.Sprintf("has unexpected value for authorizationUrl: %s", compiler.Display(v4))
errors = append(errors, compiler.NewError(context, message))
}
}
// string token_url = 5;
v5 := compiler.MapValueForKey(m, "tokenUrl")
if v5 != nil {
- x.TokenUrl, ok = v5.(string)
+ x.TokenUrl, ok = compiler.StringForScalarNode(v5)
if !ok {
- message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v5, v5)
+ message := fmt.Sprintf("has unexpected value for tokenUrl: %s", compiler.Display(v5))
errors = append(errors, compiler.NewError(context, message))
}
}
// string description = 6;
v6 := compiler.MapValueForKey(m, "description")
if v6 != nil {
- x.Description, ok = v6.(string)
+ x.Description, ok = compiler.StringForScalarNode(v6)
if !ok {
- message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v6, v6)
+ message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v6))
errors = append(errors, compiler.NewError(context, message))
}
}
// repeated NamedAny vendor_extension = 7;
// MAP: Any ^x-
x.VendorExtension = make([]*NamedAny, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
if strings.HasPrefix(k, "x-") {
pair := &NamedAny{}
pair.Name = k
result := &Any{}
- handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+ handled, resultFromExt, err := compiler.CallExtension(context, v, k)
if handled {
if err != nil {
errors = append(errors, err)
} else {
- bytes, _ := yaml.Marshal(v)
+ bytes := compiler.Marshal(v)
result.Yaml = string(bytes)
result.Value = resultFromExt
pair.Value = result
}
} else {
- pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+ pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -2778,7 +2672,7 @@ func NewOauth2AccessCodeSecurity(in interface{}, context *compiler.Context) (*Oa
}
// NewOauth2ApplicationSecurity creates an object of type Oauth2ApplicationSecurity if possible, returning an error if not.
-func NewOauth2ApplicationSecurity(in interface{}, context *compiler.Context) (*Oauth2ApplicationSecurity, error) {
+func NewOauth2ApplicationSecurity(in *yaml.Node, context *compiler.Context) (*Oauth2ApplicationSecurity, error) {
errors := make([]error, 0)
x := &Oauth2ApplicationSecurity{}
m, ok := compiler.UnpackMap(in)
@@ -2802,30 +2696,30 @@ func NewOauth2ApplicationSecurity(in interface{}, context *compiler.Context) (*O
// string type = 1;
v1 := compiler.MapValueForKey(m, "type")
if v1 != nil {
- x.Type, ok = v1.(string)
+ x.Type, ok = compiler.StringForScalarNode(v1)
if !ok {
- message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
// check for valid enum values
// [oauth2]
if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) {
- message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
// string flow = 2;
v2 := compiler.MapValueForKey(m, "flow")
if v2 != nil {
- x.Flow, ok = v2.(string)
+ x.Flow, ok = compiler.StringForScalarNode(v2)
if !ok {
- message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
+ message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2))
errors = append(errors, compiler.NewError(context, message))
}
// check for valid enum values
// [application]
if ok && !compiler.StringArrayContainsValue([]string{"application"}, x.Flow) {
- message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
+ message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -2833,7 +2727,7 @@ func NewOauth2ApplicationSecurity(in interface{}, context *compiler.Context) (*O
v3 := compiler.MapValueForKey(m, "scopes")
if v3 != nil {
var err error
- x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context))
+ x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", v3, context))
if err != nil {
errors = append(errors, err)
}
@@ -2841,44 +2735,44 @@ func NewOauth2ApplicationSecurity(in interface{}, context *compiler.Context) (*O
// string token_url = 4;
v4 := compiler.MapValueForKey(m, "tokenUrl")
if v4 != nil {
- x.TokenUrl, ok = v4.(string)
+ x.TokenUrl, ok = compiler.StringForScalarNode(v4)
if !ok {
- message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v4, v4)
+ message := fmt.Sprintf("has unexpected value for tokenUrl: %s", compiler.Display(v4))
errors = append(errors, compiler.NewError(context, message))
}
}
// string description = 5;
v5 := compiler.MapValueForKey(m, "description")
if v5 != nil {
- x.Description, ok = v5.(string)
+ x.Description, ok = compiler.StringForScalarNode(v5)
if !ok {
- message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5)
+ message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v5))
errors = append(errors, compiler.NewError(context, message))
}
}
// repeated NamedAny vendor_extension = 6;
// MAP: Any ^x-
x.VendorExtension = make([]*NamedAny, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
if strings.HasPrefix(k, "x-") {
pair := &NamedAny{}
pair.Name = k
result := &Any{}
- handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+ handled, resultFromExt, err := compiler.CallExtension(context, v, k)
if handled {
if err != nil {
errors = append(errors, err)
} else {
- bytes, _ := yaml.Marshal(v)
+ bytes := compiler.Marshal(v)
result.Yaml = string(bytes)
result.Value = resultFromExt
pair.Value = result
}
} else {
- pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+ pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -2892,7 +2786,7 @@ func NewOauth2ApplicationSecurity(in interface{}, context *compiler.Context) (*O
}
// NewOauth2ImplicitSecurity creates an object of type Oauth2ImplicitSecurity if possible, returning an error if not.
-func NewOauth2ImplicitSecurity(in interface{}, context *compiler.Context) (*Oauth2ImplicitSecurity, error) {
+func NewOauth2ImplicitSecurity(in *yaml.Node, context *compiler.Context) (*Oauth2ImplicitSecurity, error) {
errors := make([]error, 0)
x := &Oauth2ImplicitSecurity{}
m, ok := compiler.UnpackMap(in)
@@ -2916,30 +2810,30 @@ func NewOauth2ImplicitSecurity(in interface{}, context *compiler.Context) (*Oaut
// string type = 1;
v1 := compiler.MapValueForKey(m, "type")
if v1 != nil {
- x.Type, ok = v1.(string)
+ x.Type, ok = compiler.StringForScalarNode(v1)
if !ok {
- message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
// check for valid enum values
// [oauth2]
if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) {
- message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
// string flow = 2;
v2 := compiler.MapValueForKey(m, "flow")
if v2 != nil {
- x.Flow, ok = v2.(string)
+ x.Flow, ok = compiler.StringForScalarNode(v2)
if !ok {
- message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
+ message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2))
errors = append(errors, compiler.NewError(context, message))
}
// check for valid enum values
// [implicit]
if ok && !compiler.StringArrayContainsValue([]string{"implicit"}, x.Flow) {
- message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
+ message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -2947,7 +2841,7 @@ func NewOauth2ImplicitSecurity(in interface{}, context *compiler.Context) (*Oaut
v3 := compiler.MapValueForKey(m, "scopes")
if v3 != nil {
var err error
- x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context))
+ x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", v3, context))
if err != nil {
errors = append(errors, err)
}
@@ -2955,44 +2849,44 @@ func NewOauth2ImplicitSecurity(in interface{}, context *compiler.Context) (*Oaut
// string authorization_url = 4;
v4 := compiler.MapValueForKey(m, "authorizationUrl")
if v4 != nil {
- x.AuthorizationUrl, ok = v4.(string)
+ x.AuthorizationUrl, ok = compiler.StringForScalarNode(v4)
if !ok {
- message := fmt.Sprintf("has unexpected value for authorizationUrl: %+v (%T)", v4, v4)
+ message := fmt.Sprintf("has unexpected value for authorizationUrl: %s", compiler.Display(v4))
errors = append(errors, compiler.NewError(context, message))
}
}
// string description = 5;
v5 := compiler.MapValueForKey(m, "description")
if v5 != nil {
- x.Description, ok = v5.(string)
+ x.Description, ok = compiler.StringForScalarNode(v5)
if !ok {
- message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5)
+ message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v5))
errors = append(errors, compiler.NewError(context, message))
}
}
// repeated NamedAny vendor_extension = 6;
// MAP: Any ^x-
x.VendorExtension = make([]*NamedAny, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
if strings.HasPrefix(k, "x-") {
pair := &NamedAny{}
pair.Name = k
result := &Any{}
- handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+ handled, resultFromExt, err := compiler.CallExtension(context, v, k)
if handled {
if err != nil {
errors = append(errors, err)
} else {
- bytes, _ := yaml.Marshal(v)
+ bytes := compiler.Marshal(v)
result.Yaml = string(bytes)
result.Value = resultFromExt
pair.Value = result
}
} else {
- pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+ pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -3006,7 +2900,7 @@ func NewOauth2ImplicitSecurity(in interface{}, context *compiler.Context) (*Oaut
}
// NewOauth2PasswordSecurity creates an object of type Oauth2PasswordSecurity if possible, returning an error if not.
-func NewOauth2PasswordSecurity(in interface{}, context *compiler.Context) (*Oauth2PasswordSecurity, error) {
+func NewOauth2PasswordSecurity(in *yaml.Node, context *compiler.Context) (*Oauth2PasswordSecurity, error) {
errors := make([]error, 0)
x := &Oauth2PasswordSecurity{}
m, ok := compiler.UnpackMap(in)
@@ -3030,30 +2924,30 @@ func NewOauth2PasswordSecurity(in interface{}, context *compiler.Context) (*Oaut
// string type = 1;
v1 := compiler.MapValueForKey(m, "type")
if v1 != nil {
- x.Type, ok = v1.(string)
+ x.Type, ok = compiler.StringForScalarNode(v1)
if !ok {
- message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
// check for valid enum values
// [oauth2]
if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) {
- message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
// string flow = 2;
v2 := compiler.MapValueForKey(m, "flow")
if v2 != nil {
- x.Flow, ok = v2.(string)
+ x.Flow, ok = compiler.StringForScalarNode(v2)
if !ok {
- message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
+ message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2))
errors = append(errors, compiler.NewError(context, message))
}
// check for valid enum values
// [password]
if ok && !compiler.StringArrayContainsValue([]string{"password"}, x.Flow) {
- message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
+ message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -3061,7 +2955,7 @@ func NewOauth2PasswordSecurity(in interface{}, context *compiler.Context) (*Oaut
v3 := compiler.MapValueForKey(m, "scopes")
if v3 != nil {
var err error
- x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context))
+ x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", v3, context))
if err != nil {
errors = append(errors, err)
}
@@ -3069,44 +2963,44 @@ func NewOauth2PasswordSecurity(in interface{}, context *compiler.Context) (*Oaut
// string token_url = 4;
v4 := compiler.MapValueForKey(m, "tokenUrl")
if v4 != nil {
- x.TokenUrl, ok = v4.(string)
+ x.TokenUrl, ok = compiler.StringForScalarNode(v4)
if !ok {
- message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v4, v4)
+ message := fmt.Sprintf("has unexpected value for tokenUrl: %s", compiler.Display(v4))
errors = append(errors, compiler.NewError(context, message))
}
}
// string description = 5;
v5 := compiler.MapValueForKey(m, "description")
if v5 != nil {
- x.Description, ok = v5.(string)
+ x.Description, ok = compiler.StringForScalarNode(v5)
if !ok {
- message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5)
+ message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v5))
errors = append(errors, compiler.NewError(context, message))
}
}
// repeated NamedAny vendor_extension = 6;
// MAP: Any ^x-
x.VendorExtension = make([]*NamedAny, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
if strings.HasPrefix(k, "x-") {
pair := &NamedAny{}
pair.Name = k
result := &Any{}
- handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+ handled, resultFromExt, err := compiler.CallExtension(context, v, k)
if handled {
if err != nil {
errors = append(errors, err)
} else {
- bytes, _ := yaml.Marshal(v)
+ bytes := compiler.Marshal(v)
result.Yaml = string(bytes)
result.Value = resultFromExt
pair.Value = result
}
} else {
- pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+ pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -3120,7 +3014,7 @@ func NewOauth2PasswordSecurity(in interface{}, context *compiler.Context) (*Oaut
}
// NewOauth2Scopes creates an object of type Oauth2Scopes if possible, returning an error if not.
-func NewOauth2Scopes(in interface{}, context *compiler.Context) (*Oauth2Scopes, error) {
+func NewOauth2Scopes(in *yaml.Node, context *compiler.Context) (*Oauth2Scopes, error) {
errors := make([]error, 0)
x := &Oauth2Scopes{}
m, ok := compiler.UnpackMap(in)
@@ -3131,13 +3025,13 @@ func NewOauth2Scopes(in interface{}, context *compiler.Context) (*Oauth2Scopes,
// repeated NamedString additional_properties = 1;
// MAP: string
x.AdditionalProperties = make([]*NamedString, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
pair := &NamedString{}
pair.Name = k
- pair.Value = v.(string)
+ pair.Value, _ = compiler.StringForScalarNode(v)
x.AdditionalProperties = append(x.AdditionalProperties, pair)
}
}
@@ -3146,7 +3040,7 @@ func NewOauth2Scopes(in interface{}, context *compiler.Context) (*Oauth2Scopes,
}
// NewOperation creates an object of type Operation if possible, returning an error if not.
-func NewOperation(in interface{}, context *compiler.Context) (*Operation, error) {
+func NewOperation(in *yaml.Node, context *compiler.Context) (*Operation, error) {
errors := make([]error, 0)
x := &Operation{}
m, ok := compiler.UnpackMap(in)
@@ -3170,29 +3064,29 @@ func NewOperation(in interface{}, context *compiler.Context) (*Operation, error)
// repeated string tags = 1;
v1 := compiler.MapValueForKey(m, "tags")
if v1 != nil {
- v, ok := v1.([]interface{})
+ v, ok := compiler.SequenceNodeForNode(v1)
if ok {
- x.Tags = compiler.ConvertInterfaceArrayToStringArray(v)
+ x.Tags = compiler.StringArrayForSequenceNode(v)
} else {
- message := fmt.Sprintf("has unexpected value for tags: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for tags: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
// string summary = 2;
v2 := compiler.MapValueForKey(m, "summary")
if v2 != nil {
- x.Summary, ok = v2.(string)
+ x.Summary, ok = compiler.StringForScalarNode(v2)
if !ok {
- message := fmt.Sprintf("has unexpected value for summary: %+v (%T)", v2, v2)
+ message := fmt.Sprintf("has unexpected value for summary: %s", compiler.Display(v2))
errors = append(errors, compiler.NewError(context, message))
}
}
// string description = 3;
v3 := compiler.MapValueForKey(m, "description")
if v3 != nil {
- x.Description, ok = v3.(string)
+ x.Description, ok = compiler.StringForScalarNode(v3)
if !ok {
- message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
+ message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -3200,7 +3094,7 @@ func NewOperation(in interface{}, context *compiler.Context) (*Operation, error)
v4 := compiler.MapValueForKey(m, "externalDocs")
if v4 != nil {
var err error
- x.ExternalDocs, err = NewExternalDocs(v4, compiler.NewContext("externalDocs", context))
+ x.ExternalDocs, err = NewExternalDocs(v4, compiler.NewContext("externalDocs", v4, context))
if err != nil {
errors = append(errors, err)
}
@@ -3208,31 +3102,31 @@ func NewOperation(in interface{}, context *compiler.Context) (*Operation, error)
// string operation_id = 5;
v5 := compiler.MapValueForKey(m, "operationId")
if v5 != nil {
- x.OperationId, ok = v5.(string)
+ x.OperationId, ok = compiler.StringForScalarNode(v5)
if !ok {
- message := fmt.Sprintf("has unexpected value for operationId: %+v (%T)", v5, v5)
+ message := fmt.Sprintf("has unexpected value for operationId: %s", compiler.Display(v5))
errors = append(errors, compiler.NewError(context, message))
}
}
// repeated string produces = 6;
v6 := compiler.MapValueForKey(m, "produces")
if v6 != nil {
- v, ok := v6.([]interface{})
+ v, ok := compiler.SequenceNodeForNode(v6)
if ok {
- x.Produces = compiler.ConvertInterfaceArrayToStringArray(v)
+ x.Produces = compiler.StringArrayForSequenceNode(v)
} else {
- message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v6, v6)
+ message := fmt.Sprintf("has unexpected value for produces: %s", compiler.Display(v6))
errors = append(errors, compiler.NewError(context, message))
}
}
// repeated string consumes = 7;
v7 := compiler.MapValueForKey(m, "consumes")
if v7 != nil {
- v, ok := v7.([]interface{})
+ v, ok := compiler.SequenceNodeForNode(v7)
if ok {
- x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v)
+ x.Consumes = compiler.StringArrayForSequenceNode(v)
} else {
- message := fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v7, v7)
+ message := fmt.Sprintf("has unexpected value for consumes: %s", compiler.Display(v7))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -3241,10 +3135,10 @@ func NewOperation(in interface{}, context *compiler.Context) (*Operation, error)
if v8 != nil {
// repeated ParametersItem
x.Parameters = make([]*ParametersItem, 0)
- a, ok := v8.([]interface{})
+ a, ok := compiler.SequenceNodeForNode(v8)
if ok {
- for _, item := range a {
- y, err := NewParametersItem(item, compiler.NewContext("parameters", context))
+ for _, item := range a.Content {
+ y, err := NewParametersItem(item, compiler.NewContext("parameters", item, context))
if err != nil {
errors = append(errors, err)
}
@@ -3256,7 +3150,7 @@ func NewOperation(in interface{}, context *compiler.Context) (*Operation, error)
v9 := compiler.MapValueForKey(m, "responses")
if v9 != nil {
var err error
- x.Responses, err = NewResponses(v9, compiler.NewContext("responses", context))
+ x.Responses, err = NewResponses(v9, compiler.NewContext("responses", v9, context))
if err != nil {
errors = append(errors, err)
}
@@ -3264,26 +3158,26 @@ func NewOperation(in interface{}, context *compiler.Context) (*Operation, error)
// repeated string schemes = 10;
v10 := compiler.MapValueForKey(m, "schemes")
if v10 != nil {
- v, ok := v10.([]interface{})
+ v, ok := compiler.SequenceNodeForNode(v10)
if ok {
- x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v)
+ x.Schemes = compiler.StringArrayForSequenceNode(v)
} else {
- message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v10, v10)
+ message := fmt.Sprintf("has unexpected value for schemes: %s", compiler.Display(v10))
errors = append(errors, compiler.NewError(context, message))
}
// check for valid enum values
// [http https ws wss]
if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) {
- message := fmt.Sprintf("has unexpected value for schemes: %+v", v10)
+ message := fmt.Sprintf("has unexpected value for schemes: %s", compiler.Display(v10))
errors = append(errors, compiler.NewError(context, message))
}
}
// bool deprecated = 11;
v11 := compiler.MapValueForKey(m, "deprecated")
if v11 != nil {
- x.Deprecated, ok = v11.(bool)
+ x.Deprecated, ok = compiler.BoolForScalarNode(v11)
if !ok {
- message := fmt.Sprintf("has unexpected value for deprecated: %+v (%T)", v11, v11)
+ message := fmt.Sprintf("has unexpected value for deprecated: %s", compiler.Display(v11))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -3292,10 +3186,10 @@ func NewOperation(in interface{}, context *compiler.Context) (*Operation, error)
if v12 != nil {
// repeated SecurityRequirement
x.Security = make([]*SecurityRequirement, 0)
- a, ok := v12.([]interface{})
+ a, ok := compiler.SequenceNodeForNode(v12)
if ok {
- for _, item := range a {
- y, err := NewSecurityRequirement(item, compiler.NewContext("security", context))
+ for _, item := range a.Content {
+ y, err := NewSecurityRequirement(item, compiler.NewContext("security", item, context))
if err != nil {
errors = append(errors, err)
}
@@ -3306,26 +3200,26 @@ func NewOperation(in interface{}, context *compiler.Context) (*Operation, error)
// repeated NamedAny vendor_extension = 13;
// MAP: Any ^x-
x.VendorExtension = make([]*NamedAny, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
if strings.HasPrefix(k, "x-") {
pair := &NamedAny{}
pair.Name = k
result := &Any{}
- handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+ handled, resultFromExt, err := compiler.CallExtension(context, v, k)
if handled {
if err != nil {
errors = append(errors, err)
} else {
- bytes, _ := yaml.Marshal(v)
+ bytes := compiler.Marshal(v)
result.Yaml = string(bytes)
result.Value = resultFromExt
pair.Value = result
}
} else {
- pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+ pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -3339,7 +3233,7 @@ func NewOperation(in interface{}, context *compiler.Context) (*Operation, error)
}
// NewParameter creates an object of type Parameter if possible, returning an error if not.
-func NewParameter(in interface{}, context *compiler.Context) (*Parameter, error) {
+func NewParameter(in *yaml.Node, context *compiler.Context) (*Parameter, error) {
errors := make([]error, 0)
x := &Parameter{}
matched := false
@@ -3348,7 +3242,7 @@ func NewParameter(in interface{}, context *compiler.Context) (*Parameter, error)
m, ok := compiler.UnpackMap(in)
if ok {
// errors might be ok here, they mean we just don't have the right subtype
- t, matchingError := NewBodyParameter(m, compiler.NewContext("bodyParameter", context))
+ t, matchingError := NewBodyParameter(m, compiler.NewContext("bodyParameter", m, context))
if matchingError == nil {
x.Oneof = &Parameter_BodyParameter{BodyParameter: t}
matched = true
@@ -3362,7 +3256,7 @@ func NewParameter(in interface{}, context *compiler.Context) (*Parameter, error)
m, ok := compiler.UnpackMap(in)
if ok {
// errors might be ok here, they mean we just don't have the right subtype
- t, matchingError := NewNonBodyParameter(m, compiler.NewContext("nonBodyParameter", context))
+ t, matchingError := NewNonBodyParameter(m, compiler.NewContext("nonBodyParameter", m, context))
if matchingError == nil {
x.Oneof = &Parameter_NonBodyParameter{NonBodyParameter: t}
matched = true
@@ -3374,12 +3268,16 @@ func NewParameter(in interface{}, context *compiler.Context) (*Parameter, error)
if matched {
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
+ } else {
+ message := fmt.Sprintf("contains an invalid Parameter")
+ err := compiler.NewError(context, message)
+ errors = []error{err}
}
return x, compiler.NewErrorGroupOrNil(errors)
}
// NewParameterDefinitions creates an object of type ParameterDefinitions if possible, returning an error if not.
-func NewParameterDefinitions(in interface{}, context *compiler.Context) (*ParameterDefinitions, error) {
+func NewParameterDefinitions(in *yaml.Node, context *compiler.Context) (*ParameterDefinitions, error) {
errors := make([]error, 0)
x := &ParameterDefinitions{}
m, ok := compiler.UnpackMap(in)
@@ -3390,14 +3288,14 @@ func NewParameterDefinitions(in interface{}, context *compiler.Context) (*Parame
// repeated NamedParameter additional_properties = 1;
// MAP: Parameter
x.AdditionalProperties = make([]*NamedParameter, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
pair := &NamedParameter{}
pair.Name = k
var err error
- pair.Value, err = NewParameter(v, compiler.NewContext(k, context))
+ pair.Value, err = NewParameter(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -3409,7 +3307,7 @@ func NewParameterDefinitions(in interface{}, context *compiler.Context) (*Parame
}
// NewParametersItem creates an object of type ParametersItem if possible, returning an error if not.
-func NewParametersItem(in interface{}, context *compiler.Context) (*ParametersItem, error) {
+func NewParametersItem(in *yaml.Node, context *compiler.Context) (*ParametersItem, error) {
errors := make([]error, 0)
x := &ParametersItem{}
matched := false
@@ -3418,7 +3316,7 @@ func NewParametersItem(in interface{}, context *compiler.Context) (*ParametersIt
m, ok := compiler.UnpackMap(in)
if ok {
// errors might be ok here, they mean we just don't have the right subtype
- t, matchingError := NewParameter(m, compiler.NewContext("parameter", context))
+ t, matchingError := NewParameter(m, compiler.NewContext("parameter", m, context))
if matchingError == nil {
x.Oneof = &ParametersItem_Parameter{Parameter: t}
matched = true
@@ -3432,7 +3330,7 @@ func NewParametersItem(in interface{}, context *compiler.Context) (*ParametersIt
m, ok := compiler.UnpackMap(in)
if ok {
// errors might be ok here, they mean we just don't have the right subtype
- t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", context))
+ t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", m, context))
if matchingError == nil {
x.Oneof = &ParametersItem_JsonReference{JsonReference: t}
matched = true
@@ -3444,12 +3342,16 @@ func NewParametersItem(in interface{}, context *compiler.Context) (*ParametersIt
if matched {
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
+ } else {
+ message := fmt.Sprintf("contains an invalid ParametersItem")
+ err := compiler.NewError(context, message)
+ errors = []error{err}
}
return x, compiler.NewErrorGroupOrNil(errors)
}
// NewPathItem creates an object of type PathItem if possible, returning an error if not.
-func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) {
+func NewPathItem(in *yaml.Node, context *compiler.Context) (*PathItem, error) {
errors := make([]error, 0)
x := &PathItem{}
m, ok := compiler.UnpackMap(in)
@@ -3467,9 +3369,9 @@ func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) {
// string _ref = 1;
v1 := compiler.MapValueForKey(m, "$ref")
if v1 != nil {
- x.XRef, ok = v1.(string)
+ x.XRef, ok = compiler.StringForScalarNode(v1)
if !ok {
- message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for $ref: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -3477,7 +3379,7 @@ func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) {
v2 := compiler.MapValueForKey(m, "get")
if v2 != nil {
var err error
- x.Get, err = NewOperation(v2, compiler.NewContext("get", context))
+ x.Get, err = NewOperation(v2, compiler.NewContext("get", v2, context))
if err != nil {
errors = append(errors, err)
}
@@ -3486,7 +3388,7 @@ func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) {
v3 := compiler.MapValueForKey(m, "put")
if v3 != nil {
var err error
- x.Put, err = NewOperation(v3, compiler.NewContext("put", context))
+ x.Put, err = NewOperation(v3, compiler.NewContext("put", v3, context))
if err != nil {
errors = append(errors, err)
}
@@ -3495,7 +3397,7 @@ func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) {
v4 := compiler.MapValueForKey(m, "post")
if v4 != nil {
var err error
- x.Post, err = NewOperation(v4, compiler.NewContext("post", context))
+ x.Post, err = NewOperation(v4, compiler.NewContext("post", v4, context))
if err != nil {
errors = append(errors, err)
}
@@ -3504,7 +3406,7 @@ func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) {
v5 := compiler.MapValueForKey(m, "delete")
if v5 != nil {
var err error
- x.Delete, err = NewOperation(v5, compiler.NewContext("delete", context))
+ x.Delete, err = NewOperation(v5, compiler.NewContext("delete", v5, context))
if err != nil {
errors = append(errors, err)
}
@@ -3513,7 +3415,7 @@ func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) {
v6 := compiler.MapValueForKey(m, "options")
if v6 != nil {
var err error
- x.Options, err = NewOperation(v6, compiler.NewContext("options", context))
+ x.Options, err = NewOperation(v6, compiler.NewContext("options", v6, context))
if err != nil {
errors = append(errors, err)
}
@@ -3522,7 +3424,7 @@ func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) {
v7 := compiler.MapValueForKey(m, "head")
if v7 != nil {
var err error
- x.Head, err = NewOperation(v7, compiler.NewContext("head", context))
+ x.Head, err = NewOperation(v7, compiler.NewContext("head", v7, context))
if err != nil {
errors = append(errors, err)
}
@@ -3531,7 +3433,7 @@ func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) {
v8 := compiler.MapValueForKey(m, "patch")
if v8 != nil {
var err error
- x.Patch, err = NewOperation(v8, compiler.NewContext("patch", context))
+ x.Patch, err = NewOperation(v8, compiler.NewContext("patch", v8, context))
if err != nil {
errors = append(errors, err)
}
@@ -3541,10 +3443,10 @@ func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) {
if v9 != nil {
// repeated ParametersItem
x.Parameters = make([]*ParametersItem, 0)
- a, ok := v9.([]interface{})
+ a, ok := compiler.SequenceNodeForNode(v9)
if ok {
- for _, item := range a {
- y, err := NewParametersItem(item, compiler.NewContext("parameters", context))
+ for _, item := range a.Content {
+ y, err := NewParametersItem(item, compiler.NewContext("parameters", item, context))
if err != nil {
errors = append(errors, err)
}
@@ -3555,26 +3457,26 @@ func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) {
// repeated NamedAny vendor_extension = 10;
// MAP: Any ^x-
x.VendorExtension = make([]*NamedAny, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
if strings.HasPrefix(k, "x-") {
pair := &NamedAny{}
pair.Name = k
result := &Any{}
- handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+ handled, resultFromExt, err := compiler.CallExtension(context, v, k)
if handled {
if err != nil {
errors = append(errors, err)
} else {
- bytes, _ := yaml.Marshal(v)
+ bytes := compiler.Marshal(v)
result.Yaml = string(bytes)
result.Value = resultFromExt
pair.Value = result
}
} else {
- pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+ pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -3588,7 +3490,7 @@ func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) {
}
// NewPathParameterSubSchema creates an object of type PathParameterSubSchema if possible, returning an error if not.
-func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*PathParameterSubSchema, error) {
+func NewPathParameterSubSchema(in *yaml.Node, context *compiler.Context) (*PathParameterSubSchema, error) {
errors := make([]error, 0)
x := &PathParameterSubSchema{}
m, ok := compiler.UnpackMap(in)
@@ -3612,66 +3514,66 @@ func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*Path
// bool required = 1;
v1 := compiler.MapValueForKey(m, "required")
if v1 != nil {
- x.Required, ok = v1.(bool)
+ x.Required, ok = compiler.BoolForScalarNode(v1)
if !ok {
- message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
// string in = 2;
v2 := compiler.MapValueForKey(m, "in")
if v2 != nil {
- x.In, ok = v2.(string)
+ x.In, ok = compiler.StringForScalarNode(v2)
if !ok {
- message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+ message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2))
errors = append(errors, compiler.NewError(context, message))
}
// check for valid enum values
// [path]
if ok && !compiler.StringArrayContainsValue([]string{"path"}, x.In) {
- message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+ message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2))
errors = append(errors, compiler.NewError(context, message))
}
}
// string description = 3;
v3 := compiler.MapValueForKey(m, "description")
if v3 != nil {
- x.Description, ok = v3.(string)
+ x.Description, ok = compiler.StringForScalarNode(v3)
if !ok {
- message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
+ message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3))
errors = append(errors, compiler.NewError(context, message))
}
}
// string name = 4;
v4 := compiler.MapValueForKey(m, "name")
if v4 != nil {
- x.Name, ok = v4.(string)
+ x.Name, ok = compiler.StringForScalarNode(v4)
if !ok {
- message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4)
+ message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v4))
errors = append(errors, compiler.NewError(context, message))
}
}
// string type = 5;
v5 := compiler.MapValueForKey(m, "type")
if v5 != nil {
- x.Type, ok = v5.(string)
+ x.Type, ok = compiler.StringForScalarNode(v5)
if !ok {
- message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5)
+ message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v5))
errors = append(errors, compiler.NewError(context, message))
}
// check for valid enum values
// [string number boolean integer array]
if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) {
- message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5)
+ message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v5))
errors = append(errors, compiler.NewError(context, message))
}
}
// string format = 6;
v6 := compiler.MapValueForKey(m, "format")
if v6 != nil {
- x.Format, ok = v6.(string)
+ x.Format, ok = compiler.StringForScalarNode(v6)
if !ok {
- message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v6, v6)
+ message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v6))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -3679,7 +3581,7 @@ func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*Path
v7 := compiler.MapValueForKey(m, "items")
if v7 != nil {
var err error
- x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context))
+ x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", v7, context))
if err != nil {
errors = append(errors, err)
}
@@ -3687,15 +3589,15 @@ func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*Path
// string collection_format = 8;
v8 := compiler.MapValueForKey(m, "collectionFormat")
if v8 != nil {
- x.CollectionFormat, ok = v8.(string)
+ x.CollectionFormat, ok = compiler.StringForScalarNode(v8)
if !ok {
- message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8)
+ message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v8))
errors = append(errors, compiler.NewError(context, message))
}
// check for valid enum values
// [csv ssv tsv pipes]
if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) {
- message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8)
+ message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v8))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -3703,7 +3605,7 @@ func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*Path
v9 := compiler.MapValueForKey(m, "default")
if v9 != nil {
var err error
- x.Default, err = NewAny(v9, compiler.NewContext("default", context))
+ x.Default, err = NewAny(v9, compiler.NewContext("default", v9, context))
if err != nil {
errors = append(errors, err)
}
@@ -3711,126 +3613,102 @@ func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*Path
// float maximum = 10;
v10 := compiler.MapValueForKey(m, "maximum")
if v10 != nil {
- switch v10 := v10.(type) {
- case float64:
- x.Maximum = v10
- case float32:
- x.Maximum = float64(v10)
- case uint64:
- x.Maximum = float64(v10)
- case uint32:
- x.Maximum = float64(v10)
- case int64:
- x.Maximum = float64(v10)
- case int32:
- x.Maximum = float64(v10)
- case int:
- x.Maximum = float64(v10)
- default:
- message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v10, v10)
+ v, ok := compiler.FloatForScalarNode(v10)
+ if ok {
+ x.Maximum = v
+ } else {
+ message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v10))
errors = append(errors, compiler.NewError(context, message))
}
}
// bool exclusive_maximum = 11;
v11 := compiler.MapValueForKey(m, "exclusiveMaximum")
if v11 != nil {
- x.ExclusiveMaximum, ok = v11.(bool)
+ x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v11)
if !ok {
- message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v11, v11)
+ message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v11))
errors = append(errors, compiler.NewError(context, message))
}
}
// float minimum = 12;
v12 := compiler.MapValueForKey(m, "minimum")
if v12 != nil {
- switch v12 := v12.(type) {
- case float64:
- x.Minimum = v12
- case float32:
- x.Minimum = float64(v12)
- case uint64:
- x.Minimum = float64(v12)
- case uint32:
- x.Minimum = float64(v12)
- case int64:
- x.Minimum = float64(v12)
- case int32:
- x.Minimum = float64(v12)
- case int:
- x.Minimum = float64(v12)
- default:
- message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v12, v12)
+ v, ok := compiler.FloatForScalarNode(v12)
+ if ok {
+ x.Minimum = v
+ } else {
+ message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v12))
errors = append(errors, compiler.NewError(context, message))
}
}
// bool exclusive_minimum = 13;
v13 := compiler.MapValueForKey(m, "exclusiveMinimum")
if v13 != nil {
- x.ExclusiveMinimum, ok = v13.(bool)
+ x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v13)
if !ok {
- message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v13, v13)
+ message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v13))
errors = append(errors, compiler.NewError(context, message))
}
}
// int64 max_length = 14;
v14 := compiler.MapValueForKey(m, "maxLength")
if v14 != nil {
- t, ok := v14.(int)
+ t, ok := compiler.IntForScalarNode(v14)
if ok {
x.MaxLength = int64(t)
} else {
- message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v14, v14)
+ message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v14))
errors = append(errors, compiler.NewError(context, message))
}
}
// int64 min_length = 15;
v15 := compiler.MapValueForKey(m, "minLength")
if v15 != nil {
- t, ok := v15.(int)
+ t, ok := compiler.IntForScalarNode(v15)
if ok {
x.MinLength = int64(t)
} else {
- message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v15, v15)
+ message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v15))
errors = append(errors, compiler.NewError(context, message))
}
}
// string pattern = 16;
v16 := compiler.MapValueForKey(m, "pattern")
if v16 != nil {
- x.Pattern, ok = v16.(string)
+ x.Pattern, ok = compiler.StringForScalarNode(v16)
if !ok {
- message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v16, v16)
+ message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v16))
errors = append(errors, compiler.NewError(context, message))
}
}
// int64 max_items = 17;
v17 := compiler.MapValueForKey(m, "maxItems")
if v17 != nil {
- t, ok := v17.(int)
+ t, ok := compiler.IntForScalarNode(v17)
if ok {
x.MaxItems = int64(t)
} else {
- message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v17, v17)
+ message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v17))
errors = append(errors, compiler.NewError(context, message))
}
}
// int64 min_items = 18;
v18 := compiler.MapValueForKey(m, "minItems")
if v18 != nil {
- t, ok := v18.(int)
+ t, ok := compiler.IntForScalarNode(v18)
if ok {
x.MinItems = int64(t)
} else {
- message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v18, v18)
+ message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v18))
errors = append(errors, compiler.NewError(context, message))
}
}
// bool unique_items = 19;
v19 := compiler.MapValueForKey(m, "uniqueItems")
if v19 != nil {
- x.UniqueItems, ok = v19.(bool)
+ x.UniqueItems, ok = compiler.BoolForScalarNode(v19)
if !ok {
- message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v19, v19)
+ message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v19))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -3839,10 +3717,10 @@ func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*Path
if v20 != nil {
// repeated Any
x.Enum = make([]*Any, 0)
- a, ok := v20.([]interface{})
+ a, ok := compiler.SequenceNodeForNode(v20)
if ok {
- for _, item := range a {
- y, err := NewAny(item, compiler.NewContext("enum", context))
+ for _, item := range a.Content {
+ y, err := NewAny(item, compiler.NewContext("enum", item, context))
if err != nil {
errors = append(errors, err)
}
@@ -3853,49 +3731,37 @@ func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*Path
// float multiple_of = 21;
v21 := compiler.MapValueForKey(m, "multipleOf")
if v21 != nil {
- switch v21 := v21.(type) {
- case float64:
- x.MultipleOf = v21
- case float32:
- x.MultipleOf = float64(v21)
- case uint64:
- x.MultipleOf = float64(v21)
- case uint32:
- x.MultipleOf = float64(v21)
- case int64:
- x.MultipleOf = float64(v21)
- case int32:
- x.MultipleOf = float64(v21)
- case int:
- x.MultipleOf = float64(v21)
- default:
- message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v21, v21)
+ v, ok := compiler.FloatForScalarNode(v21)
+ if ok {
+ x.MultipleOf = v
+ } else {
+ message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v21))
errors = append(errors, compiler.NewError(context, message))
}
}
// repeated NamedAny vendor_extension = 22;
// MAP: Any ^x-
x.VendorExtension = make([]*NamedAny, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
if strings.HasPrefix(k, "x-") {
pair := &NamedAny{}
pair.Name = k
result := &Any{}
- handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+ handled, resultFromExt, err := compiler.CallExtension(context, v, k)
if handled {
if err != nil {
errors = append(errors, err)
} else {
- bytes, _ := yaml.Marshal(v)
+ bytes := compiler.Marshal(v)
result.Yaml = string(bytes)
result.Value = resultFromExt
pair.Value = result
}
} else {
- pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+ pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -3909,7 +3775,7 @@ func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*Path
}
// NewPaths creates an object of type Paths if possible, returning an error if not.
-func NewPaths(in interface{}, context *compiler.Context) (*Paths, error) {
+func NewPaths(in *yaml.Node, context *compiler.Context) (*Paths, error) {
errors := make([]error, 0)
x := &Paths{}
m, ok := compiler.UnpackMap(in)
@@ -3927,26 +3793,26 @@ func NewPaths(in interface{}, context *compiler.Context) (*Paths, error) {
// repeated NamedAny vendor_extension = 1;
// MAP: Any ^x-
x.VendorExtension = make([]*NamedAny, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
if strings.HasPrefix(k, "x-") {
pair := &NamedAny{}
pair.Name = k
result := &Any{}
- handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+ handled, resultFromExt, err := compiler.CallExtension(context, v, k)
if handled {
if err != nil {
errors = append(errors, err)
} else {
- bytes, _ := yaml.Marshal(v)
+ bytes := compiler.Marshal(v)
result.Yaml = string(bytes)
result.Value = resultFromExt
pair.Value = result
}
} else {
- pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+ pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -3958,15 +3824,15 @@ func NewPaths(in interface{}, context *compiler.Context) (*Paths, error) {
// repeated NamedPathItem path = 2;
// MAP: PathItem ^/
x.Path = make([]*NamedPathItem, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
if strings.HasPrefix(k, "/") {
pair := &NamedPathItem{}
pair.Name = k
var err error
- pair.Value, err = NewPathItem(v, compiler.NewContext(k, context))
+ pair.Value, err = NewPathItem(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -3979,7 +3845,7 @@ func NewPaths(in interface{}, context *compiler.Context) (*Paths, error) {
}
// NewPrimitivesItems creates an object of type PrimitivesItems if possible, returning an error if not.
-func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesItems, error) {
+func NewPrimitivesItems(in *yaml.Node, context *compiler.Context) (*PrimitivesItems, error) {
errors := make([]error, 0)
x := &PrimitivesItems{}
m, ok := compiler.UnpackMap(in)
@@ -3997,24 +3863,24 @@ func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesI
// string type = 1;
v1 := compiler.MapValueForKey(m, "type")
if v1 != nil {
- x.Type, ok = v1.(string)
+ x.Type, ok = compiler.StringForScalarNode(v1)
if !ok {
- message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
// check for valid enum values
// [string number integer boolean array]
if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) {
- message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
// string format = 2;
v2 := compiler.MapValueForKey(m, "format")
if v2 != nil {
- x.Format, ok = v2.(string)
+ x.Format, ok = compiler.StringForScalarNode(v2)
if !ok {
- message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2)
+ message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v2))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -4022,7 +3888,7 @@ func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesI
v3 := compiler.MapValueForKey(m, "items")
if v3 != nil {
var err error
- x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context))
+ x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", v3, context))
if err != nil {
errors = append(errors, err)
}
@@ -4030,15 +3896,15 @@ func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesI
// string collection_format = 4;
v4 := compiler.MapValueForKey(m, "collectionFormat")
if v4 != nil {
- x.CollectionFormat, ok = v4.(string)
+ x.CollectionFormat, ok = compiler.StringForScalarNode(v4)
if !ok {
- message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4)
+ message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v4))
errors = append(errors, compiler.NewError(context, message))
}
// check for valid enum values
// [csv ssv tsv pipes]
if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) {
- message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4)
+ message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v4))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -4046,7 +3912,7 @@ func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesI
v5 := compiler.MapValueForKey(m, "default")
if v5 != nil {
var err error
- x.Default, err = NewAny(v5, compiler.NewContext("default", context))
+ x.Default, err = NewAny(v5, compiler.NewContext("default", v5, context))
if err != nil {
errors = append(errors, err)
}
@@ -4054,126 +3920,102 @@ func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesI
// float maximum = 6;
v6 := compiler.MapValueForKey(m, "maximum")
if v6 != nil {
- switch v6 := v6.(type) {
- case float64:
- x.Maximum = v6
- case float32:
- x.Maximum = float64(v6)
- case uint64:
- x.Maximum = float64(v6)
- case uint32:
- x.Maximum = float64(v6)
- case int64:
- x.Maximum = float64(v6)
- case int32:
- x.Maximum = float64(v6)
- case int:
- x.Maximum = float64(v6)
- default:
- message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v6, v6)
+ v, ok := compiler.FloatForScalarNode(v6)
+ if ok {
+ x.Maximum = v
+ } else {
+ message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v6))
errors = append(errors, compiler.NewError(context, message))
}
}
// bool exclusive_maximum = 7;
v7 := compiler.MapValueForKey(m, "exclusiveMaximum")
if v7 != nil {
- x.ExclusiveMaximum, ok = v7.(bool)
+ x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v7)
if !ok {
- message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v7, v7)
+ message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v7))
errors = append(errors, compiler.NewError(context, message))
}
}
// float minimum = 8;
v8 := compiler.MapValueForKey(m, "minimum")
if v8 != nil {
- switch v8 := v8.(type) {
- case float64:
- x.Minimum = v8
- case float32:
- x.Minimum = float64(v8)
- case uint64:
- x.Minimum = float64(v8)
- case uint32:
- x.Minimum = float64(v8)
- case int64:
- x.Minimum = float64(v8)
- case int32:
- x.Minimum = float64(v8)
- case int:
- x.Minimum = float64(v8)
- default:
- message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v8, v8)
+ v, ok := compiler.FloatForScalarNode(v8)
+ if ok {
+ x.Minimum = v
+ } else {
+ message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v8))
errors = append(errors, compiler.NewError(context, message))
}
}
// bool exclusive_minimum = 9;
v9 := compiler.MapValueForKey(m, "exclusiveMinimum")
if v9 != nil {
- x.ExclusiveMinimum, ok = v9.(bool)
+ x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v9)
if !ok {
- message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v9, v9)
+ message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v9))
errors = append(errors, compiler.NewError(context, message))
}
}
// int64 max_length = 10;
v10 := compiler.MapValueForKey(m, "maxLength")
if v10 != nil {
- t, ok := v10.(int)
+ t, ok := compiler.IntForScalarNode(v10)
if ok {
x.MaxLength = int64(t)
} else {
- message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v10, v10)
+ message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v10))
errors = append(errors, compiler.NewError(context, message))
}
}
// int64 min_length = 11;
v11 := compiler.MapValueForKey(m, "minLength")
if v11 != nil {
- t, ok := v11.(int)
+ t, ok := compiler.IntForScalarNode(v11)
if ok {
x.MinLength = int64(t)
} else {
- message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v11, v11)
+ message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v11))
errors = append(errors, compiler.NewError(context, message))
}
}
// string pattern = 12;
v12 := compiler.MapValueForKey(m, "pattern")
if v12 != nil {
- x.Pattern, ok = v12.(string)
+ x.Pattern, ok = compiler.StringForScalarNode(v12)
if !ok {
- message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v12, v12)
+ message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v12))
errors = append(errors, compiler.NewError(context, message))
}
}
// int64 max_items = 13;
v13 := compiler.MapValueForKey(m, "maxItems")
if v13 != nil {
- t, ok := v13.(int)
+ t, ok := compiler.IntForScalarNode(v13)
if ok {
x.MaxItems = int64(t)
} else {
- message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v13, v13)
+ message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v13))
errors = append(errors, compiler.NewError(context, message))
}
}
// int64 min_items = 14;
v14 := compiler.MapValueForKey(m, "minItems")
if v14 != nil {
- t, ok := v14.(int)
+ t, ok := compiler.IntForScalarNode(v14)
if ok {
x.MinItems = int64(t)
} else {
- message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v14, v14)
+ message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v14))
errors = append(errors, compiler.NewError(context, message))
}
}
// bool unique_items = 15;
v15 := compiler.MapValueForKey(m, "uniqueItems")
if v15 != nil {
- x.UniqueItems, ok = v15.(bool)
+ x.UniqueItems, ok = compiler.BoolForScalarNode(v15)
if !ok {
- message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v15, v15)
+ message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v15))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -4182,10 +4024,10 @@ func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesI
if v16 != nil {
// repeated Any
x.Enum = make([]*Any, 0)
- a, ok := v16.([]interface{})
+ a, ok := compiler.SequenceNodeForNode(v16)
if ok {
- for _, item := range a {
- y, err := NewAny(item, compiler.NewContext("enum", context))
+ for _, item := range a.Content {
+ y, err := NewAny(item, compiler.NewContext("enum", item, context))
if err != nil {
errors = append(errors, err)
}
@@ -4196,49 +4038,37 @@ func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesI
// float multiple_of = 17;
v17 := compiler.MapValueForKey(m, "multipleOf")
if v17 != nil {
- switch v17 := v17.(type) {
- case float64:
- x.MultipleOf = v17
- case float32:
- x.MultipleOf = float64(v17)
- case uint64:
- x.MultipleOf = float64(v17)
- case uint32:
- x.MultipleOf = float64(v17)
- case int64:
- x.MultipleOf = float64(v17)
- case int32:
- x.MultipleOf = float64(v17)
- case int:
- x.MultipleOf = float64(v17)
- default:
- message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v17, v17)
+ v, ok := compiler.FloatForScalarNode(v17)
+ if ok {
+ x.MultipleOf = v
+ } else {
+ message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v17))
errors = append(errors, compiler.NewError(context, message))
}
}
// repeated NamedAny vendor_extension = 18;
// MAP: Any ^x-
x.VendorExtension = make([]*NamedAny, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
if strings.HasPrefix(k, "x-") {
pair := &NamedAny{}
pair.Name = k
result := &Any{}
- handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+ handled, resultFromExt, err := compiler.CallExtension(context, v, k)
if handled {
if err != nil {
errors = append(errors, err)
} else {
- bytes, _ := yaml.Marshal(v)
+ bytes := compiler.Marshal(v)
result.Yaml = string(bytes)
result.Value = resultFromExt
pair.Value = result
}
} else {
- pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+ pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -4252,7 +4082,7 @@ func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesI
}
// NewProperties creates an object of type Properties if possible, returning an error if not.
-func NewProperties(in interface{}, context *compiler.Context) (*Properties, error) {
+func NewProperties(in *yaml.Node, context *compiler.Context) (*Properties, error) {
errors := make([]error, 0)
x := &Properties{}
m, ok := compiler.UnpackMap(in)
@@ -4263,14 +4093,14 @@ func NewProperties(in interface{}, context *compiler.Context) (*Properties, erro
// repeated NamedSchema additional_properties = 1;
// MAP: Schema
x.AdditionalProperties = make([]*NamedSchema, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
pair := &NamedSchema{}
pair.Name = k
var err error
- pair.Value, err = NewSchema(v, compiler.NewContext(k, context))
+ pair.Value, err = NewSchema(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -4282,7 +4112,7 @@ func NewProperties(in interface{}, context *compiler.Context) (*Properties, erro
}
// NewQueryParameterSubSchema creates an object of type QueryParameterSubSchema if possible, returning an error if not.
-func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*QueryParameterSubSchema, error) {
+func NewQueryParameterSubSchema(in *yaml.Node, context *compiler.Context) (*QueryParameterSubSchema, error) {
errors := make([]error, 0)
x := &QueryParameterSubSchema{}
m, ok := compiler.UnpackMap(in)
@@ -4300,75 +4130,75 @@ func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*Que
// bool required = 1;
v1 := compiler.MapValueForKey(m, "required")
if v1 != nil {
- x.Required, ok = v1.(bool)
+ x.Required, ok = compiler.BoolForScalarNode(v1)
if !ok {
- message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
// string in = 2;
v2 := compiler.MapValueForKey(m, "in")
if v2 != nil {
- x.In, ok = v2.(string)
+ x.In, ok = compiler.StringForScalarNode(v2)
if !ok {
- message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+ message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2))
errors = append(errors, compiler.NewError(context, message))
}
// check for valid enum values
// [query]
if ok && !compiler.StringArrayContainsValue([]string{"query"}, x.In) {
- message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+ message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2))
errors = append(errors, compiler.NewError(context, message))
}
}
// string description = 3;
v3 := compiler.MapValueForKey(m, "description")
if v3 != nil {
- x.Description, ok = v3.(string)
+ x.Description, ok = compiler.StringForScalarNode(v3)
if !ok {
- message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
+ message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3))
errors = append(errors, compiler.NewError(context, message))
}
}
// string name = 4;
v4 := compiler.MapValueForKey(m, "name")
if v4 != nil {
- x.Name, ok = v4.(string)
+ x.Name, ok = compiler.StringForScalarNode(v4)
if !ok {
- message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4)
+ message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v4))
errors = append(errors, compiler.NewError(context, message))
}
}
// bool allow_empty_value = 5;
v5 := compiler.MapValueForKey(m, "allowEmptyValue")
if v5 != nil {
- x.AllowEmptyValue, ok = v5.(bool)
+ x.AllowEmptyValue, ok = compiler.BoolForScalarNode(v5)
if !ok {
- message := fmt.Sprintf("has unexpected value for allowEmptyValue: %+v (%T)", v5, v5)
+ message := fmt.Sprintf("has unexpected value for allowEmptyValue: %s", compiler.Display(v5))
errors = append(errors, compiler.NewError(context, message))
}
}
// string type = 6;
v6 := compiler.MapValueForKey(m, "type")
if v6 != nil {
- x.Type, ok = v6.(string)
+ x.Type, ok = compiler.StringForScalarNode(v6)
if !ok {
- message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6)
+ message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6))
errors = append(errors, compiler.NewError(context, message))
}
// check for valid enum values
// [string number boolean integer array]
if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) {
- message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6)
+ message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6))
errors = append(errors, compiler.NewError(context, message))
}
}
// string format = 7;
v7 := compiler.MapValueForKey(m, "format")
if v7 != nil {
- x.Format, ok = v7.(string)
+ x.Format, ok = compiler.StringForScalarNode(v7)
if !ok {
- message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v7, v7)
+ message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v7))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -4376,7 +4206,7 @@ func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*Que
v8 := compiler.MapValueForKey(m, "items")
if v8 != nil {
var err error
- x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context))
+ x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", v8, context))
if err != nil {
errors = append(errors, err)
}
@@ -4384,15 +4214,15 @@ func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*Que
// string collection_format = 9;
v9 := compiler.MapValueForKey(m, "collectionFormat")
if v9 != nil {
- x.CollectionFormat, ok = v9.(string)
+ x.CollectionFormat, ok = compiler.StringForScalarNode(v9)
if !ok {
- message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9)
+ message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v9))
errors = append(errors, compiler.NewError(context, message))
}
// check for valid enum values
// [csv ssv tsv pipes multi]
if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) {
- message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9)
+ message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v9))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -4400,7 +4230,7 @@ func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*Que
v10 := compiler.MapValueForKey(m, "default")
if v10 != nil {
var err error
- x.Default, err = NewAny(v10, compiler.NewContext("default", context))
+ x.Default, err = NewAny(v10, compiler.NewContext("default", v10, context))
if err != nil {
errors = append(errors, err)
}
@@ -4408,126 +4238,102 @@ func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*Que
// float maximum = 11;
v11 := compiler.MapValueForKey(m, "maximum")
if v11 != nil {
- switch v11 := v11.(type) {
- case float64:
- x.Maximum = v11
- case float32:
- x.Maximum = float64(v11)
- case uint64:
- x.Maximum = float64(v11)
- case uint32:
- x.Maximum = float64(v11)
- case int64:
- x.Maximum = float64(v11)
- case int32:
- x.Maximum = float64(v11)
- case int:
- x.Maximum = float64(v11)
- default:
- message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v11, v11)
+ v, ok := compiler.FloatForScalarNode(v11)
+ if ok {
+ x.Maximum = v
+ } else {
+ message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v11))
errors = append(errors, compiler.NewError(context, message))
}
}
// bool exclusive_maximum = 12;
v12 := compiler.MapValueForKey(m, "exclusiveMaximum")
if v12 != nil {
- x.ExclusiveMaximum, ok = v12.(bool)
+ x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v12)
if !ok {
- message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v12, v12)
+ message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v12))
errors = append(errors, compiler.NewError(context, message))
}
}
// float minimum = 13;
v13 := compiler.MapValueForKey(m, "minimum")
if v13 != nil {
- switch v13 := v13.(type) {
- case float64:
- x.Minimum = v13
- case float32:
- x.Minimum = float64(v13)
- case uint64:
- x.Minimum = float64(v13)
- case uint32:
- x.Minimum = float64(v13)
- case int64:
- x.Minimum = float64(v13)
- case int32:
- x.Minimum = float64(v13)
- case int:
- x.Minimum = float64(v13)
- default:
- message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v13, v13)
+ v, ok := compiler.FloatForScalarNode(v13)
+ if ok {
+ x.Minimum = v
+ } else {
+ message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v13))
errors = append(errors, compiler.NewError(context, message))
}
}
// bool exclusive_minimum = 14;
v14 := compiler.MapValueForKey(m, "exclusiveMinimum")
if v14 != nil {
- x.ExclusiveMinimum, ok = v14.(bool)
+ x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v14)
if !ok {
- message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v14, v14)
+ message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v14))
errors = append(errors, compiler.NewError(context, message))
}
}
// int64 max_length = 15;
v15 := compiler.MapValueForKey(m, "maxLength")
if v15 != nil {
- t, ok := v15.(int)
+ t, ok := compiler.IntForScalarNode(v15)
if ok {
x.MaxLength = int64(t)
} else {
- message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v15, v15)
+ message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v15))
errors = append(errors, compiler.NewError(context, message))
}
}
// int64 min_length = 16;
v16 := compiler.MapValueForKey(m, "minLength")
if v16 != nil {
- t, ok := v16.(int)
+ t, ok := compiler.IntForScalarNode(v16)
if ok {
x.MinLength = int64(t)
} else {
- message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v16, v16)
+ message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v16))
errors = append(errors, compiler.NewError(context, message))
}
}
// string pattern = 17;
v17 := compiler.MapValueForKey(m, "pattern")
if v17 != nil {
- x.Pattern, ok = v17.(string)
+ x.Pattern, ok = compiler.StringForScalarNode(v17)
if !ok {
- message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v17, v17)
+ message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v17))
errors = append(errors, compiler.NewError(context, message))
}
}
// int64 max_items = 18;
v18 := compiler.MapValueForKey(m, "maxItems")
if v18 != nil {
- t, ok := v18.(int)
+ t, ok := compiler.IntForScalarNode(v18)
if ok {
x.MaxItems = int64(t)
} else {
- message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v18, v18)
+ message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v18))
errors = append(errors, compiler.NewError(context, message))
}
}
// int64 min_items = 19;
v19 := compiler.MapValueForKey(m, "minItems")
if v19 != nil {
- t, ok := v19.(int)
+ t, ok := compiler.IntForScalarNode(v19)
if ok {
x.MinItems = int64(t)
} else {
- message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v19, v19)
+ message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v19))
errors = append(errors, compiler.NewError(context, message))
}
}
// bool unique_items = 20;
v20 := compiler.MapValueForKey(m, "uniqueItems")
if v20 != nil {
- x.UniqueItems, ok = v20.(bool)
+ x.UniqueItems, ok = compiler.BoolForScalarNode(v20)
if !ok {
- message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v20, v20)
+ message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v20))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -4536,10 +4342,10 @@ func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*Que
if v21 != nil {
// repeated Any
x.Enum = make([]*Any, 0)
- a, ok := v21.([]interface{})
+ a, ok := compiler.SequenceNodeForNode(v21)
if ok {
- for _, item := range a {
- y, err := NewAny(item, compiler.NewContext("enum", context))
+ for _, item := range a.Content {
+ y, err := NewAny(item, compiler.NewContext("enum", item, context))
if err != nil {
errors = append(errors, err)
}
@@ -4550,49 +4356,37 @@ func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*Que
// float multiple_of = 22;
v22 := compiler.MapValueForKey(m, "multipleOf")
if v22 != nil {
- switch v22 := v22.(type) {
- case float64:
- x.MultipleOf = v22
- case float32:
- x.MultipleOf = float64(v22)
- case uint64:
- x.MultipleOf = float64(v22)
- case uint32:
- x.MultipleOf = float64(v22)
- case int64:
- x.MultipleOf = float64(v22)
- case int32:
- x.MultipleOf = float64(v22)
- case int:
- x.MultipleOf = float64(v22)
- default:
- message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v22, v22)
+ v, ok := compiler.FloatForScalarNode(v22)
+ if ok {
+ x.MultipleOf = v
+ } else {
+ message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v22))
errors = append(errors, compiler.NewError(context, message))
}
}
// repeated NamedAny vendor_extension = 23;
// MAP: Any ^x-
x.VendorExtension = make([]*NamedAny, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
if strings.HasPrefix(k, "x-") {
pair := &NamedAny{}
pair.Name = k
result := &Any{}
- handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+ handled, resultFromExt, err := compiler.CallExtension(context, v, k)
if handled {
if err != nil {
errors = append(errors, err)
} else {
- bytes, _ := yaml.Marshal(v)
+ bytes := compiler.Marshal(v)
result.Yaml = string(bytes)
result.Value = resultFromExt
pair.Value = result
}
} else {
- pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+ pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -4606,7 +4400,7 @@ func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*Que
}
// NewResponse creates an object of type Response if possible, returning an error if not.
-func NewResponse(in interface{}, context *compiler.Context) (*Response, error) {
+func NewResponse(in *yaml.Node, context *compiler.Context) (*Response, error) {
errors := make([]error, 0)
x := &Response{}
m, ok := compiler.UnpackMap(in)
@@ -4630,9 +4424,9 @@ func NewResponse(in interface{}, context *compiler.Context) (*Response, error) {
// string description = 1;
v1 := compiler.MapValueForKey(m, "description")
if v1 != nil {
- x.Description, ok = v1.(string)
+ x.Description, ok = compiler.StringForScalarNode(v1)
if !ok {
- message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -4640,7 +4434,7 @@ func NewResponse(in interface{}, context *compiler.Context) (*Response, error) {
v2 := compiler.MapValueForKey(m, "schema")
if v2 != nil {
var err error
- x.Schema, err = NewSchemaItem(v2, compiler.NewContext("schema", context))
+ x.Schema, err = NewSchemaItem(v2, compiler.NewContext("schema", v2, context))
if err != nil {
errors = append(errors, err)
}
@@ -4649,7 +4443,7 @@ func NewResponse(in interface{}, context *compiler.Context) (*Response, error) {
v3 := compiler.MapValueForKey(m, "headers")
if v3 != nil {
var err error
- x.Headers, err = NewHeaders(v3, compiler.NewContext("headers", context))
+ x.Headers, err = NewHeaders(v3, compiler.NewContext("headers", v3, context))
if err != nil {
errors = append(errors, err)
}
@@ -4658,7 +4452,7 @@ func NewResponse(in interface{}, context *compiler.Context) (*Response, error) {
v4 := compiler.MapValueForKey(m, "examples")
if v4 != nil {
var err error
- x.Examples, err = NewExamples(v4, compiler.NewContext("examples", context))
+ x.Examples, err = NewExamples(v4, compiler.NewContext("examples", v4, context))
if err != nil {
errors = append(errors, err)
}
@@ -4666,26 +4460,26 @@ func NewResponse(in interface{}, context *compiler.Context) (*Response, error) {
// repeated NamedAny vendor_extension = 5;
// MAP: Any ^x-
x.VendorExtension = make([]*NamedAny, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
if strings.HasPrefix(k, "x-") {
pair := &NamedAny{}
pair.Name = k
result := &Any{}
- handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+ handled, resultFromExt, err := compiler.CallExtension(context, v, k)
if handled {
if err != nil {
errors = append(errors, err)
} else {
- bytes, _ := yaml.Marshal(v)
+ bytes := compiler.Marshal(v)
result.Yaml = string(bytes)
result.Value = resultFromExt
pair.Value = result
}
} else {
- pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+ pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -4699,7 +4493,7 @@ func NewResponse(in interface{}, context *compiler.Context) (*Response, error) {
}
// NewResponseDefinitions creates an object of type ResponseDefinitions if possible, returning an error if not.
-func NewResponseDefinitions(in interface{}, context *compiler.Context) (*ResponseDefinitions, error) {
+func NewResponseDefinitions(in *yaml.Node, context *compiler.Context) (*ResponseDefinitions, error) {
errors := make([]error, 0)
x := &ResponseDefinitions{}
m, ok := compiler.UnpackMap(in)
@@ -4710,14 +4504,14 @@ func NewResponseDefinitions(in interface{}, context *compiler.Context) (*Respons
// repeated NamedResponse additional_properties = 1;
// MAP: Response
x.AdditionalProperties = make([]*NamedResponse, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
pair := &NamedResponse{}
pair.Name = k
var err error
- pair.Value, err = NewResponse(v, compiler.NewContext(k, context))
+ pair.Value, err = NewResponse(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -4729,7 +4523,7 @@ func NewResponseDefinitions(in interface{}, context *compiler.Context) (*Respons
}
// NewResponseValue creates an object of type ResponseValue if possible, returning an error if not.
-func NewResponseValue(in interface{}, context *compiler.Context) (*ResponseValue, error) {
+func NewResponseValue(in *yaml.Node, context *compiler.Context) (*ResponseValue, error) {
errors := make([]error, 0)
x := &ResponseValue{}
matched := false
@@ -4738,7 +4532,7 @@ func NewResponseValue(in interface{}, context *compiler.Context) (*ResponseValue
m, ok := compiler.UnpackMap(in)
if ok {
// errors might be ok here, they mean we just don't have the right subtype
- t, matchingError := NewResponse(m, compiler.NewContext("response", context))
+ t, matchingError := NewResponse(m, compiler.NewContext("response", m, context))
if matchingError == nil {
x.Oneof = &ResponseValue_Response{Response: t}
matched = true
@@ -4752,7 +4546,7 @@ func NewResponseValue(in interface{}, context *compiler.Context) (*ResponseValue
m, ok := compiler.UnpackMap(in)
if ok {
// errors might be ok here, they mean we just don't have the right subtype
- t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", context))
+ t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", m, context))
if matchingError == nil {
x.Oneof = &ResponseValue_JsonReference{JsonReference: t}
matched = true
@@ -4764,12 +4558,16 @@ func NewResponseValue(in interface{}, context *compiler.Context) (*ResponseValue
if matched {
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
+ } else {
+ message := fmt.Sprintf("contains an invalid ResponseValue")
+ err := compiler.NewError(context, message)
+ errors = []error{err}
}
return x, compiler.NewErrorGroupOrNil(errors)
}
// NewResponses creates an object of type Responses if possible, returning an error if not.
-func NewResponses(in interface{}, context *compiler.Context) (*Responses, error) {
+func NewResponses(in *yaml.Node, context *compiler.Context) (*Responses, error) {
errors := make([]error, 0)
x := &Responses{}
m, ok := compiler.UnpackMap(in)
@@ -4787,15 +4585,15 @@ func NewResponses(in interface{}, context *compiler.Context) (*Responses, error)
// repeated NamedResponseValue response_code = 1;
// MAP: ResponseValue ^([0-9]{3})$|^(default)$
x.ResponseCode = make([]*NamedResponseValue, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
if pattern2.MatchString(k) {
pair := &NamedResponseValue{}
pair.Name = k
var err error
- pair.Value, err = NewResponseValue(v, compiler.NewContext(k, context))
+ pair.Value, err = NewResponseValue(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -4806,26 +4604,26 @@ func NewResponses(in interface{}, context *compiler.Context) (*Responses, error)
// repeated NamedAny vendor_extension = 2;
// MAP: Any ^x-
x.VendorExtension = make([]*NamedAny, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
if strings.HasPrefix(k, "x-") {
pair := &NamedAny{}
pair.Name = k
result := &Any{}
- handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+ handled, resultFromExt, err := compiler.CallExtension(context, v, k)
if handled {
if err != nil {
errors = append(errors, err)
} else {
- bytes, _ := yaml.Marshal(v)
+ bytes := compiler.Marshal(v)
result.Yaml = string(bytes)
result.Value = resultFromExt
pair.Value = result
}
} else {
- pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+ pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -4839,7 +4637,7 @@ func NewResponses(in interface{}, context *compiler.Context) (*Responses, error)
}
// NewSchema creates an object of type Schema if possible, returning an error if not.
-func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) {
+func NewSchema(in *yaml.Node, context *compiler.Context) (*Schema, error) {
errors := make([]error, 0)
x := &Schema{}
m, ok := compiler.UnpackMap(in)
@@ -4857,36 +4655,36 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) {
// string _ref = 1;
v1 := compiler.MapValueForKey(m, "$ref")
if v1 != nil {
- x.XRef, ok = v1.(string)
+ x.XRef, ok = compiler.StringForScalarNode(v1)
if !ok {
- message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for $ref: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
// string format = 2;
v2 := compiler.MapValueForKey(m, "format")
if v2 != nil {
- x.Format, ok = v2.(string)
+ x.Format, ok = compiler.StringForScalarNode(v2)
if !ok {
- message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2)
+ message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v2))
errors = append(errors, compiler.NewError(context, message))
}
}
// string title = 3;
v3 := compiler.MapValueForKey(m, "title")
if v3 != nil {
- x.Title, ok = v3.(string)
+ x.Title, ok = compiler.StringForScalarNode(v3)
if !ok {
- message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v3, v3)
+ message := fmt.Sprintf("has unexpected value for title: %s", compiler.Display(v3))
errors = append(errors, compiler.NewError(context, message))
}
}
// string description = 4;
v4 := compiler.MapValueForKey(m, "description")
if v4 != nil {
- x.Description, ok = v4.(string)
+ x.Description, ok = compiler.StringForScalarNode(v4)
if !ok {
- message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4)
+ message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v4))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -4894,7 +4692,7 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) {
v5 := compiler.MapValueForKey(m, "default")
if v5 != nil {
var err error
- x.Default, err = NewAny(v5, compiler.NewContext("default", context))
+ x.Default, err = NewAny(v5, compiler.NewContext("default", v5, context))
if err != nil {
errors = append(errors, err)
}
@@ -4902,182 +4700,146 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) {
// float multiple_of = 6;
v6 := compiler.MapValueForKey(m, "multipleOf")
if v6 != nil {
- switch v6 := v6.(type) {
- case float64:
- x.MultipleOf = v6
- case float32:
- x.MultipleOf = float64(v6)
- case uint64:
- x.MultipleOf = float64(v6)
- case uint32:
- x.MultipleOf = float64(v6)
- case int64:
- x.MultipleOf = float64(v6)
- case int32:
- x.MultipleOf = float64(v6)
- case int:
- x.MultipleOf = float64(v6)
- default:
- message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v6, v6)
+ v, ok := compiler.FloatForScalarNode(v6)
+ if ok {
+ x.MultipleOf = v
+ } else {
+ message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v6))
errors = append(errors, compiler.NewError(context, message))
}
}
// float maximum = 7;
v7 := compiler.MapValueForKey(m, "maximum")
if v7 != nil {
- switch v7 := v7.(type) {
- case float64:
- x.Maximum = v7
- case float32:
- x.Maximum = float64(v7)
- case uint64:
- x.Maximum = float64(v7)
- case uint32:
- x.Maximum = float64(v7)
- case int64:
- x.Maximum = float64(v7)
- case int32:
- x.Maximum = float64(v7)
- case int:
- x.Maximum = float64(v7)
- default:
- message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v7, v7)
+ v, ok := compiler.FloatForScalarNode(v7)
+ if ok {
+ x.Maximum = v
+ } else {
+ message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v7))
errors = append(errors, compiler.NewError(context, message))
}
}
// bool exclusive_maximum = 8;
v8 := compiler.MapValueForKey(m, "exclusiveMaximum")
if v8 != nil {
- x.ExclusiveMaximum, ok = v8.(bool)
+ x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v8)
if !ok {
- message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v8, v8)
+ message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v8))
errors = append(errors, compiler.NewError(context, message))
}
}
// float minimum = 9;
v9 := compiler.MapValueForKey(m, "minimum")
if v9 != nil {
- switch v9 := v9.(type) {
- case float64:
- x.Minimum = v9
- case float32:
- x.Minimum = float64(v9)
- case uint64:
- x.Minimum = float64(v9)
- case uint32:
- x.Minimum = float64(v9)
- case int64:
- x.Minimum = float64(v9)
- case int32:
- x.Minimum = float64(v9)
- case int:
- x.Minimum = float64(v9)
- default:
- message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v9, v9)
+ v, ok := compiler.FloatForScalarNode(v9)
+ if ok {
+ x.Minimum = v
+ } else {
+ message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v9))
errors = append(errors, compiler.NewError(context, message))
}
}
// bool exclusive_minimum = 10;
v10 := compiler.MapValueForKey(m, "exclusiveMinimum")
if v10 != nil {
- x.ExclusiveMinimum, ok = v10.(bool)
+ x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v10)
if !ok {
- message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v10, v10)
+ message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v10))
errors = append(errors, compiler.NewError(context, message))
}
}
// int64 max_length = 11;
v11 := compiler.MapValueForKey(m, "maxLength")
if v11 != nil {
- t, ok := v11.(int)
+ t, ok := compiler.IntForScalarNode(v11)
if ok {
x.MaxLength = int64(t)
} else {
- message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v11, v11)
+ message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v11))
errors = append(errors, compiler.NewError(context, message))
}
}
// int64 min_length = 12;
v12 := compiler.MapValueForKey(m, "minLength")
if v12 != nil {
- t, ok := v12.(int)
+ t, ok := compiler.IntForScalarNode(v12)
if ok {
x.MinLength = int64(t)
} else {
- message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v12, v12)
+ message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v12))
errors = append(errors, compiler.NewError(context, message))
}
}
// string pattern = 13;
v13 := compiler.MapValueForKey(m, "pattern")
if v13 != nil {
- x.Pattern, ok = v13.(string)
+ x.Pattern, ok = compiler.StringForScalarNode(v13)
if !ok {
- message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v13, v13)
+ message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v13))
errors = append(errors, compiler.NewError(context, message))
}
}
// int64 max_items = 14;
v14 := compiler.MapValueForKey(m, "maxItems")
if v14 != nil {
- t, ok := v14.(int)
+ t, ok := compiler.IntForScalarNode(v14)
if ok {
x.MaxItems = int64(t)
} else {
- message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v14, v14)
+ message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v14))
errors = append(errors, compiler.NewError(context, message))
}
}
// int64 min_items = 15;
v15 := compiler.MapValueForKey(m, "minItems")
if v15 != nil {
- t, ok := v15.(int)
+ t, ok := compiler.IntForScalarNode(v15)
if ok {
x.MinItems = int64(t)
} else {
- message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v15, v15)
+ message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v15))
errors = append(errors, compiler.NewError(context, message))
}
}
// bool unique_items = 16;
v16 := compiler.MapValueForKey(m, "uniqueItems")
if v16 != nil {
- x.UniqueItems, ok = v16.(bool)
+ x.UniqueItems, ok = compiler.BoolForScalarNode(v16)
if !ok {
- message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v16, v16)
+ message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v16))
errors = append(errors, compiler.NewError(context, message))
}
}
// int64 max_properties = 17;
v17 := compiler.MapValueForKey(m, "maxProperties")
if v17 != nil {
- t, ok := v17.(int)
+ t, ok := compiler.IntForScalarNode(v17)
if ok {
x.MaxProperties = int64(t)
} else {
- message := fmt.Sprintf("has unexpected value for maxProperties: %+v (%T)", v17, v17)
+ message := fmt.Sprintf("has unexpected value for maxProperties: %s", compiler.Display(v17))
errors = append(errors, compiler.NewError(context, message))
}
}
// int64 min_properties = 18;
v18 := compiler.MapValueForKey(m, "minProperties")
if v18 != nil {
- t, ok := v18.(int)
+ t, ok := compiler.IntForScalarNode(v18)
if ok {
x.MinProperties = int64(t)
} else {
- message := fmt.Sprintf("has unexpected value for minProperties: %+v (%T)", v18, v18)
+ message := fmt.Sprintf("has unexpected value for minProperties: %s", compiler.Display(v18))
errors = append(errors, compiler.NewError(context, message))
}
}
// repeated string required = 19;
v19 := compiler.MapValueForKey(m, "required")
if v19 != nil {
- v, ok := v19.([]interface{})
+ v, ok := compiler.SequenceNodeForNode(v19)
if ok {
- x.Required = compiler.ConvertInterfaceArrayToStringArray(v)
+ x.Required = compiler.StringArrayForSequenceNode(v)
} else {
- message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v19, v19)
+ message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v19))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -5086,10 +4848,10 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) {
if v20 != nil {
// repeated Any
x.Enum = make([]*Any, 0)
- a, ok := v20.([]interface{})
+ a, ok := compiler.SequenceNodeForNode(v20)
if ok {
- for _, item := range a {
- y, err := NewAny(item, compiler.NewContext("enum", context))
+ for _, item := range a.Content {
+ y, err := NewAny(item, compiler.NewContext("enum", item, context))
if err != nil {
errors = append(errors, err)
}
@@ -5101,7 +4863,7 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) {
v21 := compiler.MapValueForKey(m, "additionalProperties")
if v21 != nil {
var err error
- x.AdditionalProperties, err = NewAdditionalPropertiesItem(v21, compiler.NewContext("additionalProperties", context))
+ x.AdditionalProperties, err = NewAdditionalPropertiesItem(v21, compiler.NewContext("additionalProperties", v21, context))
if err != nil {
errors = append(errors, err)
}
@@ -5110,7 +4872,7 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) {
v22 := compiler.MapValueForKey(m, "type")
if v22 != nil {
var err error
- x.Type, err = NewTypeItem(v22, compiler.NewContext("type", context))
+ x.Type, err = NewTypeItem(v22, compiler.NewContext("type", v22, context))
if err != nil {
errors = append(errors, err)
}
@@ -5119,7 +4881,7 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) {
v23 := compiler.MapValueForKey(m, "items")
if v23 != nil {
var err error
- x.Items, err = NewItemsItem(v23, compiler.NewContext("items", context))
+ x.Items, err = NewItemsItem(v23, compiler.NewContext("items", v23, context))
if err != nil {
errors = append(errors, err)
}
@@ -5129,10 +4891,10 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) {
if v24 != nil {
// repeated Schema
x.AllOf = make([]*Schema, 0)
- a, ok := v24.([]interface{})
+ a, ok := compiler.SequenceNodeForNode(v24)
if ok {
- for _, item := range a {
- y, err := NewSchema(item, compiler.NewContext("allOf", context))
+ for _, item := range a.Content {
+ y, err := NewSchema(item, compiler.NewContext("allOf", item, context))
if err != nil {
errors = append(errors, err)
}
@@ -5144,7 +4906,7 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) {
v25 := compiler.MapValueForKey(m, "properties")
if v25 != nil {
var err error
- x.Properties, err = NewProperties(v25, compiler.NewContext("properties", context))
+ x.Properties, err = NewProperties(v25, compiler.NewContext("properties", v25, context))
if err != nil {
errors = append(errors, err)
}
@@ -5152,18 +4914,18 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) {
// string discriminator = 26;
v26 := compiler.MapValueForKey(m, "discriminator")
if v26 != nil {
- x.Discriminator, ok = v26.(string)
+ x.Discriminator, ok = compiler.StringForScalarNode(v26)
if !ok {
- message := fmt.Sprintf("has unexpected value for discriminator: %+v (%T)", v26, v26)
+ message := fmt.Sprintf("has unexpected value for discriminator: %s", compiler.Display(v26))
errors = append(errors, compiler.NewError(context, message))
}
}
// bool read_only = 27;
v27 := compiler.MapValueForKey(m, "readOnly")
if v27 != nil {
- x.ReadOnly, ok = v27.(bool)
+ x.ReadOnly, ok = compiler.BoolForScalarNode(v27)
if !ok {
- message := fmt.Sprintf("has unexpected value for readOnly: %+v (%T)", v27, v27)
+ message := fmt.Sprintf("has unexpected value for readOnly: %s", compiler.Display(v27))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -5171,7 +4933,7 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) {
v28 := compiler.MapValueForKey(m, "xml")
if v28 != nil {
var err error
- x.Xml, err = NewXml(v28, compiler.NewContext("xml", context))
+ x.Xml, err = NewXml(v28, compiler.NewContext("xml", v28, context))
if err != nil {
errors = append(errors, err)
}
@@ -5180,7 +4942,7 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) {
v29 := compiler.MapValueForKey(m, "externalDocs")
if v29 != nil {
var err error
- x.ExternalDocs, err = NewExternalDocs(v29, compiler.NewContext("externalDocs", context))
+ x.ExternalDocs, err = NewExternalDocs(v29, compiler.NewContext("externalDocs", v29, context))
if err != nil {
errors = append(errors, err)
}
@@ -5189,7 +4951,7 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) {
v30 := compiler.MapValueForKey(m, "example")
if v30 != nil {
var err error
- x.Example, err = NewAny(v30, compiler.NewContext("example", context))
+ x.Example, err = NewAny(v30, compiler.NewContext("example", v30, context))
if err != nil {
errors = append(errors, err)
}
@@ -5197,26 +4959,26 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) {
// repeated NamedAny vendor_extension = 31;
// MAP: Any ^x-
x.VendorExtension = make([]*NamedAny, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
if strings.HasPrefix(k, "x-") {
pair := &NamedAny{}
pair.Name = k
result := &Any{}
- handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+ handled, resultFromExt, err := compiler.CallExtension(context, v, k)
if handled {
if err != nil {
errors = append(errors, err)
} else {
- bytes, _ := yaml.Marshal(v)
+ bytes := compiler.Marshal(v)
result.Yaml = string(bytes)
result.Value = resultFromExt
pair.Value = result
}
} else {
- pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+ pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -5230,7 +4992,7 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) {
}
// NewSchemaItem creates an object of type SchemaItem if possible, returning an error if not.
-func NewSchemaItem(in interface{}, context *compiler.Context) (*SchemaItem, error) {
+func NewSchemaItem(in *yaml.Node, context *compiler.Context) (*SchemaItem, error) {
errors := make([]error, 0)
x := &SchemaItem{}
matched := false
@@ -5239,7 +5001,7 @@ func NewSchemaItem(in interface{}, context *compiler.Context) (*SchemaItem, erro
m, ok := compiler.UnpackMap(in)
if ok {
// errors might be ok here, they mean we just don't have the right subtype
- t, matchingError := NewSchema(m, compiler.NewContext("schema", context))
+ t, matchingError := NewSchema(m, compiler.NewContext("schema", m, context))
if matchingError == nil {
x.Oneof = &SchemaItem_Schema{Schema: t}
matched = true
@@ -5253,7 +5015,7 @@ func NewSchemaItem(in interface{}, context *compiler.Context) (*SchemaItem, erro
m, ok := compiler.UnpackMap(in)
if ok {
// errors might be ok here, they mean we just don't have the right subtype
- t, matchingError := NewFileSchema(m, compiler.NewContext("fileSchema", context))
+ t, matchingError := NewFileSchema(m, compiler.NewContext("fileSchema", m, context))
if matchingError == nil {
x.Oneof = &SchemaItem_FileSchema{FileSchema: t}
matched = true
@@ -5265,12 +5027,16 @@ func NewSchemaItem(in interface{}, context *compiler.Context) (*SchemaItem, erro
if matched {
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
+ } else {
+ message := fmt.Sprintf("contains an invalid SchemaItem")
+ err := compiler.NewError(context, message)
+ errors = []error{err}
}
return x, compiler.NewErrorGroupOrNil(errors)
}
// NewSecurityDefinitions creates an object of type SecurityDefinitions if possible, returning an error if not.
-func NewSecurityDefinitions(in interface{}, context *compiler.Context) (*SecurityDefinitions, error) {
+func NewSecurityDefinitions(in *yaml.Node, context *compiler.Context) (*SecurityDefinitions, error) {
errors := make([]error, 0)
x := &SecurityDefinitions{}
m, ok := compiler.UnpackMap(in)
@@ -5281,14 +5047,14 @@ func NewSecurityDefinitions(in interface{}, context *compiler.Context) (*Securit
// repeated NamedSecurityDefinitionsItem additional_properties = 1;
// MAP: SecurityDefinitionsItem
x.AdditionalProperties = make([]*NamedSecurityDefinitionsItem, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
pair := &NamedSecurityDefinitionsItem{}
pair.Name = k
var err error
- pair.Value, err = NewSecurityDefinitionsItem(v, compiler.NewContext(k, context))
+ pair.Value, err = NewSecurityDefinitionsItem(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -5300,7 +5066,7 @@ func NewSecurityDefinitions(in interface{}, context *compiler.Context) (*Securit
}
// NewSecurityDefinitionsItem creates an object of type SecurityDefinitionsItem if possible, returning an error if not.
-func NewSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*SecurityDefinitionsItem, error) {
+func NewSecurityDefinitionsItem(in *yaml.Node, context *compiler.Context) (*SecurityDefinitionsItem, error) {
errors := make([]error, 0)
x := &SecurityDefinitionsItem{}
matched := false
@@ -5309,7 +5075,7 @@ func NewSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*Sec
m, ok := compiler.UnpackMap(in)
if ok {
// errors might be ok here, they mean we just don't have the right subtype
- t, matchingError := NewBasicAuthenticationSecurity(m, compiler.NewContext("basicAuthenticationSecurity", context))
+ t, matchingError := NewBasicAuthenticationSecurity(m, compiler.NewContext("basicAuthenticationSecurity", m, context))
if matchingError == nil {
x.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{BasicAuthenticationSecurity: t}
matched = true
@@ -5323,7 +5089,7 @@ func NewSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*Sec
m, ok := compiler.UnpackMap(in)
if ok {
// errors might be ok here, they mean we just don't have the right subtype
- t, matchingError := NewApiKeySecurity(m, compiler.NewContext("apiKeySecurity", context))
+ t, matchingError := NewApiKeySecurity(m, compiler.NewContext("apiKeySecurity", m, context))
if matchingError == nil {
x.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{ApiKeySecurity: t}
matched = true
@@ -5337,7 +5103,7 @@ func NewSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*Sec
m, ok := compiler.UnpackMap(in)
if ok {
// errors might be ok here, they mean we just don't have the right subtype
- t, matchingError := NewOauth2ImplicitSecurity(m, compiler.NewContext("oauth2ImplicitSecurity", context))
+ t, matchingError := NewOauth2ImplicitSecurity(m, compiler.NewContext("oauth2ImplicitSecurity", m, context))
if matchingError == nil {
x.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{Oauth2ImplicitSecurity: t}
matched = true
@@ -5351,7 +5117,7 @@ func NewSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*Sec
m, ok := compiler.UnpackMap(in)
if ok {
// errors might be ok here, they mean we just don't have the right subtype
- t, matchingError := NewOauth2PasswordSecurity(m, compiler.NewContext("oauth2PasswordSecurity", context))
+ t, matchingError := NewOauth2PasswordSecurity(m, compiler.NewContext("oauth2PasswordSecurity", m, context))
if matchingError == nil {
x.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{Oauth2PasswordSecurity: t}
matched = true
@@ -5365,7 +5131,7 @@ func NewSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*Sec
m, ok := compiler.UnpackMap(in)
if ok {
// errors might be ok here, they mean we just don't have the right subtype
- t, matchingError := NewOauth2ApplicationSecurity(m, compiler.NewContext("oauth2ApplicationSecurity", context))
+ t, matchingError := NewOauth2ApplicationSecurity(m, compiler.NewContext("oauth2ApplicationSecurity", m, context))
if matchingError == nil {
x.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{Oauth2ApplicationSecurity: t}
matched = true
@@ -5379,7 +5145,7 @@ func NewSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*Sec
m, ok := compiler.UnpackMap(in)
if ok {
// errors might be ok here, they mean we just don't have the right subtype
- t, matchingError := NewOauth2AccessCodeSecurity(m, compiler.NewContext("oauth2AccessCodeSecurity", context))
+ t, matchingError := NewOauth2AccessCodeSecurity(m, compiler.NewContext("oauth2AccessCodeSecurity", m, context))
if matchingError == nil {
x.Oneof = &SecurityDefinitionsItem_Oauth2AccessCodeSecurity{Oauth2AccessCodeSecurity: t}
matched = true
@@ -5391,12 +5157,16 @@ func NewSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*Sec
if matched {
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
+ } else {
+ message := fmt.Sprintf("contains an invalid SecurityDefinitionsItem")
+ err := compiler.NewError(context, message)
+ errors = []error{err}
}
return x, compiler.NewErrorGroupOrNil(errors)
}
// NewSecurityRequirement creates an object of type SecurityRequirement if possible, returning an error if not.
-func NewSecurityRequirement(in interface{}, context *compiler.Context) (*SecurityRequirement, error) {
+func NewSecurityRequirement(in *yaml.Node, context *compiler.Context) (*SecurityRequirement, error) {
errors := make([]error, 0)
x := &SecurityRequirement{}
m, ok := compiler.UnpackMap(in)
@@ -5407,14 +5177,14 @@ func NewSecurityRequirement(in interface{}, context *compiler.Context) (*Securit
// repeated NamedStringArray additional_properties = 1;
// MAP: StringArray
x.AdditionalProperties = make([]*NamedStringArray, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
pair := &NamedStringArray{}
pair.Name = k
var err error
- pair.Value, err = NewStringArray(v, compiler.NewContext(k, context))
+ pair.Value, err = NewStringArray(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -5426,24 +5196,19 @@ func NewSecurityRequirement(in interface{}, context *compiler.Context) (*Securit
}
// NewStringArray creates an object of type StringArray if possible, returning an error if not.
-func NewStringArray(in interface{}, context *compiler.Context) (*StringArray, error) {
+func NewStringArray(in *yaml.Node, context *compiler.Context) (*StringArray, error) {
errors := make([]error, 0)
x := &StringArray{}
- a, ok := in.([]interface{})
- if !ok {
- message := fmt.Sprintf("has unexpected value for StringArray: %+v (%T)", in, in)
- errors = append(errors, compiler.NewError(context, message))
- } else {
- x.Value = make([]string, 0)
- for _, s := range a {
- x.Value = append(x.Value, s.(string))
- }
+ x.Value = make([]string, 0)
+ for _, node := range in.Content {
+ s, _ := compiler.StringForScalarNode(node)
+ x.Value = append(x.Value, s)
}
return x, compiler.NewErrorGroupOrNil(errors)
}
// NewTag creates an object of type Tag if possible, returning an error if not.
-func NewTag(in interface{}, context *compiler.Context) (*Tag, error) {
+func NewTag(in *yaml.Node, context *compiler.Context) (*Tag, error) {
errors := make([]error, 0)
x := &Tag{}
m, ok := compiler.UnpackMap(in)
@@ -5467,18 +5232,18 @@ func NewTag(in interface{}, context *compiler.Context) (*Tag, error) {
// string name = 1;
v1 := compiler.MapValueForKey(m, "name")
if v1 != nil {
- x.Name, ok = v1.(string)
+ x.Name, ok = compiler.StringForScalarNode(v1)
if !ok {
- message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
// string description = 2;
v2 := compiler.MapValueForKey(m, "description")
if v2 != nil {
- x.Description, ok = v2.(string)
+ x.Description, ok = compiler.StringForScalarNode(v2)
if !ok {
- message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2)
+ message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2))
errors = append(errors, compiler.NewError(context, message))
}
}
@@ -5486,7 +5251,7 @@ func NewTag(in interface{}, context *compiler.Context) (*Tag, error) {
v3 := compiler.MapValueForKey(m, "externalDocs")
if v3 != nil {
var err error
- x.ExternalDocs, err = NewExternalDocs(v3, compiler.NewContext("externalDocs", context))
+ x.ExternalDocs, err = NewExternalDocs(v3, compiler.NewContext("externalDocs", v3, context))
if err != nil {
errors = append(errors, err)
}
@@ -5494,26 +5259,26 @@ func NewTag(in interface{}, context *compiler.Context) (*Tag, error) {
// repeated NamedAny vendor_extension = 4;
// MAP: Any ^x-
x.VendorExtension = make([]*NamedAny, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
if strings.HasPrefix(k, "x-") {
pair := &NamedAny{}
pair.Name = k
result := &Any{}
- handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+ handled, resultFromExt, err := compiler.CallExtension(context, v, k)
if handled {
if err != nil {
errors = append(errors, err)
} else {
- bytes, _ := yaml.Marshal(v)
+ bytes := compiler.Marshal(v)
result.Yaml = string(bytes)
result.Value = resultFromExt
pair.Value = result
}
} else {
- pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+ pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -5527,17 +5292,19 @@ func NewTag(in interface{}, context *compiler.Context) (*Tag, error) {
}
// NewTypeItem creates an object of type TypeItem if possible, returning an error if not.
-func NewTypeItem(in interface{}, context *compiler.Context) (*TypeItem, error) {
+func NewTypeItem(in *yaml.Node, context *compiler.Context) (*TypeItem, error) {
errors := make([]error, 0)
x := &TypeItem{}
- switch in := in.(type) {
- case string:
+ v1 := in
+ switch v1.Kind {
+ case yaml.ScalarNode:
x.Value = make([]string, 0)
- x.Value = append(x.Value, in)
- case []interface{}:
+ x.Value = append(x.Value, v1.Value)
+ case yaml.SequenceNode:
x.Value = make([]string, 0)
- for _, v := range in {
- value, ok := v.(string)
+ for _, v := range v1.Content {
+ value := v.Value
+ ok := v.Kind == yaml.ScalarNode
if ok {
x.Value = append(x.Value, value)
} else {
@@ -5553,7 +5320,7 @@ func NewTypeItem(in interface{}, context *compiler.Context) (*TypeItem, error) {
}
// NewVendorExtension creates an object of type VendorExtension if possible, returning an error if not.
-func NewVendorExtension(in interface{}, context *compiler.Context) (*VendorExtension, error) {
+func NewVendorExtension(in *yaml.Node, context *compiler.Context) (*VendorExtension, error) {
errors := make([]error, 0)
x := &VendorExtension{}
m, ok := compiler.UnpackMap(in)
@@ -5564,25 +5331,25 @@ func NewVendorExtension(in interface{}, context *compiler.Context) (*VendorExten
// repeated NamedAny additional_properties = 1;
// MAP: Any
x.AdditionalProperties = make([]*NamedAny, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
pair := &NamedAny{}
pair.Name = k
result := &Any{}
- handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+ handled, resultFromExt, err := compiler.CallExtension(context, v, k)
if handled {
if err != nil {
errors = append(errors, err)
} else {
- bytes, _ := yaml.Marshal(v)
+ bytes := compiler.Marshal(v)
result.Yaml = string(bytes)
result.Value = resultFromExt
pair.Value = result
}
} else {
- pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+ pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -5595,7 +5362,7 @@ func NewVendorExtension(in interface{}, context *compiler.Context) (*VendorExten
}
// NewXml creates an object of type Xml if possible, returning an error if not.
-func NewXml(in interface{}, context *compiler.Context) (*Xml, error) {
+func NewXml(in *yaml.Node, context *compiler.Context) (*Xml, error) {
errors := make([]error, 0)
x := &Xml{}
m, ok := compiler.UnpackMap(in)
@@ -5613,71 +5380,71 @@ func NewXml(in interface{}, context *compiler.Context) (*Xml, error) {
// string name = 1;
v1 := compiler.MapValueForKey(m, "name")
if v1 != nil {
- x.Name, ok = v1.(string)
+ x.Name, ok = compiler.StringForScalarNode(v1)
if !ok {
- message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+ message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1))
errors = append(errors, compiler.NewError(context, message))
}
}
// string namespace = 2;
v2 := compiler.MapValueForKey(m, "namespace")
if v2 != nil {
- x.Namespace, ok = v2.(string)
+ x.Namespace, ok = compiler.StringForScalarNode(v2)
if !ok {
- message := fmt.Sprintf("has unexpected value for namespace: %+v (%T)", v2, v2)
+ message := fmt.Sprintf("has unexpected value for namespace: %s", compiler.Display(v2))
errors = append(errors, compiler.NewError(context, message))
}
}
// string prefix = 3;
v3 := compiler.MapValueForKey(m, "prefix")
if v3 != nil {
- x.Prefix, ok = v3.(string)
+ x.Prefix, ok = compiler.StringForScalarNode(v3)
if !ok {
- message := fmt.Sprintf("has unexpected value for prefix: %+v (%T)", v3, v3)
+ message := fmt.Sprintf("has unexpected value for prefix: %s", compiler.Display(v3))
errors = append(errors, compiler.NewError(context, message))
}
}
// bool attribute = 4;
v4 := compiler.MapValueForKey(m, "attribute")
if v4 != nil {
- x.Attribute, ok = v4.(bool)
+ x.Attribute, ok = compiler.BoolForScalarNode(v4)
if !ok {
- message := fmt.Sprintf("has unexpected value for attribute: %+v (%T)", v4, v4)
+ message := fmt.Sprintf("has unexpected value for attribute: %s", compiler.Display(v4))
errors = append(errors, compiler.NewError(context, message))
}
}
// bool wrapped = 5;
v5 := compiler.MapValueForKey(m, "wrapped")
if v5 != nil {
- x.Wrapped, ok = v5.(bool)
+ x.Wrapped, ok = compiler.BoolForScalarNode(v5)
if !ok {
- message := fmt.Sprintf("has unexpected value for wrapped: %+v (%T)", v5, v5)
+ message := fmt.Sprintf("has unexpected value for wrapped: %s", compiler.Display(v5))
errors = append(errors, compiler.NewError(context, message))
}
}
// repeated NamedAny vendor_extension = 6;
// MAP: Any ^x-
x.VendorExtension = make([]*NamedAny, 0)
- for _, item := range m {
- k, ok := compiler.StringValue(item.Key)
+ for i := 0; i < len(m.Content); i += 2 {
+ k, ok := compiler.StringForScalarNode(m.Content[i])
if ok {
- v := item.Value
+ v := m.Content[i+1]
if strings.HasPrefix(k, "x-") {
pair := &NamedAny{}
pair.Name = k
result := &Any{}
- handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+ handled, resultFromExt, err := compiler.CallExtension(context, v, k)
if handled {
if err != nil {
errors = append(errors, err)
} else {
- bytes, _ := yaml.Marshal(v)
+ bytes := compiler.Marshal(v)
result.Yaml = string(bytes)
result.Value = resultFromExt
pair.Value = result
}
} else {
- pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+ pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
if err != nil {
errors = append(errors, err)
}
@@ -5691,7 +5458,7 @@ func NewXml(in interface{}, context *compiler.Context) (*Xml, error) {
}
// ResolveReferences resolves references found inside AdditionalPropertiesItem objects.
-func (m *AdditionalPropertiesItem) ResolveReferences(root string) (interface{}, error) {
+func (m *AdditionalPropertiesItem) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
{
p, ok := m.Oneof.(*AdditionalPropertiesItem_Schema)
@@ -5706,13 +5473,13 @@ func (m *AdditionalPropertiesItem) ResolveReferences(root string) (interface{},
}
// ResolveReferences resolves references found inside Any objects.
-func (m *Any) ResolveReferences(root string) (interface{}, error) {
+func (m *Any) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
return nil, compiler.NewErrorGroupOrNil(errors)
}
// ResolveReferences resolves references found inside ApiKeySecurity objects.
-func (m *ApiKeySecurity) ResolveReferences(root string) (interface{}, error) {
+func (m *ApiKeySecurity) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
for _, item := range m.VendorExtension {
if item != nil {
@@ -5726,7 +5493,7 @@ func (m *ApiKeySecurity) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside BasicAuthenticationSecurity objects.
-func (m *BasicAuthenticationSecurity) ResolveReferences(root string) (interface{}, error) {
+func (m *BasicAuthenticationSecurity) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
for _, item := range m.VendorExtension {
if item != nil {
@@ -5740,7 +5507,7 @@ func (m *BasicAuthenticationSecurity) ResolveReferences(root string) (interface{
}
// ResolveReferences resolves references found inside BodyParameter objects.
-func (m *BodyParameter) ResolveReferences(root string) (interface{}, error) {
+func (m *BodyParameter) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
if m.Schema != nil {
_, err := m.Schema.ResolveReferences(root)
@@ -5760,7 +5527,7 @@ func (m *BodyParameter) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside Contact objects.
-func (m *Contact) ResolveReferences(root string) (interface{}, error) {
+func (m *Contact) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
for _, item := range m.VendorExtension {
if item != nil {
@@ -5774,7 +5541,7 @@ func (m *Contact) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside Default objects.
-func (m *Default) ResolveReferences(root string) (interface{}, error) {
+func (m *Default) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
for _, item := range m.AdditionalProperties {
if item != nil {
@@ -5788,7 +5555,7 @@ func (m *Default) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside Definitions objects.
-func (m *Definitions) ResolveReferences(root string) (interface{}, error) {
+func (m *Definitions) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
for _, item := range m.AdditionalProperties {
if item != nil {
@@ -5802,7 +5569,7 @@ func (m *Definitions) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside Document objects.
-func (m *Document) ResolveReferences(root string) (interface{}, error) {
+func (m *Document) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
if m.Info != nil {
_, err := m.Info.ResolveReferences(root)
@@ -5874,7 +5641,7 @@ func (m *Document) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside Examples objects.
-func (m *Examples) ResolveReferences(root string) (interface{}, error) {
+func (m *Examples) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
for _, item := range m.AdditionalProperties {
if item != nil {
@@ -5888,7 +5655,7 @@ func (m *Examples) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside ExternalDocs objects.
-func (m *ExternalDocs) ResolveReferences(root string) (interface{}, error) {
+func (m *ExternalDocs) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
for _, item := range m.VendorExtension {
if item != nil {
@@ -5902,7 +5669,7 @@ func (m *ExternalDocs) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside FileSchema objects.
-func (m *FileSchema) ResolveReferences(root string) (interface{}, error) {
+func (m *FileSchema) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
if m.Default != nil {
_, err := m.Default.ResolveReferences(root)
@@ -5934,7 +5701,7 @@ func (m *FileSchema) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside FormDataParameterSubSchema objects.
-func (m *FormDataParameterSubSchema) ResolveReferences(root string) (interface{}, error) {
+func (m *FormDataParameterSubSchema) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
if m.Items != nil {
_, err := m.Items.ResolveReferences(root)
@@ -5968,7 +5735,7 @@ func (m *FormDataParameterSubSchema) ResolveReferences(root string) (interface{}
}
// ResolveReferences resolves references found inside Header objects.
-func (m *Header) ResolveReferences(root string) (interface{}, error) {
+func (m *Header) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
if m.Items != nil {
_, err := m.Items.ResolveReferences(root)
@@ -6002,7 +5769,7 @@ func (m *Header) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside HeaderParameterSubSchema objects.
-func (m *HeaderParameterSubSchema) ResolveReferences(root string) (interface{}, error) {
+func (m *HeaderParameterSubSchema) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
if m.Items != nil {
_, err := m.Items.ResolveReferences(root)
@@ -6036,7 +5803,7 @@ func (m *HeaderParameterSubSchema) ResolveReferences(root string) (interface{},
}
// ResolveReferences resolves references found inside Headers objects.
-func (m *Headers) ResolveReferences(root string) (interface{}, error) {
+func (m *Headers) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
for _, item := range m.AdditionalProperties {
if item != nil {
@@ -6050,7 +5817,7 @@ func (m *Headers) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside Info objects.
-func (m *Info) ResolveReferences(root string) (interface{}, error) {
+func (m *Info) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
if m.Contact != nil {
_, err := m.Contact.ResolveReferences(root)
@@ -6076,7 +5843,7 @@ func (m *Info) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside ItemsItem objects.
-func (m *ItemsItem) ResolveReferences(root string) (interface{}, error) {
+func (m *ItemsItem) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
for _, item := range m.Schema {
if item != nil {
@@ -6090,7 +5857,7 @@ func (m *ItemsItem) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside JsonReference objects.
-func (m *JsonReference) ResolveReferences(root string) (interface{}, error) {
+func (m *JsonReference) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
if m.XRef != "" {
info, err := compiler.ReadInfoForRef(root, m.XRef)
@@ -6110,7 +5877,7 @@ func (m *JsonReference) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside License objects.
-func (m *License) ResolveReferences(root string) (interface{}, error) {
+func (m *License) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
for _, item := range m.VendorExtension {
if item != nil {
@@ -6124,7 +5891,7 @@ func (m *License) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside NamedAny objects.
-func (m *NamedAny) ResolveReferences(root string) (interface{}, error) {
+func (m *NamedAny) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
if m.Value != nil {
_, err := m.Value.ResolveReferences(root)
@@ -6136,7 +5903,7 @@ func (m *NamedAny) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside NamedHeader objects.
-func (m *NamedHeader) ResolveReferences(root string) (interface{}, error) {
+func (m *NamedHeader) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
if m.Value != nil {
_, err := m.Value.ResolveReferences(root)
@@ -6148,7 +5915,7 @@ func (m *NamedHeader) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside NamedParameter objects.
-func (m *NamedParameter) ResolveReferences(root string) (interface{}, error) {
+func (m *NamedParameter) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
if m.Value != nil {
_, err := m.Value.ResolveReferences(root)
@@ -6160,7 +5927,7 @@ func (m *NamedParameter) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside NamedPathItem objects.
-func (m *NamedPathItem) ResolveReferences(root string) (interface{}, error) {
+func (m *NamedPathItem) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
if m.Value != nil {
_, err := m.Value.ResolveReferences(root)
@@ -6172,7 +5939,7 @@ func (m *NamedPathItem) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside NamedResponse objects.
-func (m *NamedResponse) ResolveReferences(root string) (interface{}, error) {
+func (m *NamedResponse) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
if m.Value != nil {
_, err := m.Value.ResolveReferences(root)
@@ -6184,7 +5951,7 @@ func (m *NamedResponse) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside NamedResponseValue objects.
-func (m *NamedResponseValue) ResolveReferences(root string) (interface{}, error) {
+func (m *NamedResponseValue) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
if m.Value != nil {
_, err := m.Value.ResolveReferences(root)
@@ -6196,7 +5963,7 @@ func (m *NamedResponseValue) ResolveReferences(root string) (interface{}, error)
}
// ResolveReferences resolves references found inside NamedSchema objects.
-func (m *NamedSchema) ResolveReferences(root string) (interface{}, error) {
+func (m *NamedSchema) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
if m.Value != nil {
_, err := m.Value.ResolveReferences(root)
@@ -6208,7 +5975,7 @@ func (m *NamedSchema) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside NamedSecurityDefinitionsItem objects.
-func (m *NamedSecurityDefinitionsItem) ResolveReferences(root string) (interface{}, error) {
+func (m *NamedSecurityDefinitionsItem) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
if m.Value != nil {
_, err := m.Value.ResolveReferences(root)
@@ -6220,13 +5987,13 @@ func (m *NamedSecurityDefinitionsItem) ResolveReferences(root string) (interface
}
// ResolveReferences resolves references found inside NamedString objects.
-func (m *NamedString) ResolveReferences(root string) (interface{}, error) {
+func (m *NamedString) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
return nil, compiler.NewErrorGroupOrNil(errors)
}
// ResolveReferences resolves references found inside NamedStringArray objects.
-func (m *NamedStringArray) ResolveReferences(root string) (interface{}, error) {
+func (m *NamedStringArray) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
if m.Value != nil {
_, err := m.Value.ResolveReferences(root)
@@ -6238,7 +6005,7 @@ func (m *NamedStringArray) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside NonBodyParameter objects.
-func (m *NonBodyParameter) ResolveReferences(root string) (interface{}, error) {
+func (m *NonBodyParameter) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
{
p, ok := m.Oneof.(*NonBodyParameter_HeaderParameterSubSchema)
@@ -6280,7 +6047,7 @@ func (m *NonBodyParameter) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside Oauth2AccessCodeSecurity objects.
-func (m *Oauth2AccessCodeSecurity) ResolveReferences(root string) (interface{}, error) {
+func (m *Oauth2AccessCodeSecurity) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
if m.Scopes != nil {
_, err := m.Scopes.ResolveReferences(root)
@@ -6300,7 +6067,7 @@ func (m *Oauth2AccessCodeSecurity) ResolveReferences(root string) (interface{},
}
// ResolveReferences resolves references found inside Oauth2ApplicationSecurity objects.
-func (m *Oauth2ApplicationSecurity) ResolveReferences(root string) (interface{}, error) {
+func (m *Oauth2ApplicationSecurity) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
if m.Scopes != nil {
_, err := m.Scopes.ResolveReferences(root)
@@ -6320,7 +6087,7 @@ func (m *Oauth2ApplicationSecurity) ResolveReferences(root string) (interface{},
}
// ResolveReferences resolves references found inside Oauth2ImplicitSecurity objects.
-func (m *Oauth2ImplicitSecurity) ResolveReferences(root string) (interface{}, error) {
+func (m *Oauth2ImplicitSecurity) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
if m.Scopes != nil {
_, err := m.Scopes.ResolveReferences(root)
@@ -6340,7 +6107,7 @@ func (m *Oauth2ImplicitSecurity) ResolveReferences(root string) (interface{}, er
}
// ResolveReferences resolves references found inside Oauth2PasswordSecurity objects.
-func (m *Oauth2PasswordSecurity) ResolveReferences(root string) (interface{}, error) {
+func (m *Oauth2PasswordSecurity) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
if m.Scopes != nil {
_, err := m.Scopes.ResolveReferences(root)
@@ -6360,7 +6127,7 @@ func (m *Oauth2PasswordSecurity) ResolveReferences(root string) (interface{}, er
}
// ResolveReferences resolves references found inside Oauth2Scopes objects.
-func (m *Oauth2Scopes) ResolveReferences(root string) (interface{}, error) {
+func (m *Oauth2Scopes) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
for _, item := range m.AdditionalProperties {
if item != nil {
@@ -6374,7 +6141,7 @@ func (m *Oauth2Scopes) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside Operation objects.
-func (m *Operation) ResolveReferences(root string) (interface{}, error) {
+func (m *Operation) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
if m.ExternalDocs != nil {
_, err := m.ExternalDocs.ResolveReferences(root)
@@ -6416,7 +6183,7 @@ func (m *Operation) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside Parameter objects.
-func (m *Parameter) ResolveReferences(root string) (interface{}, error) {
+func (m *Parameter) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
{
p, ok := m.Oneof.(*Parameter_BodyParameter)
@@ -6440,7 +6207,7 @@ func (m *Parameter) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside ParameterDefinitions objects.
-func (m *ParameterDefinitions) ResolveReferences(root string) (interface{}, error) {
+func (m *ParameterDefinitions) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
for _, item := range m.AdditionalProperties {
if item != nil {
@@ -6454,7 +6221,7 @@ func (m *ParameterDefinitions) ResolveReferences(root string) (interface{}, erro
}
// ResolveReferences resolves references found inside ParametersItem objects.
-func (m *ParametersItem) ResolveReferences(root string) (interface{}, error) {
+func (m *ParametersItem) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
{
p, ok := m.Oneof.(*ParametersItem_Parameter)
@@ -6486,7 +6253,7 @@ func (m *ParametersItem) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside PathItem objects.
-func (m *PathItem) ResolveReferences(root string) (interface{}, error) {
+func (m *PathItem) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
if m.XRef != "" {
info, err := compiler.ReadInfoForRef(root, m.XRef)
@@ -6564,7 +6331,7 @@ func (m *PathItem) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside PathParameterSubSchema objects.
-func (m *PathParameterSubSchema) ResolveReferences(root string) (interface{}, error) {
+func (m *PathParameterSubSchema) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
if m.Items != nil {
_, err := m.Items.ResolveReferences(root)
@@ -6598,7 +6365,7 @@ func (m *PathParameterSubSchema) ResolveReferences(root string) (interface{}, er
}
// ResolveReferences resolves references found inside Paths objects.
-func (m *Paths) ResolveReferences(root string) (interface{}, error) {
+func (m *Paths) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
for _, item := range m.VendorExtension {
if item != nil {
@@ -6620,7 +6387,7 @@ func (m *Paths) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside PrimitivesItems objects.
-func (m *PrimitivesItems) ResolveReferences(root string) (interface{}, error) {
+func (m *PrimitivesItems) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
if m.Items != nil {
_, err := m.Items.ResolveReferences(root)
@@ -6654,7 +6421,7 @@ func (m *PrimitivesItems) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside Properties objects.
-func (m *Properties) ResolveReferences(root string) (interface{}, error) {
+func (m *Properties) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
for _, item := range m.AdditionalProperties {
if item != nil {
@@ -6668,7 +6435,7 @@ func (m *Properties) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside QueryParameterSubSchema objects.
-func (m *QueryParameterSubSchema) ResolveReferences(root string) (interface{}, error) {
+func (m *QueryParameterSubSchema) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
if m.Items != nil {
_, err := m.Items.ResolveReferences(root)
@@ -6702,7 +6469,7 @@ func (m *QueryParameterSubSchema) ResolveReferences(root string) (interface{}, e
}
// ResolveReferences resolves references found inside Response objects.
-func (m *Response) ResolveReferences(root string) (interface{}, error) {
+func (m *Response) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
if m.Schema != nil {
_, err := m.Schema.ResolveReferences(root)
@@ -6734,7 +6501,7 @@ func (m *Response) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside ResponseDefinitions objects.
-func (m *ResponseDefinitions) ResolveReferences(root string) (interface{}, error) {
+func (m *ResponseDefinitions) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
for _, item := range m.AdditionalProperties {
if item != nil {
@@ -6748,7 +6515,7 @@ func (m *ResponseDefinitions) ResolveReferences(root string) (interface{}, error
}
// ResolveReferences resolves references found inside ResponseValue objects.
-func (m *ResponseValue) ResolveReferences(root string) (interface{}, error) {
+func (m *ResponseValue) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
{
p, ok := m.Oneof.(*ResponseValue_Response)
@@ -6780,7 +6547,7 @@ func (m *ResponseValue) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside Responses objects.
-func (m *Responses) ResolveReferences(root string) (interface{}, error) {
+func (m *Responses) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
for _, item := range m.ResponseCode {
if item != nil {
@@ -6802,7 +6569,7 @@ func (m *Responses) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside Schema objects.
-func (m *Schema) ResolveReferences(root string) (interface{}, error) {
+func (m *Schema) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
if m.XRef != "" {
info, err := compiler.ReadInfoForRef(root, m.XRef)
@@ -6894,7 +6661,7 @@ func (m *Schema) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside SchemaItem objects.
-func (m *SchemaItem) ResolveReferences(root string) (interface{}, error) {
+func (m *SchemaItem) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
{
p, ok := m.Oneof.(*SchemaItem_Schema)
@@ -6918,7 +6685,7 @@ func (m *SchemaItem) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside SecurityDefinitions objects.
-func (m *SecurityDefinitions) ResolveReferences(root string) (interface{}, error) {
+func (m *SecurityDefinitions) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
for _, item := range m.AdditionalProperties {
if item != nil {
@@ -6932,7 +6699,7 @@ func (m *SecurityDefinitions) ResolveReferences(root string) (interface{}, error
}
// ResolveReferences resolves references found inside SecurityDefinitionsItem objects.
-func (m *SecurityDefinitionsItem) ResolveReferences(root string) (interface{}, error) {
+func (m *SecurityDefinitionsItem) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
{
p, ok := m.Oneof.(*SecurityDefinitionsItem_BasicAuthenticationSecurity)
@@ -6992,7 +6759,7 @@ func (m *SecurityDefinitionsItem) ResolveReferences(root string) (interface{}, e
}
// ResolveReferences resolves references found inside SecurityRequirement objects.
-func (m *SecurityRequirement) ResolveReferences(root string) (interface{}, error) {
+func (m *SecurityRequirement) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
for _, item := range m.AdditionalProperties {
if item != nil {
@@ -7006,13 +6773,13 @@ func (m *SecurityRequirement) ResolveReferences(root string) (interface{}, error
}
// ResolveReferences resolves references found inside StringArray objects.
-func (m *StringArray) ResolveReferences(root string) (interface{}, error) {
+func (m *StringArray) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
return nil, compiler.NewErrorGroupOrNil(errors)
}
// ResolveReferences resolves references found inside Tag objects.
-func (m *Tag) ResolveReferences(root string) (interface{}, error) {
+func (m *Tag) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
if m.ExternalDocs != nil {
_, err := m.ExternalDocs.ResolveReferences(root)
@@ -7032,13 +6799,13 @@ func (m *Tag) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside TypeItem objects.
-func (m *TypeItem) ResolveReferences(root string) (interface{}, error) {
+func (m *TypeItem) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
return nil, compiler.NewErrorGroupOrNil(errors)
}
// ResolveReferences resolves references found inside VendorExtension objects.
-func (m *VendorExtension) ResolveReferences(root string) (interface{}, error) {
+func (m *VendorExtension) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
for _, item := range m.AdditionalProperties {
if item != nil {
@@ -7052,7 +6819,7 @@ func (m *VendorExtension) ResolveReferences(root string) (interface{}, error) {
}
// ResolveReferences resolves references found inside Xml objects.
-func (m *Xml) ResolveReferences(root string) (interface{}, error) {
+func (m *Xml) ResolveReferences(root string) (*yaml.Node, error) {
errors := make([]error, 0)
for _, item := range m.VendorExtension {
if item != nil {
@@ -7066,7 +6833,7 @@ func (m *Xml) ResolveReferences(root string) (interface{}, error) {
}
// ToRawInfo returns a description of AdditionalPropertiesItem suitable for JSON or YAML export.
-func (m *AdditionalPropertiesItem) ToRawInfo() interface{} {
+func (m *AdditionalPropertiesItem) ToRawInfo() *yaml.Node {
// ONE OF WRAPPER
// AdditionalPropertiesItem
// {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
@@ -7076,792 +6843,886 @@ func (m *AdditionalPropertiesItem) ToRawInfo() interface{} {
}
// {Name:boolean Type:bool StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if v1, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok {
- return v1.Boolean
+ return compiler.NewScalarNodeForBool(v1.Boolean)
}
- return nil
+ return compiler.NewNullNode()
}
// ToRawInfo returns a description of Any suitable for JSON or YAML export.
-func (m *Any) ToRawInfo() interface{} {
+func (m *Any) ToRawInfo() *yaml.Node {
var err error
- var info1 []yaml.MapSlice
- err = yaml.Unmarshal([]byte(m.Yaml), &info1)
+ var node yaml.Node
+ err = yaml.Unmarshal([]byte(m.Yaml), &node)
if err == nil {
- return info1
- }
- var info2 yaml.MapSlice
- err = yaml.Unmarshal([]byte(m.Yaml), &info2)
- if err == nil {
- return info2
- }
- var info3 interface{}
- err = yaml.Unmarshal([]byte(m.Yaml), &info3)
- if err == nil {
- return info3
+ if node.Kind == yaml.DocumentNode {
+ return node.Content[0]
+ }
+ return &node
}
- return nil
+ return compiler.NewNullNode()
}
// ToRawInfo returns a description of ApiKeySecurity suitable for JSON or YAML export.
-func (m *ApiKeySecurity) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *ApiKeySecurity) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
// always include this required field.
- info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
// always include this required field.
- info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
// always include this required field.
- info = append(info, yaml.MapItem{Key: "in", Value: m.In})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("in"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In))
if m.Description != "" {
- info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
}
if m.VendorExtension != nil {
for _, item := range m.VendorExtension {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
return info
}
// ToRawInfo returns a description of BasicAuthenticationSecurity suitable for JSON or YAML export.
-func (m *BasicAuthenticationSecurity) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *BasicAuthenticationSecurity) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
// always include this required field.
- info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
if m.Description != "" {
- info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
}
if m.VendorExtension != nil {
for _, item := range m.VendorExtension {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
return info
}
// ToRawInfo returns a description of BodyParameter suitable for JSON or YAML export.
-func (m *BodyParameter) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *BodyParameter) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if m.Description != "" {
- info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
}
// always include this required field.
- info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
// always include this required field.
- info = append(info, yaml.MapItem{Key: "in", Value: m.In})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("in"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In))
if m.Required != false {
- info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("required"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required))
}
// always include this required field.
- info = append(info, yaml.MapItem{Key: "schema", Value: m.Schema.ToRawInfo()})
- // &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("schema"))
+ info.Content = append(info.Content, m.Schema.ToRawInfo())
if m.VendorExtension != nil {
for _, item := range m.VendorExtension {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
return info
}
// ToRawInfo returns a description of Contact suitable for JSON or YAML export.
-func (m *Contact) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *Contact) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if m.Name != "" {
- info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
}
if m.Url != "" {
- info = append(info, yaml.MapItem{Key: "url", Value: m.Url})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("url"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Url))
}
if m.Email != "" {
- info = append(info, yaml.MapItem{Key: "email", Value: m.Email})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("email"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Email))
}
if m.VendorExtension != nil {
for _, item := range m.VendorExtension {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
return info
}
// ToRawInfo returns a description of Default suitable for JSON or YAML export.
-func (m *Default) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *Default) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if m.AdditionalProperties != nil {
for _, item := range m.AdditionalProperties {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:false Description:}
return info
}
// ToRawInfo returns a description of Definitions suitable for JSON or YAML export.
-func (m *Definitions) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *Definitions) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if m.AdditionalProperties != nil {
for _, item := range m.AdditionalProperties {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:}
return info
}
// ToRawInfo returns a description of Document suitable for JSON or YAML export.
-func (m *Document) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *Document) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
// always include this required field.
- info = append(info, yaml.MapItem{Key: "swagger", Value: m.Swagger})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("swagger"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Swagger))
// always include this required field.
- info = append(info, yaml.MapItem{Key: "info", Value: m.Info.ToRawInfo()})
- // &{Name:info Type:Info StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("info"))
+ info.Content = append(info.Content, m.Info.ToRawInfo())
if m.Host != "" {
- info = append(info, yaml.MapItem{Key: "host", Value: m.Host})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("host"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Host))
}
if m.BasePath != "" {
- info = append(info, yaml.MapItem{Key: "basePath", Value: m.BasePath})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("basePath"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.BasePath))
}
if len(m.Schemes) != 0 {
- info = append(info, yaml.MapItem{Key: "schemes", Value: m.Schemes})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("schemes"))
+ info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Schemes))
}
if len(m.Consumes) != 0 {
- info = append(info, yaml.MapItem{Key: "consumes", Value: m.Consumes})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("consumes"))
+ info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Consumes))
}
if len(m.Produces) != 0 {
- info = append(info, yaml.MapItem{Key: "produces", Value: m.Produces})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("produces"))
+ info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Produces))
}
// always include this required field.
- info = append(info, yaml.MapItem{Key: "paths", Value: m.Paths.ToRawInfo()})
- // &{Name:paths Type:Paths StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("paths"))
+ info.Content = append(info.Content, m.Paths.ToRawInfo())
if m.Definitions != nil {
- info = append(info, yaml.MapItem{Key: "definitions", Value: m.Definitions.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("definitions"))
+ info.Content = append(info.Content, m.Definitions.ToRawInfo())
}
- // &{Name:definitions Type:Definitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.Parameters != nil {
- info = append(info, yaml.MapItem{Key: "parameters", Value: m.Parameters.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("parameters"))
+ info.Content = append(info.Content, m.Parameters.ToRawInfo())
}
- // &{Name:parameters Type:ParameterDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.Responses != nil {
- info = append(info, yaml.MapItem{Key: "responses", Value: m.Responses.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("responses"))
+ info.Content = append(info.Content, m.Responses.ToRawInfo())
}
- // &{Name:responses Type:ResponseDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if len(m.Security) != 0 {
- items := make([]interface{}, 0)
+ items := compiler.NewSequenceNode()
for _, item := range m.Security {
- items = append(items, item.ToRawInfo())
+ items.Content = append(items.Content, item.ToRawInfo())
}
- info = append(info, yaml.MapItem{Key: "security", Value: items})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("security"))
+ info.Content = append(info.Content, items)
}
- // &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
if m.SecurityDefinitions != nil {
- info = append(info, yaml.MapItem{Key: "securityDefinitions", Value: m.SecurityDefinitions.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("securityDefinitions"))
+ info.Content = append(info.Content, m.SecurityDefinitions.ToRawInfo())
}
- // &{Name:securityDefinitions Type:SecurityDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if len(m.Tags) != 0 {
- items := make([]interface{}, 0)
+ items := compiler.NewSequenceNode()
for _, item := range m.Tags {
- items = append(items, item.ToRawInfo())
+ items.Content = append(items.Content, item.ToRawInfo())
}
- info = append(info, yaml.MapItem{Key: "tags", Value: items})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("tags"))
+ info.Content = append(info.Content, items)
}
- // &{Name:tags Type:Tag StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
if m.ExternalDocs != nil {
- info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs"))
+ info.Content = append(info.Content, m.ExternalDocs.ToRawInfo())
}
- // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.VendorExtension != nil {
for _, item := range m.VendorExtension {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
return info
}
// ToRawInfo returns a description of Examples suitable for JSON or YAML export.
-func (m *Examples) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *Examples) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if m.AdditionalProperties != nil {
for _, item := range m.AdditionalProperties {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:}
return info
}
// ToRawInfo returns a description of ExternalDocs suitable for JSON or YAML export.
-func (m *ExternalDocs) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *ExternalDocs) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if m.Description != "" {
- info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
}
// always include this required field.
- info = append(info, yaml.MapItem{Key: "url", Value: m.Url})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("url"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Url))
if m.VendorExtension != nil {
for _, item := range m.VendorExtension {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
return info
}
// ToRawInfo returns a description of FileSchema suitable for JSON or YAML export.
-func (m *FileSchema) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *FileSchema) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if m.Format != "" {
- info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("format"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format))
}
if m.Title != "" {
- info = append(info, yaml.MapItem{Key: "title", Value: m.Title})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("title"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Title))
}
if m.Description != "" {
- info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
}
if m.Default != nil {
- info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("default"))
+ info.Content = append(info.Content, m.Default.ToRawInfo())
}
- // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if len(m.Required) != 0 {
- info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("required"))
+ info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Required))
}
// always include this required field.
- info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
if m.ReadOnly != false {
- info = append(info, yaml.MapItem{Key: "readOnly", Value: m.ReadOnly})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("readOnly"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ReadOnly))
}
if m.ExternalDocs != nil {
- info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs"))
+ info.Content = append(info.Content, m.ExternalDocs.ToRawInfo())
}
- // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.Example != nil {
- info = append(info, yaml.MapItem{Key: "example", Value: m.Example.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("example"))
+ info.Content = append(info.Content, m.Example.ToRawInfo())
}
- // &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.VendorExtension != nil {
for _, item := range m.VendorExtension {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
return info
}
// ToRawInfo returns a description of FormDataParameterSubSchema suitable for JSON or YAML export.
-func (m *FormDataParameterSubSchema) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if m.Required != false {
- info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("required"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required))
}
if m.In != "" {
- info = append(info, yaml.MapItem{Key: "in", Value: m.In})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("in"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In))
}
if m.Description != "" {
- info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
}
if m.Name != "" {
- info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
}
if m.AllowEmptyValue != false {
- info = append(info, yaml.MapItem{Key: "allowEmptyValue", Value: m.AllowEmptyValue})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("allowEmptyValue"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowEmptyValue))
}
if m.Type != "" {
- info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
}
if m.Format != "" {
- info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("format"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format))
}
if m.Items != nil {
- info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("items"))
+ info.Content = append(info.Content, m.Items.ToRawInfo())
}
- // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.CollectionFormat != "" {
- info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat))
}
if m.Default != nil {
- info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("default"))
+ info.Content = append(info.Content, m.Default.ToRawInfo())
}
- // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.Maximum != 0.0 {
- info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
}
if m.ExclusiveMaximum != false {
- info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
}
if m.Minimum != 0.0 {
- info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
}
if m.ExclusiveMinimum != false {
- info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
}
if m.MaxLength != 0 {
- info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength))
}
if m.MinLength != 0 {
- info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength))
}
if m.Pattern != "" {
- info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern))
}
if m.MaxItems != 0 {
- info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems))
}
if m.MinItems != 0 {
- info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
}
if m.UniqueItems != false {
- info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
}
if len(m.Enum) != 0 {
- items := make([]interface{}, 0)
+ items := compiler.NewSequenceNode()
for _, item := range m.Enum {
- items = append(items, item.ToRawInfo())
+ items.Content = append(items.Content, item.ToRawInfo())
}
- info = append(info, yaml.MapItem{Key: "enum", Value: items})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("enum"))
+ info.Content = append(info.Content, items)
}
- // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
if m.MultipleOf != 0.0 {
- info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf))
}
if m.VendorExtension != nil {
for _, item := range m.VendorExtension {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
return info
}
// ToRawInfo returns a description of Header suitable for JSON or YAML export.
-func (m *Header) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *Header) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
// always include this required field.
- info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
if m.Format != "" {
- info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("format"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format))
}
if m.Items != nil {
- info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("items"))
+ info.Content = append(info.Content, m.Items.ToRawInfo())
}
- // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.CollectionFormat != "" {
- info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat))
}
if m.Default != nil {
- info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("default"))
+ info.Content = append(info.Content, m.Default.ToRawInfo())
}
- // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.Maximum != 0.0 {
- info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
}
if m.ExclusiveMaximum != false {
- info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
}
if m.Minimum != 0.0 {
- info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
}
if m.ExclusiveMinimum != false {
- info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
}
if m.MaxLength != 0 {
- info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength))
}
if m.MinLength != 0 {
- info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength))
}
if m.Pattern != "" {
- info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern))
}
if m.MaxItems != 0 {
- info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems))
}
if m.MinItems != 0 {
- info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
}
if m.UniqueItems != false {
- info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
}
if len(m.Enum) != 0 {
- items := make([]interface{}, 0)
+ items := compiler.NewSequenceNode()
for _, item := range m.Enum {
- items = append(items, item.ToRawInfo())
+ items.Content = append(items.Content, item.ToRawInfo())
}
- info = append(info, yaml.MapItem{Key: "enum", Value: items})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("enum"))
+ info.Content = append(info.Content, items)
}
- // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
if m.MultipleOf != 0.0 {
- info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf))
}
if m.Description != "" {
- info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
}
if m.VendorExtension != nil {
for _, item := range m.VendorExtension {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
return info
}
// ToRawInfo returns a description of HeaderParameterSubSchema suitable for JSON or YAML export.
-func (m *HeaderParameterSubSchema) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *HeaderParameterSubSchema) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if m.Required != false {
- info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("required"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required))
}
if m.In != "" {
- info = append(info, yaml.MapItem{Key: "in", Value: m.In})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("in"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In))
}
if m.Description != "" {
- info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
}
if m.Name != "" {
- info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
}
if m.Type != "" {
- info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
}
if m.Format != "" {
- info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("format"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format))
}
if m.Items != nil {
- info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("items"))
+ info.Content = append(info.Content, m.Items.ToRawInfo())
}
- // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.CollectionFormat != "" {
- info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat))
}
if m.Default != nil {
- info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("default"))
+ info.Content = append(info.Content, m.Default.ToRawInfo())
}
- // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.Maximum != 0.0 {
- info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
}
if m.ExclusiveMaximum != false {
- info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
}
if m.Minimum != 0.0 {
- info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
}
if m.ExclusiveMinimum != false {
- info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
}
if m.MaxLength != 0 {
- info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength))
}
if m.MinLength != 0 {
- info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength))
}
if m.Pattern != "" {
- info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern))
}
if m.MaxItems != 0 {
- info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems))
}
if m.MinItems != 0 {
- info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
}
if m.UniqueItems != false {
- info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
}
if len(m.Enum) != 0 {
- items := make([]interface{}, 0)
+ items := compiler.NewSequenceNode()
for _, item := range m.Enum {
- items = append(items, item.ToRawInfo())
+ items.Content = append(items.Content, item.ToRawInfo())
}
- info = append(info, yaml.MapItem{Key: "enum", Value: items})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("enum"))
+ info.Content = append(info.Content, items)
}
- // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
if m.MultipleOf != 0.0 {
- info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf))
}
if m.VendorExtension != nil {
for _, item := range m.VendorExtension {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
return info
}
// ToRawInfo returns a description of Headers suitable for JSON or YAML export.
-func (m *Headers) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *Headers) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if m.AdditionalProperties != nil {
for _, item := range m.AdditionalProperties {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:additionalProperties Type:NamedHeader StringEnumValues:[] MapType:Header Repeated:true Pattern: Implicit:true Description:}
return info
}
// ToRawInfo returns a description of Info suitable for JSON or YAML export.
-func (m *Info) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *Info) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
// always include this required field.
- info = append(info, yaml.MapItem{Key: "title", Value: m.Title})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("title"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Title))
// always include this required field.
- info = append(info, yaml.MapItem{Key: "version", Value: m.Version})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("version"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Version))
if m.Description != "" {
- info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
}
if m.TermsOfService != "" {
- info = append(info, yaml.MapItem{Key: "termsOfService", Value: m.TermsOfService})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("termsOfService"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TermsOfService))
}
if m.Contact != nil {
- info = append(info, yaml.MapItem{Key: "contact", Value: m.Contact.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("contact"))
+ info.Content = append(info.Content, m.Contact.ToRawInfo())
}
- // &{Name:contact Type:Contact StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.License != nil {
- info = append(info, yaml.MapItem{Key: "license", Value: m.License.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("license"))
+ info.Content = append(info.Content, m.License.ToRawInfo())
}
- // &{Name:license Type:License StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.VendorExtension != nil {
for _, item := range m.VendorExtension {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
return info
}
// ToRawInfo returns a description of ItemsItem suitable for JSON or YAML export.
-func (m *ItemsItem) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *ItemsItem) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if len(m.Schema) != 0 {
- items := make([]interface{}, 0)
+ items := compiler.NewSequenceNode()
for _, item := range m.Schema {
- items = append(items, item.ToRawInfo())
+ items.Content = append(items.Content, item.ToRawInfo())
}
- info = append(info, yaml.MapItem{Key: "schema", Value: items})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("schema"))
+ info.Content = append(info.Content, items)
}
- // &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
return info
}
// ToRawInfo returns a description of JsonReference suitable for JSON or YAML export.
-func (m *JsonReference) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *JsonReference) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
// always include this required field.
- info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("$ref"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.XRef))
if m.Description != "" {
- info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
}
return info
}
// ToRawInfo returns a description of License suitable for JSON or YAML export.
-func (m *License) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *License) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
// always include this required field.
- info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
if m.Url != "" {
- info = append(info, yaml.MapItem{Key: "url", Value: m.Url})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("url"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Url))
}
if m.VendorExtension != nil {
for _, item := range m.VendorExtension {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
return info
}
// ToRawInfo returns a description of NamedAny suitable for JSON or YAML export.
-func (m *NamedAny) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *NamedAny) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if m.Name != "" {
- info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
+ }
+ if m.Value != nil {
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("value"))
+ info.Content = append(info.Content, m.Value.ToRawInfo())
}
- // &{Name:value Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
return info
}
// ToRawInfo returns a description of NamedHeader suitable for JSON or YAML export.
-func (m *NamedHeader) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *NamedHeader) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if m.Name != "" {
- info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
}
// &{Name:value Type:Header StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
return info
}
// ToRawInfo returns a description of NamedParameter suitable for JSON or YAML export.
-func (m *NamedParameter) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *NamedParameter) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if m.Name != "" {
- info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
}
// &{Name:value Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
return info
}
// ToRawInfo returns a description of NamedPathItem suitable for JSON or YAML export.
-func (m *NamedPathItem) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *NamedPathItem) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if m.Name != "" {
- info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
}
// &{Name:value Type:PathItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
return info
}
// ToRawInfo returns a description of NamedResponse suitable for JSON or YAML export.
-func (m *NamedResponse) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *NamedResponse) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if m.Name != "" {
- info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
}
// &{Name:value Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
return info
}
// ToRawInfo returns a description of NamedResponseValue suitable for JSON or YAML export.
-func (m *NamedResponseValue) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *NamedResponseValue) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if m.Name != "" {
- info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
}
// &{Name:value Type:ResponseValue StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
return info
}
// ToRawInfo returns a description of NamedSchema suitable for JSON or YAML export.
-func (m *NamedSchema) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *NamedSchema) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if m.Name != "" {
- info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
}
// &{Name:value Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
return info
}
// ToRawInfo returns a description of NamedSecurityDefinitionsItem suitable for JSON or YAML export.
-func (m *NamedSecurityDefinitionsItem) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *NamedSecurityDefinitionsItem) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if m.Name != "" {
- info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
}
// &{Name:value Type:SecurityDefinitionsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
return info
}
// ToRawInfo returns a description of NamedString suitable for JSON or YAML export.
-func (m *NamedString) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *NamedString) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if m.Name != "" {
- info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
}
if m.Value != "" {
- info = append(info, yaml.MapItem{Key: "value", Value: m.Value})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("value"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Value))
}
return info
}
// ToRawInfo returns a description of NamedStringArray suitable for JSON or YAML export.
-func (m *NamedStringArray) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *NamedStringArray) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if m.Name != "" {
- info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
}
// &{Name:value Type:StringArray StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
return info
}
// ToRawInfo returns a description of NonBodyParameter suitable for JSON or YAML export.
-func (m *NonBodyParameter) ToRawInfo() interface{} {
+func (m *NonBodyParameter) ToRawInfo() *yaml.Node {
// ONE OF WRAPPER
// NonBodyParameter
// {Name:headerParameterSubSchema Type:HeaderParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
@@ -7884,126 +7745,143 @@ func (m *NonBodyParameter) ToRawInfo() interface{} {
if v3 != nil {
return v3.ToRawInfo()
}
- return nil
+ return compiler.NewNullNode()
}
// ToRawInfo returns a description of Oauth2AccessCodeSecurity suitable for JSON or YAML export.
-func (m *Oauth2AccessCodeSecurity) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *Oauth2AccessCodeSecurity) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
// always include this required field.
- info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
// always include this required field.
- info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("flow"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow))
if m.Scopes != nil {
- info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes"))
+ info.Content = append(info.Content, m.Scopes.ToRawInfo())
}
- // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
// always include this required field.
- info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("authorizationUrl"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.AuthorizationUrl))
// always include this required field.
- info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("tokenUrl"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TokenUrl))
if m.Description != "" {
- info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
}
if m.VendorExtension != nil {
for _, item := range m.VendorExtension {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
return info
}
// ToRawInfo returns a description of Oauth2ApplicationSecurity suitable for JSON or YAML export.
-func (m *Oauth2ApplicationSecurity) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *Oauth2ApplicationSecurity) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
// always include this required field.
- info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
// always include this required field.
- info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("flow"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow))
if m.Scopes != nil {
- info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes"))
+ info.Content = append(info.Content, m.Scopes.ToRawInfo())
}
- // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
// always include this required field.
- info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("tokenUrl"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TokenUrl))
if m.Description != "" {
- info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
}
if m.VendorExtension != nil {
for _, item := range m.VendorExtension {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
return info
}
// ToRawInfo returns a description of Oauth2ImplicitSecurity suitable for JSON or YAML export.
-func (m *Oauth2ImplicitSecurity) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *Oauth2ImplicitSecurity) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
// always include this required field.
- info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
// always include this required field.
- info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("flow"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow))
if m.Scopes != nil {
- info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes"))
+ info.Content = append(info.Content, m.Scopes.ToRawInfo())
}
- // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
// always include this required field.
- info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("authorizationUrl"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.AuthorizationUrl))
if m.Description != "" {
- info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
}
if m.VendorExtension != nil {
for _, item := range m.VendorExtension {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
return info
}
// ToRawInfo returns a description of Oauth2PasswordSecurity suitable for JSON or YAML export.
-func (m *Oauth2PasswordSecurity) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *Oauth2PasswordSecurity) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
// always include this required field.
- info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
// always include this required field.
- info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("flow"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow))
if m.Scopes != nil {
- info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes"))
+ info.Content = append(info.Content, m.Scopes.ToRawInfo())
}
- // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
// always include this required field.
- info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("tokenUrl"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TokenUrl))
if m.Description != "" {
- info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
}
if m.VendorExtension != nil {
for _, item := range m.VendorExtension {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
return info
}
// ToRawInfo returns a description of Oauth2Scopes suitable for JSON or YAML export.
-func (m *Oauth2Scopes) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *Oauth2Scopes) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
@@ -8012,69 +7890,77 @@ func (m *Oauth2Scopes) ToRawInfo() interface{} {
}
// ToRawInfo returns a description of Operation suitable for JSON or YAML export.
-func (m *Operation) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *Operation) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if len(m.Tags) != 0 {
- info = append(info, yaml.MapItem{Key: "tags", Value: m.Tags})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("tags"))
+ info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Tags))
}
if m.Summary != "" {
- info = append(info, yaml.MapItem{Key: "summary", Value: m.Summary})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("summary"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Summary))
}
if m.Description != "" {
- info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
}
if m.ExternalDocs != nil {
- info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs"))
+ info.Content = append(info.Content, m.ExternalDocs.ToRawInfo())
}
- // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.OperationId != "" {
- info = append(info, yaml.MapItem{Key: "operationId", Value: m.OperationId})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("operationId"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.OperationId))
}
if len(m.Produces) != 0 {
- info = append(info, yaml.MapItem{Key: "produces", Value: m.Produces})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("produces"))
+ info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Produces))
}
if len(m.Consumes) != 0 {
- info = append(info, yaml.MapItem{Key: "consumes", Value: m.Consumes})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("consumes"))
+ info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Consumes))
}
if len(m.Parameters) != 0 {
- items := make([]interface{}, 0)
+ items := compiler.NewSequenceNode()
for _, item := range m.Parameters {
- items = append(items, item.ToRawInfo())
+ items.Content = append(items.Content, item.ToRawInfo())
}
- info = append(info, yaml.MapItem{Key: "parameters", Value: items})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("parameters"))
+ info.Content = append(info.Content, items)
}
- // &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.}
// always include this required field.
- info = append(info, yaml.MapItem{Key: "responses", Value: m.Responses.ToRawInfo()})
- // &{Name:responses Type:Responses StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("responses"))
+ info.Content = append(info.Content, m.Responses.ToRawInfo())
if len(m.Schemes) != 0 {
- info = append(info, yaml.MapItem{Key: "schemes", Value: m.Schemes})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("schemes"))
+ info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Schemes))
}
if m.Deprecated != false {
- info = append(info, yaml.MapItem{Key: "deprecated", Value: m.Deprecated})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("deprecated"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Deprecated))
}
if len(m.Security) != 0 {
- items := make([]interface{}, 0)
+ items := compiler.NewSequenceNode()
for _, item := range m.Security {
- items = append(items, item.ToRawInfo())
+ items.Content = append(items.Content, item.ToRawInfo())
}
- info = append(info, yaml.MapItem{Key: "security", Value: items})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("security"))
+ info.Content = append(info.Content, items)
}
- // &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
if m.VendorExtension != nil {
for _, item := range m.VendorExtension {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
return info
}
// ToRawInfo returns a description of Parameter suitable for JSON or YAML export.
-func (m *Parameter) ToRawInfo() interface{} {
+func (m *Parameter) ToRawInfo() *yaml.Node {
// ONE OF WRAPPER
// Parameter
// {Name:bodyParameter Type:BodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
@@ -8087,26 +7973,26 @@ func (m *Parameter) ToRawInfo() interface{} {
if v1 != nil {
return v1.ToRawInfo()
}
- return nil
+ return compiler.NewNullNode()
}
// ToRawInfo returns a description of ParameterDefinitions suitable for JSON or YAML export.
-func (m *ParameterDefinitions) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *ParameterDefinitions) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if m.AdditionalProperties != nil {
for _, item := range m.AdditionalProperties {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:additionalProperties Type:NamedParameter StringEnumValues:[] MapType:Parameter Repeated:true Pattern: Implicit:true Description:}
return info
}
// ToRawInfo returns a description of ParametersItem suitable for JSON or YAML export.
-func (m *ParametersItem) ToRawInfo() interface{} {
+func (m *ParametersItem) ToRawInfo() *yaml.Node {
// ONE OF WRAPPER
// ParametersItem
// {Name:parameter Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
@@ -8119,390 +8005,443 @@ func (m *ParametersItem) ToRawInfo() interface{} {
if v1 != nil {
return v1.ToRawInfo()
}
- return nil
+ return compiler.NewNullNode()
}
// ToRawInfo returns a description of PathItem suitable for JSON or YAML export.
-func (m *PathItem) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *PathItem) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if m.XRef != "" {
- info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("$ref"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.XRef))
}
if m.Get != nil {
- info = append(info, yaml.MapItem{Key: "get", Value: m.Get.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("get"))
+ info.Content = append(info.Content, m.Get.ToRawInfo())
}
- // &{Name:get Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.Put != nil {
- info = append(info, yaml.MapItem{Key: "put", Value: m.Put.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("put"))
+ info.Content = append(info.Content, m.Put.ToRawInfo())
}
- // &{Name:put Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.Post != nil {
- info = append(info, yaml.MapItem{Key: "post", Value: m.Post.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("post"))
+ info.Content = append(info.Content, m.Post.ToRawInfo())
}
- // &{Name:post Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.Delete != nil {
- info = append(info, yaml.MapItem{Key: "delete", Value: m.Delete.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("delete"))
+ info.Content = append(info.Content, m.Delete.ToRawInfo())
}
- // &{Name:delete Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.Options != nil {
- info = append(info, yaml.MapItem{Key: "options", Value: m.Options.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("options"))
+ info.Content = append(info.Content, m.Options.ToRawInfo())
}
- // &{Name:options Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.Head != nil {
- info = append(info, yaml.MapItem{Key: "head", Value: m.Head.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("head"))
+ info.Content = append(info.Content, m.Head.ToRawInfo())
}
- // &{Name:head Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.Patch != nil {
- info = append(info, yaml.MapItem{Key: "patch", Value: m.Patch.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("patch"))
+ info.Content = append(info.Content, m.Patch.ToRawInfo())
}
- // &{Name:patch Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if len(m.Parameters) != 0 {
- items := make([]interface{}, 0)
+ items := compiler.NewSequenceNode()
for _, item := range m.Parameters {
- items = append(items, item.ToRawInfo())
+ items.Content = append(items.Content, item.ToRawInfo())
}
- info = append(info, yaml.MapItem{Key: "parameters", Value: items})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("parameters"))
+ info.Content = append(info.Content, items)
}
- // &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.}
if m.VendorExtension != nil {
for _, item := range m.VendorExtension {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
return info
}
// ToRawInfo returns a description of PathParameterSubSchema suitable for JSON or YAML export.
-func (m *PathParameterSubSchema) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *PathParameterSubSchema) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
// always include this required field.
- info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("required"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required))
if m.In != "" {
- info = append(info, yaml.MapItem{Key: "in", Value: m.In})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("in"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In))
}
if m.Description != "" {
- info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
}
if m.Name != "" {
- info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
}
if m.Type != "" {
- info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
}
if m.Format != "" {
- info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("format"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format))
}
if m.Items != nil {
- info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("items"))
+ info.Content = append(info.Content, m.Items.ToRawInfo())
}
- // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.CollectionFormat != "" {
- info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat))
}
if m.Default != nil {
- info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("default"))
+ info.Content = append(info.Content, m.Default.ToRawInfo())
}
- // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.Maximum != 0.0 {
- info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
}
if m.ExclusiveMaximum != false {
- info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
}
if m.Minimum != 0.0 {
- info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
}
if m.ExclusiveMinimum != false {
- info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
}
if m.MaxLength != 0 {
- info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength))
}
if m.MinLength != 0 {
- info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength))
}
if m.Pattern != "" {
- info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern))
}
if m.MaxItems != 0 {
- info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems))
}
if m.MinItems != 0 {
- info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
}
if m.UniqueItems != false {
- info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
}
if len(m.Enum) != 0 {
- items := make([]interface{}, 0)
+ items := compiler.NewSequenceNode()
for _, item := range m.Enum {
- items = append(items, item.ToRawInfo())
+ items.Content = append(items.Content, item.ToRawInfo())
}
- info = append(info, yaml.MapItem{Key: "enum", Value: items})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("enum"))
+ info.Content = append(info.Content, items)
}
- // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
if m.MultipleOf != 0.0 {
- info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf))
}
if m.VendorExtension != nil {
for _, item := range m.VendorExtension {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
return info
}
// ToRawInfo returns a description of Paths suitable for JSON or YAML export.
-func (m *Paths) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *Paths) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if m.VendorExtension != nil {
for _, item := range m.VendorExtension {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
if m.Path != nil {
for _, item := range m.Path {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:Path Type:NamedPathItem StringEnumValues:[] MapType:PathItem Repeated:true Pattern:^/ Implicit:true Description:}
return info
}
// ToRawInfo returns a description of PrimitivesItems suitable for JSON or YAML export.
-func (m *PrimitivesItems) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *PrimitivesItems) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if m.Type != "" {
- info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
}
if m.Format != "" {
- info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("format"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format))
}
if m.Items != nil {
- info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("items"))
+ info.Content = append(info.Content, m.Items.ToRawInfo())
}
- // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.CollectionFormat != "" {
- info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat))
}
if m.Default != nil {
- info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("default"))
+ info.Content = append(info.Content, m.Default.ToRawInfo())
}
- // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.Maximum != 0.0 {
- info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
}
if m.ExclusiveMaximum != false {
- info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
}
if m.Minimum != 0.0 {
- info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
}
if m.ExclusiveMinimum != false {
- info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
}
if m.MaxLength != 0 {
- info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength))
}
if m.MinLength != 0 {
- info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength))
}
if m.Pattern != "" {
- info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern))
}
if m.MaxItems != 0 {
- info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems))
}
if m.MinItems != 0 {
- info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
}
if m.UniqueItems != false {
- info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
}
if len(m.Enum) != 0 {
- items := make([]interface{}, 0)
+ items := compiler.NewSequenceNode()
for _, item := range m.Enum {
- items = append(items, item.ToRawInfo())
+ items.Content = append(items.Content, item.ToRawInfo())
}
- info = append(info, yaml.MapItem{Key: "enum", Value: items})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("enum"))
+ info.Content = append(info.Content, items)
}
- // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
if m.MultipleOf != 0.0 {
- info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf))
}
if m.VendorExtension != nil {
for _, item := range m.VendorExtension {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
return info
}
// ToRawInfo returns a description of Properties suitable for JSON or YAML export.
-func (m *Properties) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *Properties) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if m.AdditionalProperties != nil {
for _, item := range m.AdditionalProperties {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:}
return info
}
// ToRawInfo returns a description of QueryParameterSubSchema suitable for JSON or YAML export.
-func (m *QueryParameterSubSchema) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if m.Required != false {
- info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("required"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required))
}
if m.In != "" {
- info = append(info, yaml.MapItem{Key: "in", Value: m.In})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("in"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In))
}
if m.Description != "" {
- info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
}
if m.Name != "" {
- info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
}
if m.AllowEmptyValue != false {
- info = append(info, yaml.MapItem{Key: "allowEmptyValue", Value: m.AllowEmptyValue})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("allowEmptyValue"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowEmptyValue))
}
if m.Type != "" {
- info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
}
if m.Format != "" {
- info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("format"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format))
}
if m.Items != nil {
- info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("items"))
+ info.Content = append(info.Content, m.Items.ToRawInfo())
}
- // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.CollectionFormat != "" {
- info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat))
}
if m.Default != nil {
- info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("default"))
+ info.Content = append(info.Content, m.Default.ToRawInfo())
}
- // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.Maximum != 0.0 {
- info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
}
if m.ExclusiveMaximum != false {
- info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
}
if m.Minimum != 0.0 {
- info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
}
if m.ExclusiveMinimum != false {
- info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
}
if m.MaxLength != 0 {
- info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength))
}
if m.MinLength != 0 {
- info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength))
}
if m.Pattern != "" {
- info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern))
}
if m.MaxItems != 0 {
- info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems))
}
if m.MinItems != 0 {
- info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
}
if m.UniqueItems != false {
- info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
}
if len(m.Enum) != 0 {
- items := make([]interface{}, 0)
+ items := compiler.NewSequenceNode()
for _, item := range m.Enum {
- items = append(items, item.ToRawInfo())
+ items.Content = append(items.Content, item.ToRawInfo())
}
- info = append(info, yaml.MapItem{Key: "enum", Value: items})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("enum"))
+ info.Content = append(info.Content, items)
}
- // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
if m.MultipleOf != 0.0 {
- info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf))
}
if m.VendorExtension != nil {
for _, item := range m.VendorExtension {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
return info
}
// ToRawInfo returns a description of Response suitable for JSON or YAML export.
-func (m *Response) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *Response) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
// always include this required field.
- info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
if m.Schema != nil {
- info = append(info, yaml.MapItem{Key: "schema", Value: m.Schema.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("schema"))
+ info.Content = append(info.Content, m.Schema.ToRawInfo())
}
- // &{Name:schema Type:SchemaItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.Headers != nil {
- info = append(info, yaml.MapItem{Key: "headers", Value: m.Headers.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("headers"))
+ info.Content = append(info.Content, m.Headers.ToRawInfo())
}
- // &{Name:headers Type:Headers StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.Examples != nil {
- info = append(info, yaml.MapItem{Key: "examples", Value: m.Examples.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("examples"))
+ info.Content = append(info.Content, m.Examples.ToRawInfo())
}
- // &{Name:examples Type:Examples StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.VendorExtension != nil {
for _, item := range m.VendorExtension {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
return info
}
// ToRawInfo returns a description of ResponseDefinitions suitable for JSON or YAML export.
-func (m *ResponseDefinitions) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *ResponseDefinitions) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if m.AdditionalProperties != nil {
for _, item := range m.AdditionalProperties {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:additionalProperties Type:NamedResponse StringEnumValues:[] MapType:Response Repeated:true Pattern: Implicit:true Description:}
return info
}
// ToRawInfo returns a description of ResponseValue suitable for JSON or YAML export.
-func (m *ResponseValue) ToRawInfo() interface{} {
+func (m *ResponseValue) ToRawInfo() *yaml.Node {
// ONE OF WRAPPER
// ResponseValue
// {Name:response Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
@@ -8515,163 +8454,187 @@ func (m *ResponseValue) ToRawInfo() interface{} {
if v1 != nil {
return v1.ToRawInfo()
}
- return nil
+ return compiler.NewNullNode()
}
// ToRawInfo returns a description of Responses suitable for JSON or YAML export.
-func (m *Responses) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *Responses) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if m.ResponseCode != nil {
for _, item := range m.ResponseCode {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:ResponseCode Type:NamedResponseValue StringEnumValues:[] MapType:ResponseValue Repeated:true Pattern:^([0-9]{3})$|^(default)$ Implicit:true Description:}
if m.VendorExtension != nil {
for _, item := range m.VendorExtension {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
return info
}
// ToRawInfo returns a description of Schema suitable for JSON or YAML export.
-func (m *Schema) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *Schema) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if m.XRef != "" {
- info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("$ref"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.XRef))
}
if m.Format != "" {
- info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("format"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format))
}
if m.Title != "" {
- info = append(info, yaml.MapItem{Key: "title", Value: m.Title})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("title"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Title))
}
if m.Description != "" {
- info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
}
if m.Default != nil {
- info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("default"))
+ info.Content = append(info.Content, m.Default.ToRawInfo())
}
- // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.MultipleOf != 0.0 {
- info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf))
}
if m.Maximum != 0.0 {
- info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
}
if m.ExclusiveMaximum != false {
- info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
}
if m.Minimum != 0.0 {
- info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
}
if m.ExclusiveMinimum != false {
- info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
}
if m.MaxLength != 0 {
- info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength))
}
if m.MinLength != 0 {
- info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength))
}
if m.Pattern != "" {
- info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern))
}
if m.MaxItems != 0 {
- info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems))
}
if m.MinItems != 0 {
- info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
}
if m.UniqueItems != false {
- info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
}
if m.MaxProperties != 0 {
- info = append(info, yaml.MapItem{Key: "maxProperties", Value: m.MaxProperties})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("maxProperties"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxProperties))
}
if m.MinProperties != 0 {
- info = append(info, yaml.MapItem{Key: "minProperties", Value: m.MinProperties})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("minProperties"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinProperties))
}
if len(m.Required) != 0 {
- info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("required"))
+ info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Required))
}
if len(m.Enum) != 0 {
- items := make([]interface{}, 0)
+ items := compiler.NewSequenceNode()
for _, item := range m.Enum {
- items = append(items, item.ToRawInfo())
+ items.Content = append(items.Content, item.ToRawInfo())
}
- info = append(info, yaml.MapItem{Key: "enum", Value: items})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("enum"))
+ info.Content = append(info.Content, items)
}
- // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
if m.AdditionalProperties != nil {
- info = append(info, yaml.MapItem{Key: "additionalProperties", Value: m.AdditionalProperties.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("additionalProperties"))
+ info.Content = append(info.Content, m.AdditionalProperties.ToRawInfo())
}
- // &{Name:additionalProperties Type:AdditionalPropertiesItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.Type != nil {
if len(m.Type.Value) == 1 {
- info = append(info, yaml.MapItem{Key: "type", Value: m.Type.Value[0]})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type.Value[0]))
} else {
- info = append(info, yaml.MapItem{Key: "type", Value: m.Type.Value})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
+ info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Type.Value))
}
}
- // &{Name:type Type:TypeItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.Items != nil {
- items := make([]interface{}, 0)
+ items := compiler.NewSequenceNode()
for _, item := range m.Items.Schema {
- items = append(items, item.ToRawInfo())
+ items.Content = append(items.Content, item.ToRawInfo())
+ }
+ if len(items.Content) == 1 {
+ items = items.Content[0]
}
- info = append(info, yaml.MapItem{Key: "items", Value: items[0]})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("items"))
+ info.Content = append(info.Content, items)
}
- // &{Name:items Type:ItemsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if len(m.AllOf) != 0 {
- items := make([]interface{}, 0)
+ items := compiler.NewSequenceNode()
for _, item := range m.AllOf {
- items = append(items, item.ToRawInfo())
+ items.Content = append(items.Content, item.ToRawInfo())
}
- info = append(info, yaml.MapItem{Key: "allOf", Value: items})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("allOf"))
+ info.Content = append(info.Content, items)
}
- // &{Name:allOf Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
if m.Properties != nil {
- info = append(info, yaml.MapItem{Key: "properties", Value: m.Properties.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("properties"))
+ info.Content = append(info.Content, m.Properties.ToRawInfo())
}
- // &{Name:properties Type:Properties StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.Discriminator != "" {
- info = append(info, yaml.MapItem{Key: "discriminator", Value: m.Discriminator})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("discriminator"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Discriminator))
}
if m.ReadOnly != false {
- info = append(info, yaml.MapItem{Key: "readOnly", Value: m.ReadOnly})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("readOnly"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ReadOnly))
}
if m.Xml != nil {
- info = append(info, yaml.MapItem{Key: "xml", Value: m.Xml.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("xml"))
+ info.Content = append(info.Content, m.Xml.ToRawInfo())
}
- // &{Name:xml Type:Xml StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.ExternalDocs != nil {
- info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs"))
+ info.Content = append(info.Content, m.ExternalDocs.ToRawInfo())
}
- // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.Example != nil {
- info = append(info, yaml.MapItem{Key: "example", Value: m.Example.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("example"))
+ info.Content = append(info.Content, m.Example.ToRawInfo())
}
- // &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.VendorExtension != nil {
for _, item := range m.VendorExtension {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
return info
}
// ToRawInfo returns a description of SchemaItem suitable for JSON or YAML export.
-func (m *SchemaItem) ToRawInfo() interface{} {
+func (m *SchemaItem) ToRawInfo() *yaml.Node {
// ONE OF WRAPPER
// SchemaItem
// {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
@@ -8684,26 +8647,26 @@ func (m *SchemaItem) ToRawInfo() interface{} {
if v1 != nil {
return v1.ToRawInfo()
}
- return nil
+ return compiler.NewNullNode()
}
// ToRawInfo returns a description of SecurityDefinitions suitable for JSON or YAML export.
-func (m *SecurityDefinitions) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *SecurityDefinitions) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if m.AdditionalProperties != nil {
for _, item := range m.AdditionalProperties {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:additionalProperties Type:NamedSecurityDefinitionsItem StringEnumValues:[] MapType:SecurityDefinitionsItem Repeated:true Pattern: Implicit:true Description:}
return info
}
// ToRawInfo returns a description of SecurityDefinitionsItem suitable for JSON or YAML export.
-func (m *SecurityDefinitionsItem) ToRawInfo() interface{} {
+func (m *SecurityDefinitionsItem) ToRawInfo() *yaml.Node {
// ONE OF WRAPPER
// SecurityDefinitionsItem
// {Name:basicAuthenticationSecurity Type:BasicAuthenticationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
@@ -8736,107 +8699,115 @@ func (m *SecurityDefinitionsItem) ToRawInfo() interface{} {
if v5 != nil {
return v5.ToRawInfo()
}
- return nil
+ return compiler.NewNullNode()
}
// ToRawInfo returns a description of SecurityRequirement suitable for JSON or YAML export.
-func (m *SecurityRequirement) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *SecurityRequirement) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if m.AdditionalProperties != nil {
for _, item := range m.AdditionalProperties {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:additionalProperties Type:NamedStringArray StringEnumValues:[] MapType:StringArray Repeated:true Pattern: Implicit:true Description:}
return info
}
// ToRawInfo returns a description of StringArray suitable for JSON or YAML export.
-func (m *StringArray) ToRawInfo() interface{} {
- return m.Value
+func (m *StringArray) ToRawInfo() *yaml.Node {
+ return compiler.NewSequenceNodeForStringArray(m.Value)
}
// ToRawInfo returns a description of Tag suitable for JSON or YAML export.
-func (m *Tag) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *Tag) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
// always include this required field.
- info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
if m.Description != "" {
- info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
}
if m.ExternalDocs != nil {
- info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs"))
+ info.Content = append(info.Content, m.ExternalDocs.ToRawInfo())
}
- // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
if m.VendorExtension != nil {
for _, item := range m.VendorExtension {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
return info
}
// ToRawInfo returns a description of TypeItem suitable for JSON or YAML export.
-func (m *TypeItem) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *TypeItem) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if len(m.Value) != 0 {
- info = append(info, yaml.MapItem{Key: "value", Value: m.Value})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("value"))
+ info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Value))
}
return info
}
// ToRawInfo returns a description of VendorExtension suitable for JSON or YAML export.
-func (m *VendorExtension) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *VendorExtension) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if m.AdditionalProperties != nil {
for _, item := range m.AdditionalProperties {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:}
return info
}
// ToRawInfo returns a description of Xml suitable for JSON or YAML export.
-func (m *Xml) ToRawInfo() interface{} {
- info := yaml.MapSlice{}
+func (m *Xml) ToRawInfo() *yaml.Node {
+ info := compiler.NewMappingNode()
if m == nil {
return info
}
if m.Name != "" {
- info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
}
if m.Namespace != "" {
- info = append(info, yaml.MapItem{Key: "namespace", Value: m.Namespace})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("namespace"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Namespace))
}
if m.Prefix != "" {
- info = append(info, yaml.MapItem{Key: "prefix", Value: m.Prefix})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("prefix"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Prefix))
}
if m.Attribute != false {
- info = append(info, yaml.MapItem{Key: "attribute", Value: m.Attribute})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("attribute"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Attribute))
}
if m.Wrapped != false {
- info = append(info, yaml.MapItem{Key: "wrapped", Value: m.Wrapped})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("wrapped"))
+ info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Wrapped))
}
if m.VendorExtension != nil {
for _, item := range m.VendorExtension {
- info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, item.Value.ToRawInfo())
}
}
- // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
return info
}
diff --git a/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.pb.go b/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.pb.go
index 55a6cb516..8a5f302f3 100644
--- a/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.pb.go
+++ b/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.pb.go
@@ -1,77 +1,85 @@
+// Copyright 2020 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// THIS FILE IS AUTOMATICALLY GENERATED.
+
// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.15.5
// source: openapiv2/OpenAPIv2.proto
package openapi_v2
import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- any "github.com/golang/protobuf/ptypes/any"
- math "math"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ reflect "reflect"
+ sync "sync"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
type AdditionalPropertiesItem struct {
- // Types that are valid to be assigned to Oneof:
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Oneof:
// *AdditionalPropertiesItem_Schema
// *AdditionalPropertiesItem_Boolean
- Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"`
}
-func (m *AdditionalPropertiesItem) Reset() { *m = AdditionalPropertiesItem{} }
-func (m *AdditionalPropertiesItem) String() string { return proto.CompactTextString(m) }
-func (*AdditionalPropertiesItem) ProtoMessage() {}
-func (*AdditionalPropertiesItem) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{0}
+func (x *AdditionalPropertiesItem) Reset() {
+ *x = AdditionalPropertiesItem{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *AdditionalPropertiesItem) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_AdditionalPropertiesItem.Unmarshal(m, b)
-}
-func (m *AdditionalPropertiesItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_AdditionalPropertiesItem.Marshal(b, m, deterministic)
-}
-func (m *AdditionalPropertiesItem) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AdditionalPropertiesItem.Merge(m, src)
-}
-func (m *AdditionalPropertiesItem) XXX_Size() int {
- return xxx_messageInfo_AdditionalPropertiesItem.Size(m)
-}
-func (m *AdditionalPropertiesItem) XXX_DiscardUnknown() {
- xxx_messageInfo_AdditionalPropertiesItem.DiscardUnknown(m)
+func (x *AdditionalPropertiesItem) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-var xxx_messageInfo_AdditionalPropertiesItem proto.InternalMessageInfo
-
-type isAdditionalPropertiesItem_Oneof interface {
- isAdditionalPropertiesItem_Oneof()
-}
+func (*AdditionalPropertiesItem) ProtoMessage() {}
-type AdditionalPropertiesItem_Schema struct {
- Schema *Schema `protobuf:"bytes,1,opt,name=schema,proto3,oneof"`
+func (x *AdditionalPropertiesItem) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-type AdditionalPropertiesItem_Boolean struct {
- Boolean bool `protobuf:"varint,2,opt,name=boolean,proto3,oneof"`
+// Deprecated: Use AdditionalPropertiesItem.ProtoReflect.Descriptor instead.
+func (*AdditionalPropertiesItem) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{0}
}
-func (*AdditionalPropertiesItem_Schema) isAdditionalPropertiesItem_Oneof() {}
-
-func (*AdditionalPropertiesItem_Boolean) isAdditionalPropertiesItem_Oneof() {}
-
func (m *AdditionalPropertiesItem) GetOneof() isAdditionalPropertiesItem_Oneof {
if m != nil {
return m.Oneof
@@ -79,202 +87,238 @@ func (m *AdditionalPropertiesItem) GetOneof() isAdditionalPropertiesItem_Oneof {
return nil
}
-func (m *AdditionalPropertiesItem) GetSchema() *Schema {
- if x, ok := m.GetOneof().(*AdditionalPropertiesItem_Schema); ok {
+func (x *AdditionalPropertiesItem) GetSchema() *Schema {
+ if x, ok := x.GetOneof().(*AdditionalPropertiesItem_Schema); ok {
return x.Schema
}
return nil
}
-func (m *AdditionalPropertiesItem) GetBoolean() bool {
- if x, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok {
+func (x *AdditionalPropertiesItem) GetBoolean() bool {
+ if x, ok := x.GetOneof().(*AdditionalPropertiesItem_Boolean); ok {
return x.Boolean
}
return false
}
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*AdditionalPropertiesItem) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*AdditionalPropertiesItem_Schema)(nil),
- (*AdditionalPropertiesItem_Boolean)(nil),
- }
+type isAdditionalPropertiesItem_Oneof interface {
+ isAdditionalPropertiesItem_Oneof()
}
-type Any struct {
- Value *any.Any `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
- Yaml string `protobuf:"bytes,2,opt,name=yaml,proto3" json:"yaml,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+type AdditionalPropertiesItem_Schema struct {
+ Schema *Schema `protobuf:"bytes,1,opt,name=schema,proto3,oneof"`
}
-func (m *Any) Reset() { *m = Any{} }
-func (m *Any) String() string { return proto.CompactTextString(m) }
-func (*Any) ProtoMessage() {}
-func (*Any) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{1}
+type AdditionalPropertiesItem_Boolean struct {
+ Boolean bool `protobuf:"varint,2,opt,name=boolean,proto3,oneof"`
}
-func (m *Any) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Any.Unmarshal(m, b)
-}
-func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Any.Marshal(b, m, deterministic)
+func (*AdditionalPropertiesItem_Schema) isAdditionalPropertiesItem_Oneof() {}
+
+func (*AdditionalPropertiesItem_Boolean) isAdditionalPropertiesItem_Oneof() {}
+
+type Any struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Value *anypb.Any `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+ Yaml string `protobuf:"bytes,2,opt,name=yaml,proto3" json:"yaml,omitempty"`
}
-func (m *Any) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Any.Merge(m, src)
+
+func (x *Any) Reset() {
+ *x = Any{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Any) XXX_Size() int {
- return xxx_messageInfo_Any.Size(m)
+
+func (x *Any) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Any) XXX_DiscardUnknown() {
- xxx_messageInfo_Any.DiscardUnknown(m)
+
+func (*Any) ProtoMessage() {}
+
+func (x *Any) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Any proto.InternalMessageInfo
+// Deprecated: Use Any.ProtoReflect.Descriptor instead.
+func (*Any) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{1}
+}
-func (m *Any) GetValue() *any.Any {
- if m != nil {
- return m.Value
+func (x *Any) GetValue() *anypb.Any {
+ if x != nil {
+ return x.Value
}
return nil
}
-func (m *Any) GetYaml() string {
- if m != nil {
- return m.Yaml
+func (x *Any) GetYaml() string {
+ if x != nil {
+ return x.Yaml
}
return ""
}
type ApiKeySecurity struct {
- Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
- Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
- In string `protobuf:"bytes,3,opt,name=in,proto3" json:"in,omitempty"`
- Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"`
- VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ApiKeySecurity) Reset() { *m = ApiKeySecurity{} }
-func (m *ApiKeySecurity) String() string { return proto.CompactTextString(m) }
-func (*ApiKeySecurity) ProtoMessage() {}
-func (*ApiKeySecurity) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{2}
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *ApiKeySecurity) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ApiKeySecurity.Unmarshal(m, b)
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+ In string `protobuf:"bytes,3,opt,name=in,proto3" json:"in,omitempty"`
+ Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"`
+ VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
}
-func (m *ApiKeySecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ApiKeySecurity.Marshal(b, m, deterministic)
-}
-func (m *ApiKeySecurity) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ApiKeySecurity.Merge(m, src)
+
+func (x *ApiKeySecurity) Reset() {
+ *x = ApiKeySecurity{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *ApiKeySecurity) XXX_Size() int {
- return xxx_messageInfo_ApiKeySecurity.Size(m)
+
+func (x *ApiKeySecurity) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *ApiKeySecurity) XXX_DiscardUnknown() {
- xxx_messageInfo_ApiKeySecurity.DiscardUnknown(m)
+
+func (*ApiKeySecurity) ProtoMessage() {}
+
+func (x *ApiKeySecurity) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_ApiKeySecurity proto.InternalMessageInfo
+// Deprecated: Use ApiKeySecurity.ProtoReflect.Descriptor instead.
+func (*ApiKeySecurity) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{2}
+}
-func (m *ApiKeySecurity) GetType() string {
- if m != nil {
- return m.Type
+func (x *ApiKeySecurity) GetType() string {
+ if x != nil {
+ return x.Type
}
return ""
}
-func (m *ApiKeySecurity) GetName() string {
- if m != nil {
- return m.Name
+func (x *ApiKeySecurity) GetName() string {
+ if x != nil {
+ return x.Name
}
return ""
}
-func (m *ApiKeySecurity) GetIn() string {
- if m != nil {
- return m.In
+func (x *ApiKeySecurity) GetIn() string {
+ if x != nil {
+ return x.In
}
return ""
}
-func (m *ApiKeySecurity) GetDescription() string {
- if m != nil {
- return m.Description
+func (x *ApiKeySecurity) GetDescription() string {
+ if x != nil {
+ return x.Description
}
return ""
}
-func (m *ApiKeySecurity) GetVendorExtension() []*NamedAny {
- if m != nil {
- return m.VendorExtension
+func (x *ApiKeySecurity) GetVendorExtension() []*NamedAny {
+ if x != nil {
+ return x.VendorExtension
}
return nil
}
type BasicAuthenticationSecurity struct {
- Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
- Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
- VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *BasicAuthenticationSecurity) Reset() { *m = BasicAuthenticationSecurity{} }
-func (m *BasicAuthenticationSecurity) String() string { return proto.CompactTextString(m) }
-func (*BasicAuthenticationSecurity) ProtoMessage() {}
-func (*BasicAuthenticationSecurity) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{3}
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
}
-func (m *BasicAuthenticationSecurity) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_BasicAuthenticationSecurity.Unmarshal(m, b)
-}
-func (m *BasicAuthenticationSecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_BasicAuthenticationSecurity.Marshal(b, m, deterministic)
-}
-func (m *BasicAuthenticationSecurity) XXX_Merge(src proto.Message) {
- xxx_messageInfo_BasicAuthenticationSecurity.Merge(m, src)
+func (x *BasicAuthenticationSecurity) Reset() {
+ *x = BasicAuthenticationSecurity{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *BasicAuthenticationSecurity) XXX_Size() int {
- return xxx_messageInfo_BasicAuthenticationSecurity.Size(m)
+
+func (x *BasicAuthenticationSecurity) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *BasicAuthenticationSecurity) XXX_DiscardUnknown() {
- xxx_messageInfo_BasicAuthenticationSecurity.DiscardUnknown(m)
+
+func (*BasicAuthenticationSecurity) ProtoMessage() {}
+
+func (x *BasicAuthenticationSecurity) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_BasicAuthenticationSecurity proto.InternalMessageInfo
+// Deprecated: Use BasicAuthenticationSecurity.ProtoReflect.Descriptor instead.
+func (*BasicAuthenticationSecurity) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{3}
+}
-func (m *BasicAuthenticationSecurity) GetType() string {
- if m != nil {
- return m.Type
+func (x *BasicAuthenticationSecurity) GetType() string {
+ if x != nil {
+ return x.Type
}
return ""
}
-func (m *BasicAuthenticationSecurity) GetDescription() string {
- if m != nil {
- return m.Description
+func (x *BasicAuthenticationSecurity) GetDescription() string {
+ if x != nil {
+ return x.Description
}
return ""
}
-func (m *BasicAuthenticationSecurity) GetVendorExtension() []*NamedAny {
- if m != nil {
- return m.VendorExtension
+func (x *BasicAuthenticationSecurity) GetVendorExtension() []*NamedAny {
+ if x != nil {
+ return x.VendorExtension
}
return nil
}
type BodyParameter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
// A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed.
Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
// The name of the parameter.
@@ -282,228 +326,260 @@ type BodyParameter struct {
// Determines the location of the parameter.
In string `protobuf:"bytes,3,opt,name=in,proto3" json:"in,omitempty"`
// Determines whether or not this parameter is required or optional.
- Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"`
- Schema *Schema `protobuf:"bytes,5,opt,name=schema,proto3" json:"schema,omitempty"`
- VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"`
+ Schema *Schema `protobuf:"bytes,5,opt,name=schema,proto3" json:"schema,omitempty"`
+ VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
}
-func (m *BodyParameter) Reset() { *m = BodyParameter{} }
-func (m *BodyParameter) String() string { return proto.CompactTextString(m) }
-func (*BodyParameter) ProtoMessage() {}
-func (*BodyParameter) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{4}
+func (x *BodyParameter) Reset() {
+ *x = BodyParameter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *BodyParameter) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_BodyParameter.Unmarshal(m, b)
-}
-func (m *BodyParameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_BodyParameter.Marshal(b, m, deterministic)
-}
-func (m *BodyParameter) XXX_Merge(src proto.Message) {
- xxx_messageInfo_BodyParameter.Merge(m, src)
-}
-func (m *BodyParameter) XXX_Size() int {
- return xxx_messageInfo_BodyParameter.Size(m)
+func (x *BodyParameter) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *BodyParameter) XXX_DiscardUnknown() {
- xxx_messageInfo_BodyParameter.DiscardUnknown(m)
+
+func (*BodyParameter) ProtoMessage() {}
+
+func (x *BodyParameter) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_BodyParameter proto.InternalMessageInfo
+// Deprecated: Use BodyParameter.ProtoReflect.Descriptor instead.
+func (*BodyParameter) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{4}
+}
-func (m *BodyParameter) GetDescription() string {
- if m != nil {
- return m.Description
+func (x *BodyParameter) GetDescription() string {
+ if x != nil {
+ return x.Description
}
return ""
}
-func (m *BodyParameter) GetName() string {
- if m != nil {
- return m.Name
+func (x *BodyParameter) GetName() string {
+ if x != nil {
+ return x.Name
}
return ""
}
-func (m *BodyParameter) GetIn() string {
- if m != nil {
- return m.In
+func (x *BodyParameter) GetIn() string {
+ if x != nil {
+ return x.In
}
return ""
}
-func (m *BodyParameter) GetRequired() bool {
- if m != nil {
- return m.Required
+func (x *BodyParameter) GetRequired() bool {
+ if x != nil {
+ return x.Required
}
return false
}
-func (m *BodyParameter) GetSchema() *Schema {
- if m != nil {
- return m.Schema
+func (x *BodyParameter) GetSchema() *Schema {
+ if x != nil {
+ return x.Schema
}
return nil
}
-func (m *BodyParameter) GetVendorExtension() []*NamedAny {
- if m != nil {
- return m.VendorExtension
+func (x *BodyParameter) GetVendorExtension() []*NamedAny {
+ if x != nil {
+ return x.VendorExtension
}
return nil
}
// Contact information for the owners of the API.
type Contact struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
// The identifying name of the contact person/organization.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// The URL pointing to the contact information.
Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
// The email address of the contact person/organization.
- Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"`
- VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"`
+ VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
}
-func (m *Contact) Reset() { *m = Contact{} }
-func (m *Contact) String() string { return proto.CompactTextString(m) }
-func (*Contact) ProtoMessage() {}
-func (*Contact) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{5}
+func (x *Contact) Reset() {
+ *x = Contact{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Contact) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Contact.Unmarshal(m, b)
-}
-func (m *Contact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Contact.Marshal(b, m, deterministic)
+func (x *Contact) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Contact) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Contact.Merge(m, src)
-}
-func (m *Contact) XXX_Size() int {
- return xxx_messageInfo_Contact.Size(m)
-}
-func (m *Contact) XXX_DiscardUnknown() {
- xxx_messageInfo_Contact.DiscardUnknown(m)
+
+func (*Contact) ProtoMessage() {}
+
+func (x *Contact) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Contact proto.InternalMessageInfo
+// Deprecated: Use Contact.ProtoReflect.Descriptor instead.
+func (*Contact) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{5}
+}
-func (m *Contact) GetName() string {
- if m != nil {
- return m.Name
+func (x *Contact) GetName() string {
+ if x != nil {
+ return x.Name
}
return ""
}
-func (m *Contact) GetUrl() string {
- if m != nil {
- return m.Url
+func (x *Contact) GetUrl() string {
+ if x != nil {
+ return x.Url
}
return ""
}
-func (m *Contact) GetEmail() string {
- if m != nil {
- return m.Email
+func (x *Contact) GetEmail() string {
+ if x != nil {
+ return x.Email
}
return ""
}
-func (m *Contact) GetVendorExtension() []*NamedAny {
- if m != nil {
- return m.VendorExtension
+func (x *Contact) GetVendorExtension() []*NamedAny {
+ if x != nil {
+ return x.VendorExtension
}
return nil
}
type Default struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
}
-func (m *Default) Reset() { *m = Default{} }
-func (m *Default) String() string { return proto.CompactTextString(m) }
-func (*Default) ProtoMessage() {}
-func (*Default) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{6}
+func (x *Default) Reset() {
+ *x = Default{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Default) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Default.Unmarshal(m, b)
-}
-func (m *Default) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Default.Marshal(b, m, deterministic)
-}
-func (m *Default) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Default.Merge(m, src)
+func (x *Default) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Default) XXX_Size() int {
- return xxx_messageInfo_Default.Size(m)
-}
-func (m *Default) XXX_DiscardUnknown() {
- xxx_messageInfo_Default.DiscardUnknown(m)
+
+func (*Default) ProtoMessage() {}
+
+func (x *Default) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Default proto.InternalMessageInfo
+// Deprecated: Use Default.ProtoReflect.Descriptor instead.
+func (*Default) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{6}
+}
-func (m *Default) GetAdditionalProperties() []*NamedAny {
- if m != nil {
- return m.AdditionalProperties
+func (x *Default) GetAdditionalProperties() []*NamedAny {
+ if x != nil {
+ return x.AdditionalProperties
}
return nil
}
// One or more JSON objects describing the schemas being consumed and produced by the API.
type Definitions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
}
-func (m *Definitions) Reset() { *m = Definitions{} }
-func (m *Definitions) String() string { return proto.CompactTextString(m) }
-func (*Definitions) ProtoMessage() {}
-func (*Definitions) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{7}
+func (x *Definitions) Reset() {
+ *x = Definitions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Definitions) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Definitions.Unmarshal(m, b)
-}
-func (m *Definitions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Definitions.Marshal(b, m, deterministic)
-}
-func (m *Definitions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Definitions.Merge(m, src)
+func (x *Definitions) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Definitions) XXX_Size() int {
- return xxx_messageInfo_Definitions.Size(m)
-}
-func (m *Definitions) XXX_DiscardUnknown() {
- xxx_messageInfo_Definitions.DiscardUnknown(m)
+
+func (*Definitions) ProtoMessage() {}
+
+func (x *Definitions) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Definitions proto.InternalMessageInfo
+// Deprecated: Use Definitions.ProtoReflect.Descriptor instead.
+func (*Definitions) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{7}
+}
-func (m *Definitions) GetAdditionalProperties() []*NamedSchema {
- if m != nil {
- return m.AdditionalProperties
+func (x *Definitions) GetAdditionalProperties() []*NamedSchema {
+ if x != nil {
+ return x.AdditionalProperties
}
return nil
}
type Document struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
// The Swagger version of this document.
Swagger string `protobuf:"bytes,1,opt,name=swagger,proto3" json:"swagger,omitempty"`
Info *Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"`
@@ -516,366 +592,398 @@ type Document struct {
// A list of MIME types accepted by the API.
Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"`
// A list of MIME types the API can produce.
- Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"`
- Paths *Paths `protobuf:"bytes,8,opt,name=paths,proto3" json:"paths,omitempty"`
- Definitions *Definitions `protobuf:"bytes,9,opt,name=definitions,proto3" json:"definitions,omitempty"`
- Parameters *ParameterDefinitions `protobuf:"bytes,10,opt,name=parameters,proto3" json:"parameters,omitempty"`
- Responses *ResponseDefinitions `protobuf:"bytes,11,opt,name=responses,proto3" json:"responses,omitempty"`
- Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"`
- SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,13,opt,name=security_definitions,json=securityDefinitions,proto3" json:"security_definitions,omitempty"`
- Tags []*Tag `protobuf:"bytes,14,rep,name=tags,proto3" json:"tags,omitempty"`
- ExternalDocs *ExternalDocs `protobuf:"bytes,15,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
- VendorExtension []*NamedAny `protobuf:"bytes,16,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Document) Reset() { *m = Document{} }
-func (m *Document) String() string { return proto.CompactTextString(m) }
-func (*Document) ProtoMessage() {}
-func (*Document) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{8}
+ Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"`
+ Paths *Paths `protobuf:"bytes,8,opt,name=paths,proto3" json:"paths,omitempty"`
+ Definitions *Definitions `protobuf:"bytes,9,opt,name=definitions,proto3" json:"definitions,omitempty"`
+ Parameters *ParameterDefinitions `protobuf:"bytes,10,opt,name=parameters,proto3" json:"parameters,omitempty"`
+ Responses *ResponseDefinitions `protobuf:"bytes,11,opt,name=responses,proto3" json:"responses,omitempty"`
+ Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"`
+ SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,13,opt,name=security_definitions,json=securityDefinitions,proto3" json:"security_definitions,omitempty"`
+ Tags []*Tag `protobuf:"bytes,14,rep,name=tags,proto3" json:"tags,omitempty"`
+ ExternalDocs *ExternalDocs `protobuf:"bytes,15,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+ VendorExtension []*NamedAny `protobuf:"bytes,16,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
}
-func (m *Document) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Document.Unmarshal(m, b)
-}
-func (m *Document) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Document.Marshal(b, m, deterministic)
-}
-func (m *Document) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Document.Merge(m, src)
+func (x *Document) Reset() {
+ *x = Document{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Document) XXX_Size() int {
- return xxx_messageInfo_Document.Size(m)
+
+func (x *Document) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Document) XXX_DiscardUnknown() {
- xxx_messageInfo_Document.DiscardUnknown(m)
+
+func (*Document) ProtoMessage() {}
+
+func (x *Document) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Document proto.InternalMessageInfo
+// Deprecated: Use Document.ProtoReflect.Descriptor instead.
+func (*Document) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{8}
+}
-func (m *Document) GetSwagger() string {
- if m != nil {
- return m.Swagger
+func (x *Document) GetSwagger() string {
+ if x != nil {
+ return x.Swagger
}
return ""
}
-func (m *Document) GetInfo() *Info {
- if m != nil {
- return m.Info
+func (x *Document) GetInfo() *Info {
+ if x != nil {
+ return x.Info
}
return nil
}
-func (m *Document) GetHost() string {
- if m != nil {
- return m.Host
+func (x *Document) GetHost() string {
+ if x != nil {
+ return x.Host
}
return ""
}
-func (m *Document) GetBasePath() string {
- if m != nil {
- return m.BasePath
+func (x *Document) GetBasePath() string {
+ if x != nil {
+ return x.BasePath
}
return ""
}
-func (m *Document) GetSchemes() []string {
- if m != nil {
- return m.Schemes
+func (x *Document) GetSchemes() []string {
+ if x != nil {
+ return x.Schemes
}
return nil
}
-func (m *Document) GetConsumes() []string {
- if m != nil {
- return m.Consumes
+func (x *Document) GetConsumes() []string {
+ if x != nil {
+ return x.Consumes
}
return nil
}
-func (m *Document) GetProduces() []string {
- if m != nil {
- return m.Produces
+func (x *Document) GetProduces() []string {
+ if x != nil {
+ return x.Produces
}
return nil
}
-func (m *Document) GetPaths() *Paths {
- if m != nil {
- return m.Paths
+func (x *Document) GetPaths() *Paths {
+ if x != nil {
+ return x.Paths
}
return nil
}
-func (m *Document) GetDefinitions() *Definitions {
- if m != nil {
- return m.Definitions
+func (x *Document) GetDefinitions() *Definitions {
+ if x != nil {
+ return x.Definitions
}
return nil
}
-func (m *Document) GetParameters() *ParameterDefinitions {
- if m != nil {
- return m.Parameters
+func (x *Document) GetParameters() *ParameterDefinitions {
+ if x != nil {
+ return x.Parameters
}
return nil
}
-func (m *Document) GetResponses() *ResponseDefinitions {
- if m != nil {
- return m.Responses
+func (x *Document) GetResponses() *ResponseDefinitions {
+ if x != nil {
+ return x.Responses
}
return nil
}
-func (m *Document) GetSecurity() []*SecurityRequirement {
- if m != nil {
- return m.Security
+func (x *Document) GetSecurity() []*SecurityRequirement {
+ if x != nil {
+ return x.Security
}
return nil
}
-func (m *Document) GetSecurityDefinitions() *SecurityDefinitions {
- if m != nil {
- return m.SecurityDefinitions
+func (x *Document) GetSecurityDefinitions() *SecurityDefinitions {
+ if x != nil {
+ return x.SecurityDefinitions
}
return nil
}
-func (m *Document) GetTags() []*Tag {
- if m != nil {
- return m.Tags
+func (x *Document) GetTags() []*Tag {
+ if x != nil {
+ return x.Tags
}
return nil
}
-func (m *Document) GetExternalDocs() *ExternalDocs {
- if m != nil {
- return m.ExternalDocs
+func (x *Document) GetExternalDocs() *ExternalDocs {
+ if x != nil {
+ return x.ExternalDocs
}
return nil
}
-func (m *Document) GetVendorExtension() []*NamedAny {
- if m != nil {
- return m.VendorExtension
+func (x *Document) GetVendorExtension() []*NamedAny {
+ if x != nil {
+ return x.VendorExtension
}
return nil
}
type Examples struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
}
-func (m *Examples) Reset() { *m = Examples{} }
-func (m *Examples) String() string { return proto.CompactTextString(m) }
-func (*Examples) ProtoMessage() {}
-func (*Examples) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{9}
+func (x *Examples) Reset() {
+ *x = Examples{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Examples) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Examples.Unmarshal(m, b)
-}
-func (m *Examples) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Examples.Marshal(b, m, deterministic)
-}
-func (m *Examples) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Examples.Merge(m, src)
-}
-func (m *Examples) XXX_Size() int {
- return xxx_messageInfo_Examples.Size(m)
+func (x *Examples) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Examples) XXX_DiscardUnknown() {
- xxx_messageInfo_Examples.DiscardUnknown(m)
+
+func (*Examples) ProtoMessage() {}
+
+func (x *Examples) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Examples proto.InternalMessageInfo
+// Deprecated: Use Examples.ProtoReflect.Descriptor instead.
+func (*Examples) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{9}
+}
-func (m *Examples) GetAdditionalProperties() []*NamedAny {
- if m != nil {
- return m.AdditionalProperties
+func (x *Examples) GetAdditionalProperties() []*NamedAny {
+ if x != nil {
+ return x.AdditionalProperties
}
return nil
}
// information about external documentation
type ExternalDocs struct {
- Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
- Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
- VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *ExternalDocs) Reset() { *m = ExternalDocs{} }
-func (m *ExternalDocs) String() string { return proto.CompactTextString(m) }
-func (*ExternalDocs) ProtoMessage() {}
-func (*ExternalDocs) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{10}
+ Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+ Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+ VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
}
-func (m *ExternalDocs) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ExternalDocs.Unmarshal(m, b)
-}
-func (m *ExternalDocs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ExternalDocs.Marshal(b, m, deterministic)
-}
-func (m *ExternalDocs) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExternalDocs.Merge(m, src)
+func (x *ExternalDocs) Reset() {
+ *x = ExternalDocs{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *ExternalDocs) XXX_Size() int {
- return xxx_messageInfo_ExternalDocs.Size(m)
+
+func (x *ExternalDocs) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *ExternalDocs) XXX_DiscardUnknown() {
- xxx_messageInfo_ExternalDocs.DiscardUnknown(m)
+
+func (*ExternalDocs) ProtoMessage() {}
+
+func (x *ExternalDocs) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_ExternalDocs proto.InternalMessageInfo
+// Deprecated: Use ExternalDocs.ProtoReflect.Descriptor instead.
+func (*ExternalDocs) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{10}
+}
-func (m *ExternalDocs) GetDescription() string {
- if m != nil {
- return m.Description
+func (x *ExternalDocs) GetDescription() string {
+ if x != nil {
+ return x.Description
}
return ""
}
-func (m *ExternalDocs) GetUrl() string {
- if m != nil {
- return m.Url
+func (x *ExternalDocs) GetUrl() string {
+ if x != nil {
+ return x.Url
}
return ""
}
-func (m *ExternalDocs) GetVendorExtension() []*NamedAny {
- if m != nil {
- return m.VendorExtension
+func (x *ExternalDocs) GetVendorExtension() []*NamedAny {
+ if x != nil {
+ return x.VendorExtension
}
return nil
}
// A deterministic version of a JSON Schema object.
type FileSchema struct {
- Format string `protobuf:"bytes,1,opt,name=format,proto3" json:"format,omitempty"`
- Title string `protobuf:"bytes,2,opt,name=title,proto3" json:"title,omitempty"`
- Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
- Default *Any `protobuf:"bytes,4,opt,name=default,proto3" json:"default,omitempty"`
- Required []string `protobuf:"bytes,5,rep,name=required,proto3" json:"required,omitempty"`
- Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"`
- ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
- ExternalDocs *ExternalDocs `protobuf:"bytes,8,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
- Example *Any `protobuf:"bytes,9,opt,name=example,proto3" json:"example,omitempty"`
- VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *FileSchema) Reset() { *m = FileSchema{} }
-func (m *FileSchema) String() string { return proto.CompactTextString(m) }
-func (*FileSchema) ProtoMessage() {}
-func (*FileSchema) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{11}
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *FileSchema) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_FileSchema.Unmarshal(m, b)
+ Format string `protobuf:"bytes,1,opt,name=format,proto3" json:"format,omitempty"`
+ Title string `protobuf:"bytes,2,opt,name=title,proto3" json:"title,omitempty"`
+ Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+ Default *Any `protobuf:"bytes,4,opt,name=default,proto3" json:"default,omitempty"`
+ Required []string `protobuf:"bytes,5,rep,name=required,proto3" json:"required,omitempty"`
+ Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"`
+ ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+ ExternalDocs *ExternalDocs `protobuf:"bytes,8,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+ Example *Any `protobuf:"bytes,9,opt,name=example,proto3" json:"example,omitempty"`
+ VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
}
-func (m *FileSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_FileSchema.Marshal(b, m, deterministic)
-}
-func (m *FileSchema) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FileSchema.Merge(m, src)
+
+func (x *FileSchema) Reset() {
+ *x = FileSchema{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *FileSchema) XXX_Size() int {
- return xxx_messageInfo_FileSchema.Size(m)
+
+func (x *FileSchema) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *FileSchema) XXX_DiscardUnknown() {
- xxx_messageInfo_FileSchema.DiscardUnknown(m)
+
+func (*FileSchema) ProtoMessage() {}
+
+func (x *FileSchema) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_FileSchema proto.InternalMessageInfo
+// Deprecated: Use FileSchema.ProtoReflect.Descriptor instead.
+func (*FileSchema) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{11}
+}
-func (m *FileSchema) GetFormat() string {
- if m != nil {
- return m.Format
+func (x *FileSchema) GetFormat() string {
+ if x != nil {
+ return x.Format
}
return ""
}
-func (m *FileSchema) GetTitle() string {
- if m != nil {
- return m.Title
+func (x *FileSchema) GetTitle() string {
+ if x != nil {
+ return x.Title
}
return ""
}
-func (m *FileSchema) GetDescription() string {
- if m != nil {
- return m.Description
+func (x *FileSchema) GetDescription() string {
+ if x != nil {
+ return x.Description
}
return ""
}
-func (m *FileSchema) GetDefault() *Any {
- if m != nil {
- return m.Default
+func (x *FileSchema) GetDefault() *Any {
+ if x != nil {
+ return x.Default
}
return nil
}
-func (m *FileSchema) GetRequired() []string {
- if m != nil {
- return m.Required
+func (x *FileSchema) GetRequired() []string {
+ if x != nil {
+ return x.Required
}
return nil
}
-func (m *FileSchema) GetType() string {
- if m != nil {
- return m.Type
+func (x *FileSchema) GetType() string {
+ if x != nil {
+ return x.Type
}
return ""
}
-func (m *FileSchema) GetReadOnly() bool {
- if m != nil {
- return m.ReadOnly
+func (x *FileSchema) GetReadOnly() bool {
+ if x != nil {
+ return x.ReadOnly
}
return false
}
-func (m *FileSchema) GetExternalDocs() *ExternalDocs {
- if m != nil {
- return m.ExternalDocs
+func (x *FileSchema) GetExternalDocs() *ExternalDocs {
+ if x != nil {
+ return x.ExternalDocs
}
return nil
}
-func (m *FileSchema) GetExample() *Any {
- if m != nil {
- return m.Example
+func (x *FileSchema) GetExample() *Any {
+ if x != nil {
+ return x.Example
}
return nil
}
-func (m *FileSchema) GetVendorExtension() []*NamedAny {
- if m != nil {
- return m.VendorExtension
+func (x *FileSchema) GetVendorExtension() []*NamedAny {
+ if x != nil {
+ return x.VendorExtension
}
return nil
}
type FormDataParameterSubSchema struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
// Determines whether or not this parameter is required or optional.
Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"`
// Determines the location of the parameter.
@@ -885,400 +993,416 @@ type FormDataParameterSubSchema struct {
// The name of the parameter.
Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
// allows sending a parameter by name only or with an empty value.
- AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"`
- Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"`
- Format string `protobuf:"bytes,7,opt,name=format,proto3" json:"format,omitempty"`
- Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items,proto3" json:"items,omitempty"`
- CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"`
- Default *Any `protobuf:"bytes,10,opt,name=default,proto3" json:"default,omitempty"`
- Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"`
- ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"`
- Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"`
- ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"`
- MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"`
- MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"`
- Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"`
- MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
- MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
- UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"`
- Enum []*Any `protobuf:"bytes,21,rep,name=enum,proto3" json:"enum,omitempty"`
- MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"`
- VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *FormDataParameterSubSchema) Reset() { *m = FormDataParameterSubSchema{} }
-func (m *FormDataParameterSubSchema) String() string { return proto.CompactTextString(m) }
-func (*FormDataParameterSubSchema) ProtoMessage() {}
+ AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"`
+ Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"`
+ Format string `protobuf:"bytes,7,opt,name=format,proto3" json:"format,omitempty"`
+ Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items,proto3" json:"items,omitempty"`
+ CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"`
+ Default *Any `protobuf:"bytes,10,opt,name=default,proto3" json:"default,omitempty"`
+ Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"`
+ ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"`
+ Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"`
+ ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"`
+ MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"`
+ MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"`
+ Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"`
+ MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
+ MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
+ UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"`
+ Enum []*Any `protobuf:"bytes,21,rep,name=enum,proto3" json:"enum,omitempty"`
+ MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"`
+ VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+}
+
+func (x *FormDataParameterSubSchema) Reset() {
+ *x = FormDataParameterSubSchema{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FormDataParameterSubSchema) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FormDataParameterSubSchema) ProtoMessage() {}
+
+func (x *FormDataParameterSubSchema) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FormDataParameterSubSchema.ProtoReflect.Descriptor instead.
func (*FormDataParameterSubSchema) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{12}
-}
-
-func (m *FormDataParameterSubSchema) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_FormDataParameterSubSchema.Unmarshal(m, b)
-}
-func (m *FormDataParameterSubSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_FormDataParameterSubSchema.Marshal(b, m, deterministic)
-}
-func (m *FormDataParameterSubSchema) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FormDataParameterSubSchema.Merge(m, src)
-}
-func (m *FormDataParameterSubSchema) XXX_Size() int {
- return xxx_messageInfo_FormDataParameterSubSchema.Size(m)
-}
-func (m *FormDataParameterSubSchema) XXX_DiscardUnknown() {
- xxx_messageInfo_FormDataParameterSubSchema.DiscardUnknown(m)
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{12}
}
-var xxx_messageInfo_FormDataParameterSubSchema proto.InternalMessageInfo
-
-func (m *FormDataParameterSubSchema) GetRequired() bool {
- if m != nil {
- return m.Required
+func (x *FormDataParameterSubSchema) GetRequired() bool {
+ if x != nil {
+ return x.Required
}
return false
}
-func (m *FormDataParameterSubSchema) GetIn() string {
- if m != nil {
- return m.In
+func (x *FormDataParameterSubSchema) GetIn() string {
+ if x != nil {
+ return x.In
}
return ""
}
-func (m *FormDataParameterSubSchema) GetDescription() string {
- if m != nil {
- return m.Description
+func (x *FormDataParameterSubSchema) GetDescription() string {
+ if x != nil {
+ return x.Description
}
return ""
}
-func (m *FormDataParameterSubSchema) GetName() string {
- if m != nil {
- return m.Name
+func (x *FormDataParameterSubSchema) GetName() string {
+ if x != nil {
+ return x.Name
}
return ""
}
-func (m *FormDataParameterSubSchema) GetAllowEmptyValue() bool {
- if m != nil {
- return m.AllowEmptyValue
+func (x *FormDataParameterSubSchema) GetAllowEmptyValue() bool {
+ if x != nil {
+ return x.AllowEmptyValue
}
return false
}
-func (m *FormDataParameterSubSchema) GetType() string {
- if m != nil {
- return m.Type
+func (x *FormDataParameterSubSchema) GetType() string {
+ if x != nil {
+ return x.Type
}
return ""
}
-func (m *FormDataParameterSubSchema) GetFormat() string {
- if m != nil {
- return m.Format
+func (x *FormDataParameterSubSchema) GetFormat() string {
+ if x != nil {
+ return x.Format
}
return ""
}
-func (m *FormDataParameterSubSchema) GetItems() *PrimitivesItems {
- if m != nil {
- return m.Items
+func (x *FormDataParameterSubSchema) GetItems() *PrimitivesItems {
+ if x != nil {
+ return x.Items
}
return nil
}
-func (m *FormDataParameterSubSchema) GetCollectionFormat() string {
- if m != nil {
- return m.CollectionFormat
+func (x *FormDataParameterSubSchema) GetCollectionFormat() string {
+ if x != nil {
+ return x.CollectionFormat
}
return ""
}
-func (m *FormDataParameterSubSchema) GetDefault() *Any {
- if m != nil {
- return m.Default
+func (x *FormDataParameterSubSchema) GetDefault() *Any {
+ if x != nil {
+ return x.Default
}
return nil
}
-func (m *FormDataParameterSubSchema) GetMaximum() float64 {
- if m != nil {
- return m.Maximum
+func (x *FormDataParameterSubSchema) GetMaximum() float64 {
+ if x != nil {
+ return x.Maximum
}
return 0
}
-func (m *FormDataParameterSubSchema) GetExclusiveMaximum() bool {
- if m != nil {
- return m.ExclusiveMaximum
+func (x *FormDataParameterSubSchema) GetExclusiveMaximum() bool {
+ if x != nil {
+ return x.ExclusiveMaximum
}
return false
}
-func (m *FormDataParameterSubSchema) GetMinimum() float64 {
- if m != nil {
- return m.Minimum
+func (x *FormDataParameterSubSchema) GetMinimum() float64 {
+ if x != nil {
+ return x.Minimum
}
return 0
}
-func (m *FormDataParameterSubSchema) GetExclusiveMinimum() bool {
- if m != nil {
- return m.ExclusiveMinimum
+func (x *FormDataParameterSubSchema) GetExclusiveMinimum() bool {
+ if x != nil {
+ return x.ExclusiveMinimum
}
return false
}
-func (m *FormDataParameterSubSchema) GetMaxLength() int64 {
- if m != nil {
- return m.MaxLength
+func (x *FormDataParameterSubSchema) GetMaxLength() int64 {
+ if x != nil {
+ return x.MaxLength
}
return 0
}
-func (m *FormDataParameterSubSchema) GetMinLength() int64 {
- if m != nil {
- return m.MinLength
+func (x *FormDataParameterSubSchema) GetMinLength() int64 {
+ if x != nil {
+ return x.MinLength
}
return 0
}
-func (m *FormDataParameterSubSchema) GetPattern() string {
- if m != nil {
- return m.Pattern
+func (x *FormDataParameterSubSchema) GetPattern() string {
+ if x != nil {
+ return x.Pattern
}
return ""
}
-func (m *FormDataParameterSubSchema) GetMaxItems() int64 {
- if m != nil {
- return m.MaxItems
+func (x *FormDataParameterSubSchema) GetMaxItems() int64 {
+ if x != nil {
+ return x.MaxItems
}
return 0
}
-func (m *FormDataParameterSubSchema) GetMinItems() int64 {
- if m != nil {
- return m.MinItems
+func (x *FormDataParameterSubSchema) GetMinItems() int64 {
+ if x != nil {
+ return x.MinItems
}
return 0
}
-func (m *FormDataParameterSubSchema) GetUniqueItems() bool {
- if m != nil {
- return m.UniqueItems
+func (x *FormDataParameterSubSchema) GetUniqueItems() bool {
+ if x != nil {
+ return x.UniqueItems
}
return false
}
-func (m *FormDataParameterSubSchema) GetEnum() []*Any {
- if m != nil {
- return m.Enum
+func (x *FormDataParameterSubSchema) GetEnum() []*Any {
+ if x != nil {
+ return x.Enum
}
return nil
}
-func (m *FormDataParameterSubSchema) GetMultipleOf() float64 {
- if m != nil {
- return m.MultipleOf
+func (x *FormDataParameterSubSchema) GetMultipleOf() float64 {
+ if x != nil {
+ return x.MultipleOf
}
return 0
}
-func (m *FormDataParameterSubSchema) GetVendorExtension() []*NamedAny {
- if m != nil {
- return m.VendorExtension
+func (x *FormDataParameterSubSchema) GetVendorExtension() []*NamedAny {
+ if x != nil {
+ return x.VendorExtension
}
return nil
}
type Header struct {
- Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
- Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"`
- Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items,proto3" json:"items,omitempty"`
- CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"`
- Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"`
- Maximum float64 `protobuf:"fixed64,6,opt,name=maximum,proto3" json:"maximum,omitempty"`
- ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"`
- Minimum float64 `protobuf:"fixed64,8,opt,name=minimum,proto3" json:"minimum,omitempty"`
- ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"`
- MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"`
- MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"`
- Pattern string `protobuf:"bytes,12,opt,name=pattern,proto3" json:"pattern,omitempty"`
- MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
- MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
- UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"`
- Enum []*Any `protobuf:"bytes,16,rep,name=enum,proto3" json:"enum,omitempty"`
- MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"`
- Description string `protobuf:"bytes,18,opt,name=description,proto3" json:"description,omitempty"`
- VendorExtension []*NamedAny `protobuf:"bytes,19,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Header) Reset() { *m = Header{} }
-func (m *Header) String() string { return proto.CompactTextString(m) }
-func (*Header) ProtoMessage() {}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"`
+ Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items,proto3" json:"items,omitempty"`
+ CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"`
+ Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"`
+ Maximum float64 `protobuf:"fixed64,6,opt,name=maximum,proto3" json:"maximum,omitempty"`
+ ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"`
+ Minimum float64 `protobuf:"fixed64,8,opt,name=minimum,proto3" json:"minimum,omitempty"`
+ ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"`
+ MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"`
+ MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"`
+ Pattern string `protobuf:"bytes,12,opt,name=pattern,proto3" json:"pattern,omitempty"`
+ MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
+ MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
+ UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"`
+ Enum []*Any `protobuf:"bytes,16,rep,name=enum,proto3" json:"enum,omitempty"`
+ MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"`
+ Description string `protobuf:"bytes,18,opt,name=description,proto3" json:"description,omitempty"`
+ VendorExtension []*NamedAny `protobuf:"bytes,19,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+}
+
+func (x *Header) Reset() {
+ *x = Header{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Header) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Header) ProtoMessage() {}
+
+func (x *Header) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Header.ProtoReflect.Descriptor instead.
func (*Header) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{13}
-}
-
-func (m *Header) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Header.Unmarshal(m, b)
-}
-func (m *Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Header.Marshal(b, m, deterministic)
-}
-func (m *Header) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Header.Merge(m, src)
-}
-func (m *Header) XXX_Size() int {
- return xxx_messageInfo_Header.Size(m)
-}
-func (m *Header) XXX_DiscardUnknown() {
- xxx_messageInfo_Header.DiscardUnknown(m)
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{13}
}
-var xxx_messageInfo_Header proto.InternalMessageInfo
-
-func (m *Header) GetType() string {
- if m != nil {
- return m.Type
+func (x *Header) GetType() string {
+ if x != nil {
+ return x.Type
}
return ""
}
-func (m *Header) GetFormat() string {
- if m != nil {
- return m.Format
+func (x *Header) GetFormat() string {
+ if x != nil {
+ return x.Format
}
return ""
}
-func (m *Header) GetItems() *PrimitivesItems {
- if m != nil {
- return m.Items
+func (x *Header) GetItems() *PrimitivesItems {
+ if x != nil {
+ return x.Items
}
return nil
}
-func (m *Header) GetCollectionFormat() string {
- if m != nil {
- return m.CollectionFormat
+func (x *Header) GetCollectionFormat() string {
+ if x != nil {
+ return x.CollectionFormat
}
return ""
}
-func (m *Header) GetDefault() *Any {
- if m != nil {
- return m.Default
+func (x *Header) GetDefault() *Any {
+ if x != nil {
+ return x.Default
}
return nil
}
-func (m *Header) GetMaximum() float64 {
- if m != nil {
- return m.Maximum
+func (x *Header) GetMaximum() float64 {
+ if x != nil {
+ return x.Maximum
}
return 0
}
-func (m *Header) GetExclusiveMaximum() bool {
- if m != nil {
- return m.ExclusiveMaximum
+func (x *Header) GetExclusiveMaximum() bool {
+ if x != nil {
+ return x.ExclusiveMaximum
}
return false
}
-func (m *Header) GetMinimum() float64 {
- if m != nil {
- return m.Minimum
+func (x *Header) GetMinimum() float64 {
+ if x != nil {
+ return x.Minimum
}
return 0
}
-func (m *Header) GetExclusiveMinimum() bool {
- if m != nil {
- return m.ExclusiveMinimum
+func (x *Header) GetExclusiveMinimum() bool {
+ if x != nil {
+ return x.ExclusiveMinimum
}
return false
}
-func (m *Header) GetMaxLength() int64 {
- if m != nil {
- return m.MaxLength
+func (x *Header) GetMaxLength() int64 {
+ if x != nil {
+ return x.MaxLength
}
return 0
}
-func (m *Header) GetMinLength() int64 {
- if m != nil {
- return m.MinLength
+func (x *Header) GetMinLength() int64 {
+ if x != nil {
+ return x.MinLength
}
return 0
}
-func (m *Header) GetPattern() string {
- if m != nil {
- return m.Pattern
+func (x *Header) GetPattern() string {
+ if x != nil {
+ return x.Pattern
}
return ""
}
-func (m *Header) GetMaxItems() int64 {
- if m != nil {
- return m.MaxItems
+func (x *Header) GetMaxItems() int64 {
+ if x != nil {
+ return x.MaxItems
}
return 0
}
-func (m *Header) GetMinItems() int64 {
- if m != nil {
- return m.MinItems
+func (x *Header) GetMinItems() int64 {
+ if x != nil {
+ return x.MinItems
}
return 0
}
-func (m *Header) GetUniqueItems() bool {
- if m != nil {
- return m.UniqueItems
+func (x *Header) GetUniqueItems() bool {
+ if x != nil {
+ return x.UniqueItems
}
return false
}
-func (m *Header) GetEnum() []*Any {
- if m != nil {
- return m.Enum
+func (x *Header) GetEnum() []*Any {
+ if x != nil {
+ return x.Enum
}
return nil
}
-func (m *Header) GetMultipleOf() float64 {
- if m != nil {
- return m.MultipleOf
+func (x *Header) GetMultipleOf() float64 {
+ if x != nil {
+ return x.MultipleOf
}
return 0
}
-func (m *Header) GetDescription() string {
- if m != nil {
- return m.Description
+func (x *Header) GetDescription() string {
+ if x != nil {
+ return x.Description
}
return ""
}
-func (m *Header) GetVendorExtension() []*NamedAny {
- if m != nil {
- return m.VendorExtension
+func (x *Header) GetVendorExtension() []*NamedAny {
+ if x != nil {
+ return x.VendorExtension
}
return nil
}
type HeaderParameterSubSchema struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
// Determines whether or not this parameter is required or optional.
Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"`
// Determines the location of the parameter.
@@ -1286,250 +1410,266 @@ type HeaderParameterSubSchema struct {
// A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed.
Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
// The name of the parameter.
- Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
- Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"`
- Format string `protobuf:"bytes,6,opt,name=format,proto3" json:"format,omitempty"`
- Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items,proto3" json:"items,omitempty"`
- CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"`
- Default *Any `protobuf:"bytes,9,opt,name=default,proto3" json:"default,omitempty"`
- Maximum float64 `protobuf:"fixed64,10,opt,name=maximum,proto3" json:"maximum,omitempty"`
- ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"`
- Minimum float64 `protobuf:"fixed64,12,opt,name=minimum,proto3" json:"minimum,omitempty"`
- ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"`
- MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"`
- MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"`
- Pattern string `protobuf:"bytes,16,opt,name=pattern,proto3" json:"pattern,omitempty"`
- MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
- MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
- UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"`
- Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"`
- MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"`
- VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *HeaderParameterSubSchema) Reset() { *m = HeaderParameterSubSchema{} }
-func (m *HeaderParameterSubSchema) String() string { return proto.CompactTextString(m) }
-func (*HeaderParameterSubSchema) ProtoMessage() {}
+ Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
+ Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"`
+ Format string `protobuf:"bytes,6,opt,name=format,proto3" json:"format,omitempty"`
+ Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items,proto3" json:"items,omitempty"`
+ CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"`
+ Default *Any `protobuf:"bytes,9,opt,name=default,proto3" json:"default,omitempty"`
+ Maximum float64 `protobuf:"fixed64,10,opt,name=maximum,proto3" json:"maximum,omitempty"`
+ ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"`
+ Minimum float64 `protobuf:"fixed64,12,opt,name=minimum,proto3" json:"minimum,omitempty"`
+ ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"`
+ MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"`
+ MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"`
+ Pattern string `protobuf:"bytes,16,opt,name=pattern,proto3" json:"pattern,omitempty"`
+ MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
+ MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
+ UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"`
+ Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"`
+ MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"`
+ VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+}
+
+func (x *HeaderParameterSubSchema) Reset() {
+ *x = HeaderParameterSubSchema{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HeaderParameterSubSchema) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HeaderParameterSubSchema) ProtoMessage() {}
+
+func (x *HeaderParameterSubSchema) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HeaderParameterSubSchema.ProtoReflect.Descriptor instead.
func (*HeaderParameterSubSchema) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{14}
-}
-
-func (m *HeaderParameterSubSchema) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_HeaderParameterSubSchema.Unmarshal(m, b)
-}
-func (m *HeaderParameterSubSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_HeaderParameterSubSchema.Marshal(b, m, deterministic)
-}
-func (m *HeaderParameterSubSchema) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HeaderParameterSubSchema.Merge(m, src)
-}
-func (m *HeaderParameterSubSchema) XXX_Size() int {
- return xxx_messageInfo_HeaderParameterSubSchema.Size(m)
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{14}
}
-func (m *HeaderParameterSubSchema) XXX_DiscardUnknown() {
- xxx_messageInfo_HeaderParameterSubSchema.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HeaderParameterSubSchema proto.InternalMessageInfo
-func (m *HeaderParameterSubSchema) GetRequired() bool {
- if m != nil {
- return m.Required
+func (x *HeaderParameterSubSchema) GetRequired() bool {
+ if x != nil {
+ return x.Required
}
return false
}
-func (m *HeaderParameterSubSchema) GetIn() string {
- if m != nil {
- return m.In
+func (x *HeaderParameterSubSchema) GetIn() string {
+ if x != nil {
+ return x.In
}
return ""
}
-func (m *HeaderParameterSubSchema) GetDescription() string {
- if m != nil {
- return m.Description
+func (x *HeaderParameterSubSchema) GetDescription() string {
+ if x != nil {
+ return x.Description
}
return ""
}
-func (m *HeaderParameterSubSchema) GetName() string {
- if m != nil {
- return m.Name
+func (x *HeaderParameterSubSchema) GetName() string {
+ if x != nil {
+ return x.Name
}
return ""
}
-func (m *HeaderParameterSubSchema) GetType() string {
- if m != nil {
- return m.Type
+func (x *HeaderParameterSubSchema) GetType() string {
+ if x != nil {
+ return x.Type
}
return ""
}
-func (m *HeaderParameterSubSchema) GetFormat() string {
- if m != nil {
- return m.Format
+func (x *HeaderParameterSubSchema) GetFormat() string {
+ if x != nil {
+ return x.Format
}
return ""
}
-func (m *HeaderParameterSubSchema) GetItems() *PrimitivesItems {
- if m != nil {
- return m.Items
+func (x *HeaderParameterSubSchema) GetItems() *PrimitivesItems {
+ if x != nil {
+ return x.Items
}
return nil
}
-func (m *HeaderParameterSubSchema) GetCollectionFormat() string {
- if m != nil {
- return m.CollectionFormat
+func (x *HeaderParameterSubSchema) GetCollectionFormat() string {
+ if x != nil {
+ return x.CollectionFormat
}
return ""
}
-func (m *HeaderParameterSubSchema) GetDefault() *Any {
- if m != nil {
- return m.Default
+func (x *HeaderParameterSubSchema) GetDefault() *Any {
+ if x != nil {
+ return x.Default
}
return nil
}
-func (m *HeaderParameterSubSchema) GetMaximum() float64 {
- if m != nil {
- return m.Maximum
+func (x *HeaderParameterSubSchema) GetMaximum() float64 {
+ if x != nil {
+ return x.Maximum
}
return 0
}
-func (m *HeaderParameterSubSchema) GetExclusiveMaximum() bool {
- if m != nil {
- return m.ExclusiveMaximum
+func (x *HeaderParameterSubSchema) GetExclusiveMaximum() bool {
+ if x != nil {
+ return x.ExclusiveMaximum
}
return false
}
-func (m *HeaderParameterSubSchema) GetMinimum() float64 {
- if m != nil {
- return m.Minimum
+func (x *HeaderParameterSubSchema) GetMinimum() float64 {
+ if x != nil {
+ return x.Minimum
}
return 0
}
-func (m *HeaderParameterSubSchema) GetExclusiveMinimum() bool {
- if m != nil {
- return m.ExclusiveMinimum
+func (x *HeaderParameterSubSchema) GetExclusiveMinimum() bool {
+ if x != nil {
+ return x.ExclusiveMinimum
}
return false
}
-func (m *HeaderParameterSubSchema) GetMaxLength() int64 {
- if m != nil {
- return m.MaxLength
+func (x *HeaderParameterSubSchema) GetMaxLength() int64 {
+ if x != nil {
+ return x.MaxLength
}
return 0
}
-func (m *HeaderParameterSubSchema) GetMinLength() int64 {
- if m != nil {
- return m.MinLength
+func (x *HeaderParameterSubSchema) GetMinLength() int64 {
+ if x != nil {
+ return x.MinLength
}
return 0
}
-func (m *HeaderParameterSubSchema) GetPattern() string {
- if m != nil {
- return m.Pattern
+func (x *HeaderParameterSubSchema) GetPattern() string {
+ if x != nil {
+ return x.Pattern
}
return ""
}
-func (m *HeaderParameterSubSchema) GetMaxItems() int64 {
- if m != nil {
- return m.MaxItems
+func (x *HeaderParameterSubSchema) GetMaxItems() int64 {
+ if x != nil {
+ return x.MaxItems
}
return 0
}
-func (m *HeaderParameterSubSchema) GetMinItems() int64 {
- if m != nil {
- return m.MinItems
+func (x *HeaderParameterSubSchema) GetMinItems() int64 {
+ if x != nil {
+ return x.MinItems
}
return 0
}
-func (m *HeaderParameterSubSchema) GetUniqueItems() bool {
- if m != nil {
- return m.UniqueItems
+func (x *HeaderParameterSubSchema) GetUniqueItems() bool {
+ if x != nil {
+ return x.UniqueItems
}
return false
}
-func (m *HeaderParameterSubSchema) GetEnum() []*Any {
- if m != nil {
- return m.Enum
+func (x *HeaderParameterSubSchema) GetEnum() []*Any {
+ if x != nil {
+ return x.Enum
}
return nil
}
-func (m *HeaderParameterSubSchema) GetMultipleOf() float64 {
- if m != nil {
- return m.MultipleOf
+func (x *HeaderParameterSubSchema) GetMultipleOf() float64 {
+ if x != nil {
+ return x.MultipleOf
}
return 0
}
-func (m *HeaderParameterSubSchema) GetVendorExtension() []*NamedAny {
- if m != nil {
- return m.VendorExtension
+func (x *HeaderParameterSubSchema) GetVendorExtension() []*NamedAny {
+ if x != nil {
+ return x.VendorExtension
}
return nil
}
type Headers struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
AdditionalProperties []*NamedHeader `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
}
-func (m *Headers) Reset() { *m = Headers{} }
-func (m *Headers) String() string { return proto.CompactTextString(m) }
-func (*Headers) ProtoMessage() {}
-func (*Headers) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{15}
+func (x *Headers) Reset() {
+ *x = Headers{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Headers) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Headers.Unmarshal(m, b)
-}
-func (m *Headers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Headers.Marshal(b, m, deterministic)
+func (x *Headers) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Headers) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Headers.Merge(m, src)
-}
-func (m *Headers) XXX_Size() int {
- return xxx_messageInfo_Headers.Size(m)
-}
-func (m *Headers) XXX_DiscardUnknown() {
- xxx_messageInfo_Headers.DiscardUnknown(m)
+
+func (*Headers) ProtoMessage() {}
+
+func (x *Headers) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Headers proto.InternalMessageInfo
+// Deprecated: Use Headers.ProtoReflect.Descriptor instead.
+func (*Headers) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{15}
+}
-func (m *Headers) GetAdditionalProperties() []*NamedHeader {
- if m != nil {
- return m.AdditionalProperties
+func (x *Headers) GetAdditionalProperties() []*NamedHeader {
+ if x != nil {
+ return x.AdditionalProperties
}
return nil
}
// General information about the API.
type Info struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
// A unique and precise title of the API.
Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"`
// A semantic version number of the API.
@@ -1537,797 +1677,885 @@ type Info struct {
// A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed.
Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
// The terms of service for the API.
- TermsOfService string `protobuf:"bytes,4,opt,name=terms_of_service,json=termsOfService,proto3" json:"terms_of_service,omitempty"`
- Contact *Contact `protobuf:"bytes,5,opt,name=contact,proto3" json:"contact,omitempty"`
- License *License `protobuf:"bytes,6,opt,name=license,proto3" json:"license,omitempty"`
- VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Info) Reset() { *m = Info{} }
-func (m *Info) String() string { return proto.CompactTextString(m) }
-func (*Info) ProtoMessage() {}
-func (*Info) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{16}
+ TermsOfService string `protobuf:"bytes,4,opt,name=terms_of_service,json=termsOfService,proto3" json:"terms_of_service,omitempty"`
+ Contact *Contact `protobuf:"bytes,5,opt,name=contact,proto3" json:"contact,omitempty"`
+ License *License `protobuf:"bytes,6,opt,name=license,proto3" json:"license,omitempty"`
+ VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
}
-func (m *Info) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Info.Unmarshal(m, b)
-}
-func (m *Info) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Info.Marshal(b, m, deterministic)
-}
-func (m *Info) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Info.Merge(m, src)
+func (x *Info) Reset() {
+ *x = Info{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Info) XXX_Size() int {
- return xxx_messageInfo_Info.Size(m)
+
+func (x *Info) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Info) XXX_DiscardUnknown() {
- xxx_messageInfo_Info.DiscardUnknown(m)
+
+func (*Info) ProtoMessage() {}
+
+func (x *Info) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[16]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Info proto.InternalMessageInfo
+// Deprecated: Use Info.ProtoReflect.Descriptor instead.
+func (*Info) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{16}
+}
-func (m *Info) GetTitle() string {
- if m != nil {
- return m.Title
+func (x *Info) GetTitle() string {
+ if x != nil {
+ return x.Title
}
return ""
}
-func (m *Info) GetVersion() string {
- if m != nil {
- return m.Version
+func (x *Info) GetVersion() string {
+ if x != nil {
+ return x.Version
}
return ""
}
-func (m *Info) GetDescription() string {
- if m != nil {
- return m.Description
+func (x *Info) GetDescription() string {
+ if x != nil {
+ return x.Description
}
return ""
}
-func (m *Info) GetTermsOfService() string {
- if m != nil {
- return m.TermsOfService
+func (x *Info) GetTermsOfService() string {
+ if x != nil {
+ return x.TermsOfService
}
return ""
}
-func (m *Info) GetContact() *Contact {
- if m != nil {
- return m.Contact
+func (x *Info) GetContact() *Contact {
+ if x != nil {
+ return x.Contact
}
return nil
}
-func (m *Info) GetLicense() *License {
- if m != nil {
- return m.License
+func (x *Info) GetLicense() *License {
+ if x != nil {
+ return x.License
}
return nil
}
-func (m *Info) GetVendorExtension() []*NamedAny {
- if m != nil {
- return m.VendorExtension
+func (x *Info) GetVendorExtension() []*NamedAny {
+ if x != nil {
+ return x.VendorExtension
}
return nil
}
type ItemsItem struct {
- Schema []*Schema `protobuf:"bytes,1,rep,name=schema,proto3" json:"schema,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *ItemsItem) Reset() { *m = ItemsItem{} }
-func (m *ItemsItem) String() string { return proto.CompactTextString(m) }
-func (*ItemsItem) ProtoMessage() {}
-func (*ItemsItem) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{17}
+ Schema []*Schema `protobuf:"bytes,1,rep,name=schema,proto3" json:"schema,omitempty"`
}
-func (m *ItemsItem) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ItemsItem.Unmarshal(m, b)
-}
-func (m *ItemsItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ItemsItem.Marshal(b, m, deterministic)
-}
-func (m *ItemsItem) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ItemsItem.Merge(m, src)
+func (x *ItemsItem) Reset() {
+ *x = ItemsItem{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *ItemsItem) XXX_Size() int {
- return xxx_messageInfo_ItemsItem.Size(m)
+
+func (x *ItemsItem) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *ItemsItem) XXX_DiscardUnknown() {
- xxx_messageInfo_ItemsItem.DiscardUnknown(m)
+
+func (*ItemsItem) ProtoMessage() {}
+
+func (x *ItemsItem) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[17]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_ItemsItem proto.InternalMessageInfo
+// Deprecated: Use ItemsItem.ProtoReflect.Descriptor instead.
+func (*ItemsItem) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{17}
+}
-func (m *ItemsItem) GetSchema() []*Schema {
- if m != nil {
- return m.Schema
+func (x *ItemsItem) GetSchema() []*Schema {
+ if x != nil {
+ return x.Schema
}
return nil
}
type JsonReference struct {
- XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"`
- Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *JsonReference) Reset() { *m = JsonReference{} }
-func (m *JsonReference) String() string { return proto.CompactTextString(m) }
-func (*JsonReference) ProtoMessage() {}
-func (*JsonReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{18}
+ XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"`
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
}
-func (m *JsonReference) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_JsonReference.Unmarshal(m, b)
-}
-func (m *JsonReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_JsonReference.Marshal(b, m, deterministic)
-}
-func (m *JsonReference) XXX_Merge(src proto.Message) {
- xxx_messageInfo_JsonReference.Merge(m, src)
+func (x *JsonReference) Reset() {
+ *x = JsonReference{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *JsonReference) XXX_Size() int {
- return xxx_messageInfo_JsonReference.Size(m)
+
+func (x *JsonReference) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *JsonReference) XXX_DiscardUnknown() {
- xxx_messageInfo_JsonReference.DiscardUnknown(m)
+
+func (*JsonReference) ProtoMessage() {}
+
+func (x *JsonReference) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[18]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_JsonReference proto.InternalMessageInfo
+// Deprecated: Use JsonReference.ProtoReflect.Descriptor instead.
+func (*JsonReference) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{18}
+}
-func (m *JsonReference) GetXRef() string {
- if m != nil {
- return m.XRef
+func (x *JsonReference) GetXRef() string {
+ if x != nil {
+ return x.XRef
}
return ""
}
-func (m *JsonReference) GetDescription() string {
- if m != nil {
- return m.Description
+func (x *JsonReference) GetDescription() string {
+ if x != nil {
+ return x.Description
}
return ""
}
type License struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
// The name of the license type. It's encouraged to use an OSI compatible license.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// The URL pointing to the license.
- Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
- VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+ VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
}
-func (m *License) Reset() { *m = License{} }
-func (m *License) String() string { return proto.CompactTextString(m) }
-func (*License) ProtoMessage() {}
-func (*License) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{19}
+func (x *License) Reset() {
+ *x = License{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *License) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_License.Unmarshal(m, b)
-}
-func (m *License) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_License.Marshal(b, m, deterministic)
+func (x *License) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *License) XXX_Merge(src proto.Message) {
- xxx_messageInfo_License.Merge(m, src)
-}
-func (m *License) XXX_Size() int {
- return xxx_messageInfo_License.Size(m)
-}
-func (m *License) XXX_DiscardUnknown() {
- xxx_messageInfo_License.DiscardUnknown(m)
+
+func (*License) ProtoMessage() {}
+
+func (x *License) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[19]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_License proto.InternalMessageInfo
+// Deprecated: Use License.ProtoReflect.Descriptor instead.
+func (*License) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{19}
+}
-func (m *License) GetName() string {
- if m != nil {
- return m.Name
+func (x *License) GetName() string {
+ if x != nil {
+ return x.Name
}
return ""
}
-func (m *License) GetUrl() string {
- if m != nil {
- return m.Url
+func (x *License) GetUrl() string {
+ if x != nil {
+ return x.Url
}
return ""
}
-func (m *License) GetVendorExtension() []*NamedAny {
- if m != nil {
- return m.VendorExtension
+func (x *License) GetVendorExtension() []*NamedAny {
+ if x != nil {
+ return x.VendorExtension
}
return nil
}
// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs.
type NamedAny struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
// Map key
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Mapped value
- Value *Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Value *Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}
-func (m *NamedAny) Reset() { *m = NamedAny{} }
-func (m *NamedAny) String() string { return proto.CompactTextString(m) }
-func (*NamedAny) ProtoMessage() {}
-func (*NamedAny) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{20}
+func (x *NamedAny) Reset() {
+ *x = NamedAny{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *NamedAny) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_NamedAny.Unmarshal(m, b)
-}
-func (m *NamedAny) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_NamedAny.Marshal(b, m, deterministic)
-}
-func (m *NamedAny) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NamedAny.Merge(m, src)
-}
-func (m *NamedAny) XXX_Size() int {
- return xxx_messageInfo_NamedAny.Size(m)
+func (x *NamedAny) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *NamedAny) XXX_DiscardUnknown() {
- xxx_messageInfo_NamedAny.DiscardUnknown(m)
+
+func (*NamedAny) ProtoMessage() {}
+
+func (x *NamedAny) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[20]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_NamedAny proto.InternalMessageInfo
+// Deprecated: Use NamedAny.ProtoReflect.Descriptor instead.
+func (*NamedAny) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{20}
+}
-func (m *NamedAny) GetName() string {
- if m != nil {
- return m.Name
+func (x *NamedAny) GetName() string {
+ if x != nil {
+ return x.Name
}
return ""
}
-func (m *NamedAny) GetValue() *Any {
- if m != nil {
- return m.Value
+func (x *NamedAny) GetValue() *Any {
+ if x != nil {
+ return x.Value
}
return nil
}
// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs.
type NamedHeader struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
// Map key
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Mapped value
- Value *Header `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Value *Header `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}
-func (m *NamedHeader) Reset() { *m = NamedHeader{} }
-func (m *NamedHeader) String() string { return proto.CompactTextString(m) }
-func (*NamedHeader) ProtoMessage() {}
-func (*NamedHeader) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{21}
+func (x *NamedHeader) Reset() {
+ *x = NamedHeader{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *NamedHeader) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_NamedHeader.Unmarshal(m, b)
-}
-func (m *NamedHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_NamedHeader.Marshal(b, m, deterministic)
-}
-func (m *NamedHeader) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NamedHeader.Merge(m, src)
+func (x *NamedHeader) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *NamedHeader) XXX_Size() int {
- return xxx_messageInfo_NamedHeader.Size(m)
-}
-func (m *NamedHeader) XXX_DiscardUnknown() {
- xxx_messageInfo_NamedHeader.DiscardUnknown(m)
+
+func (*NamedHeader) ProtoMessage() {}
+
+func (x *NamedHeader) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[21]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_NamedHeader proto.InternalMessageInfo
+// Deprecated: Use NamedHeader.ProtoReflect.Descriptor instead.
+func (*NamedHeader) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{21}
+}
-func (m *NamedHeader) GetName() string {
- if m != nil {
- return m.Name
+func (x *NamedHeader) GetName() string {
+ if x != nil {
+ return x.Name
}
return ""
}
-func (m *NamedHeader) GetValue() *Header {
- if m != nil {
- return m.Value
+func (x *NamedHeader) GetValue() *Header {
+ if x != nil {
+ return x.Value
}
return nil
}
// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs.
type NamedParameter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
// Map key
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Mapped value
- Value *Parameter `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Value *Parameter `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}
-func (m *NamedParameter) Reset() { *m = NamedParameter{} }
-func (m *NamedParameter) String() string { return proto.CompactTextString(m) }
-func (*NamedParameter) ProtoMessage() {}
-func (*NamedParameter) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{22}
+func (x *NamedParameter) Reset() {
+ *x = NamedParameter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *NamedParameter) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_NamedParameter.Unmarshal(m, b)
-}
-func (m *NamedParameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_NamedParameter.Marshal(b, m, deterministic)
+func (x *NamedParameter) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *NamedParameter) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NamedParameter.Merge(m, src)
-}
-func (m *NamedParameter) XXX_Size() int {
- return xxx_messageInfo_NamedParameter.Size(m)
-}
-func (m *NamedParameter) XXX_DiscardUnknown() {
- xxx_messageInfo_NamedParameter.DiscardUnknown(m)
+
+func (*NamedParameter) ProtoMessage() {}
+
+func (x *NamedParameter) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[22]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_NamedParameter proto.InternalMessageInfo
+// Deprecated: Use NamedParameter.ProtoReflect.Descriptor instead.
+func (*NamedParameter) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{22}
+}
-func (m *NamedParameter) GetName() string {
- if m != nil {
- return m.Name
+func (x *NamedParameter) GetName() string {
+ if x != nil {
+ return x.Name
}
return ""
}
-func (m *NamedParameter) GetValue() *Parameter {
- if m != nil {
- return m.Value
+func (x *NamedParameter) GetValue() *Parameter {
+ if x != nil {
+ return x.Value
}
return nil
}
// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs.
type NamedPathItem struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
// Map key
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Mapped value
- Value *PathItem `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Value *PathItem `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}
-func (m *NamedPathItem) Reset() { *m = NamedPathItem{} }
-func (m *NamedPathItem) String() string { return proto.CompactTextString(m) }
-func (*NamedPathItem) ProtoMessage() {}
-func (*NamedPathItem) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{23}
+func (x *NamedPathItem) Reset() {
+ *x = NamedPathItem{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[23]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *NamedPathItem) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_NamedPathItem.Unmarshal(m, b)
+func (x *NamedPathItem) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *NamedPathItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_NamedPathItem.Marshal(b, m, deterministic)
-}
-func (m *NamedPathItem) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NamedPathItem.Merge(m, src)
-}
-func (m *NamedPathItem) XXX_Size() int {
- return xxx_messageInfo_NamedPathItem.Size(m)
-}
-func (m *NamedPathItem) XXX_DiscardUnknown() {
- xxx_messageInfo_NamedPathItem.DiscardUnknown(m)
+
+func (*NamedPathItem) ProtoMessage() {}
+
+func (x *NamedPathItem) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[23]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_NamedPathItem proto.InternalMessageInfo
+// Deprecated: Use NamedPathItem.ProtoReflect.Descriptor instead.
+func (*NamedPathItem) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{23}
+}
-func (m *NamedPathItem) GetName() string {
- if m != nil {
- return m.Name
+func (x *NamedPathItem) GetName() string {
+ if x != nil {
+ return x.Name
}
return ""
}
-func (m *NamedPathItem) GetValue() *PathItem {
- if m != nil {
- return m.Value
+func (x *NamedPathItem) GetValue() *PathItem {
+ if x != nil {
+ return x.Value
}
return nil
}
// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs.
type NamedResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
// Map key
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Mapped value
- Value *Response `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Value *Response `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}
-func (m *NamedResponse) Reset() { *m = NamedResponse{} }
-func (m *NamedResponse) String() string { return proto.CompactTextString(m) }
-func (*NamedResponse) ProtoMessage() {}
-func (*NamedResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{24}
+func (x *NamedResponse) Reset() {
+ *x = NamedResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *NamedResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_NamedResponse.Unmarshal(m, b)
-}
-func (m *NamedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_NamedResponse.Marshal(b, m, deterministic)
-}
-func (m *NamedResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NamedResponse.Merge(m, src)
-}
-func (m *NamedResponse) XXX_Size() int {
- return xxx_messageInfo_NamedResponse.Size(m)
+func (x *NamedResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *NamedResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_NamedResponse.DiscardUnknown(m)
+
+func (*NamedResponse) ProtoMessage() {}
+
+func (x *NamedResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[24]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_NamedResponse proto.InternalMessageInfo
+// Deprecated: Use NamedResponse.ProtoReflect.Descriptor instead.
+func (*NamedResponse) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{24}
+}
-func (m *NamedResponse) GetName() string {
- if m != nil {
- return m.Name
+func (x *NamedResponse) GetName() string {
+ if x != nil {
+ return x.Name
}
return ""
}
-func (m *NamedResponse) GetValue() *Response {
- if m != nil {
- return m.Value
+func (x *NamedResponse) GetValue() *Response {
+ if x != nil {
+ return x.Value
}
return nil
}
// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs.
type NamedResponseValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
// Map key
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Mapped value
- Value *ResponseValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Value *ResponseValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}
-func (m *NamedResponseValue) Reset() { *m = NamedResponseValue{} }
-func (m *NamedResponseValue) String() string { return proto.CompactTextString(m) }
-func (*NamedResponseValue) ProtoMessage() {}
-func (*NamedResponseValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{25}
+func (x *NamedResponseValue) Reset() {
+ *x = NamedResponseValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[25]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *NamedResponseValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_NamedResponseValue.Unmarshal(m, b)
-}
-func (m *NamedResponseValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_NamedResponseValue.Marshal(b, m, deterministic)
-}
-func (m *NamedResponseValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NamedResponseValue.Merge(m, src)
+func (x *NamedResponseValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *NamedResponseValue) XXX_Size() int {
- return xxx_messageInfo_NamedResponseValue.Size(m)
-}
-func (m *NamedResponseValue) XXX_DiscardUnknown() {
- xxx_messageInfo_NamedResponseValue.DiscardUnknown(m)
+
+func (*NamedResponseValue) ProtoMessage() {}
+
+func (x *NamedResponseValue) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[25]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_NamedResponseValue proto.InternalMessageInfo
+// Deprecated: Use NamedResponseValue.ProtoReflect.Descriptor instead.
+func (*NamedResponseValue) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{25}
+}
-func (m *NamedResponseValue) GetName() string {
- if m != nil {
- return m.Name
+func (x *NamedResponseValue) GetName() string {
+ if x != nil {
+ return x.Name
}
return ""
}
-func (m *NamedResponseValue) GetValue() *ResponseValue {
- if m != nil {
- return m.Value
+func (x *NamedResponseValue) GetValue() *ResponseValue {
+ if x != nil {
+ return x.Value
}
return nil
}
// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs.
type NamedSchema struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
// Map key
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Mapped value
- Value *Schema `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Value *Schema `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}
-func (m *NamedSchema) Reset() { *m = NamedSchema{} }
-func (m *NamedSchema) String() string { return proto.CompactTextString(m) }
-func (*NamedSchema) ProtoMessage() {}
-func (*NamedSchema) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{26}
+func (x *NamedSchema) Reset() {
+ *x = NamedSchema{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[26]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *NamedSchema) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_NamedSchema.Unmarshal(m, b)
-}
-func (m *NamedSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_NamedSchema.Marshal(b, m, deterministic)
+func (x *NamedSchema) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *NamedSchema) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NamedSchema.Merge(m, src)
-}
-func (m *NamedSchema) XXX_Size() int {
- return xxx_messageInfo_NamedSchema.Size(m)
-}
-func (m *NamedSchema) XXX_DiscardUnknown() {
- xxx_messageInfo_NamedSchema.DiscardUnknown(m)
+
+func (*NamedSchema) ProtoMessage() {}
+
+func (x *NamedSchema) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[26]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_NamedSchema proto.InternalMessageInfo
+// Deprecated: Use NamedSchema.ProtoReflect.Descriptor instead.
+func (*NamedSchema) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{26}
+}
-func (m *NamedSchema) GetName() string {
- if m != nil {
- return m.Name
+func (x *NamedSchema) GetName() string {
+ if x != nil {
+ return x.Name
}
return ""
}
-func (m *NamedSchema) GetValue() *Schema {
- if m != nil {
- return m.Value
+func (x *NamedSchema) GetValue() *Schema {
+ if x != nil {
+ return x.Value
}
return nil
}
// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs.
type NamedSecurityDefinitionsItem struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
// Map key
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Mapped value
- Value *SecurityDefinitionsItem `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Value *SecurityDefinitionsItem `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}
-func (m *NamedSecurityDefinitionsItem) Reset() { *m = NamedSecurityDefinitionsItem{} }
-func (m *NamedSecurityDefinitionsItem) String() string { return proto.CompactTextString(m) }
-func (*NamedSecurityDefinitionsItem) ProtoMessage() {}
-func (*NamedSecurityDefinitionsItem) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{27}
+func (x *NamedSecurityDefinitionsItem) Reset() {
+ *x = NamedSecurityDefinitionsItem{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *NamedSecurityDefinitionsItem) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_NamedSecurityDefinitionsItem.Unmarshal(m, b)
+func (x *NamedSecurityDefinitionsItem) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *NamedSecurityDefinitionsItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_NamedSecurityDefinitionsItem.Marshal(b, m, deterministic)
-}
-func (m *NamedSecurityDefinitionsItem) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NamedSecurityDefinitionsItem.Merge(m, src)
-}
-func (m *NamedSecurityDefinitionsItem) XXX_Size() int {
- return xxx_messageInfo_NamedSecurityDefinitionsItem.Size(m)
-}
-func (m *NamedSecurityDefinitionsItem) XXX_DiscardUnknown() {
- xxx_messageInfo_NamedSecurityDefinitionsItem.DiscardUnknown(m)
+
+func (*NamedSecurityDefinitionsItem) ProtoMessage() {}
+
+func (x *NamedSecurityDefinitionsItem) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[27]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_NamedSecurityDefinitionsItem proto.InternalMessageInfo
+// Deprecated: Use NamedSecurityDefinitionsItem.ProtoReflect.Descriptor instead.
+func (*NamedSecurityDefinitionsItem) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{27}
+}
-func (m *NamedSecurityDefinitionsItem) GetName() string {
- if m != nil {
- return m.Name
+func (x *NamedSecurityDefinitionsItem) GetName() string {
+ if x != nil {
+ return x.Name
}
return ""
}
-func (m *NamedSecurityDefinitionsItem) GetValue() *SecurityDefinitionsItem {
- if m != nil {
- return m.Value
+func (x *NamedSecurityDefinitionsItem) GetValue() *SecurityDefinitionsItem {
+ if x != nil {
+ return x.Value
}
return nil
}
// Automatically-generated message used to represent maps of string as ordered (name,value) pairs.
type NamedString struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
// Map key
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Mapped value
- Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}
-func (m *NamedString) Reset() { *m = NamedString{} }
-func (m *NamedString) String() string { return proto.CompactTextString(m) }
-func (*NamedString) ProtoMessage() {}
-func (*NamedString) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{28}
+func (x *NamedString) Reset() {
+ *x = NamedString{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[28]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *NamedString) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_NamedString.Unmarshal(m, b)
-}
-func (m *NamedString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_NamedString.Marshal(b, m, deterministic)
-}
-func (m *NamedString) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NamedString.Merge(m, src)
-}
-func (m *NamedString) XXX_Size() int {
- return xxx_messageInfo_NamedString.Size(m)
+func (x *NamedString) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *NamedString) XXX_DiscardUnknown() {
- xxx_messageInfo_NamedString.DiscardUnknown(m)
+
+func (*NamedString) ProtoMessage() {}
+
+func (x *NamedString) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[28]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_NamedString proto.InternalMessageInfo
+// Deprecated: Use NamedString.ProtoReflect.Descriptor instead.
+func (*NamedString) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{28}
+}
-func (m *NamedString) GetName() string {
- if m != nil {
- return m.Name
+func (x *NamedString) GetName() string {
+ if x != nil {
+ return x.Name
}
return ""
}
-func (m *NamedString) GetValue() string {
- if m != nil {
- return m.Value
+func (x *NamedString) GetValue() string {
+ if x != nil {
+ return x.Value
}
return ""
}
// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs.
type NamedStringArray struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
// Map key
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Mapped value
- Value *StringArray `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Value *StringArray `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}
-func (m *NamedStringArray) Reset() { *m = NamedStringArray{} }
-func (m *NamedStringArray) String() string { return proto.CompactTextString(m) }
-func (*NamedStringArray) ProtoMessage() {}
-func (*NamedStringArray) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{29}
+func (x *NamedStringArray) Reset() {
+ *x = NamedStringArray{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[29]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *NamedStringArray) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_NamedStringArray.Unmarshal(m, b)
-}
-func (m *NamedStringArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_NamedStringArray.Marshal(b, m, deterministic)
-}
-func (m *NamedStringArray) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NamedStringArray.Merge(m, src)
+func (x *NamedStringArray) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *NamedStringArray) XXX_Size() int {
- return xxx_messageInfo_NamedStringArray.Size(m)
-}
-func (m *NamedStringArray) XXX_DiscardUnknown() {
- xxx_messageInfo_NamedStringArray.DiscardUnknown(m)
+
+func (*NamedStringArray) ProtoMessage() {}
+
+func (x *NamedStringArray) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[29]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_NamedStringArray proto.InternalMessageInfo
+// Deprecated: Use NamedStringArray.ProtoReflect.Descriptor instead.
+func (*NamedStringArray) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{29}
+}
-func (m *NamedStringArray) GetName() string {
- if m != nil {
- return m.Name
+func (x *NamedStringArray) GetName() string {
+ if x != nil {
+ return x.Name
}
return ""
}
-func (m *NamedStringArray) GetValue() *StringArray {
- if m != nil {
- return m.Value
+func (x *NamedStringArray) GetValue() *StringArray {
+ if x != nil {
+ return x.Value
}
return nil
}
type NonBodyParameter struct {
- // Types that are valid to be assigned to Oneof:
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Oneof:
// *NonBodyParameter_HeaderParameterSubSchema
// *NonBodyParameter_FormDataParameterSubSchema
// *NonBodyParameter_QueryParameterSubSchema
// *NonBodyParameter_PathParameterSubSchema
- Oneof isNonBodyParameter_Oneof `protobuf_oneof:"oneof"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Oneof isNonBodyParameter_Oneof `protobuf_oneof:"oneof"`
}
-func (m *NonBodyParameter) Reset() { *m = NonBodyParameter{} }
-func (m *NonBodyParameter) String() string { return proto.CompactTextString(m) }
-func (*NonBodyParameter) ProtoMessage() {}
-func (*NonBodyParameter) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{30}
+func (x *NonBodyParameter) Reset() {
+ *x = NonBodyParameter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[30]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *NonBodyParameter) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_NonBodyParameter.Unmarshal(m, b)
-}
-func (m *NonBodyParameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_NonBodyParameter.Marshal(b, m, deterministic)
-}
-func (m *NonBodyParameter) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NonBodyParameter.Merge(m, src)
-}
-func (m *NonBodyParameter) XXX_Size() int {
- return xxx_messageInfo_NonBodyParameter.Size(m)
-}
-func (m *NonBodyParameter) XXX_DiscardUnknown() {
- xxx_messageInfo_NonBodyParameter.DiscardUnknown(m)
+func (x *NonBodyParameter) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-var xxx_messageInfo_NonBodyParameter proto.InternalMessageInfo
+func (*NonBodyParameter) ProtoMessage() {}
-type isNonBodyParameter_Oneof interface {
- isNonBodyParameter_Oneof()
+func (x *NonBodyParameter) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[30]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-type NonBodyParameter_HeaderParameterSubSchema struct {
- HeaderParameterSubSchema *HeaderParameterSubSchema `protobuf:"bytes,1,opt,name=header_parameter_sub_schema,json=headerParameterSubSchema,proto3,oneof"`
-}
-
-type NonBodyParameter_FormDataParameterSubSchema struct {
- FormDataParameterSubSchema *FormDataParameterSubSchema `protobuf:"bytes,2,opt,name=form_data_parameter_sub_schema,json=formDataParameterSubSchema,proto3,oneof"`
-}
-
-type NonBodyParameter_QueryParameterSubSchema struct {
- QueryParameterSubSchema *QueryParameterSubSchema `protobuf:"bytes,3,opt,name=query_parameter_sub_schema,json=queryParameterSubSchema,proto3,oneof"`
-}
-
-type NonBodyParameter_PathParameterSubSchema struct {
- PathParameterSubSchema *PathParameterSubSchema `protobuf:"bytes,4,opt,name=path_parameter_sub_schema,json=pathParameterSubSchema,proto3,oneof"`
+// Deprecated: Use NonBodyParameter.ProtoReflect.Descriptor instead.
+func (*NonBodyParameter) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{30}
}
-func (*NonBodyParameter_HeaderParameterSubSchema) isNonBodyParameter_Oneof() {}
-
-func (*NonBodyParameter_FormDataParameterSubSchema) isNonBodyParameter_Oneof() {}
-
-func (*NonBodyParameter_QueryParameterSubSchema) isNonBodyParameter_Oneof() {}
-
-func (*NonBodyParameter_PathParameterSubSchema) isNonBodyParameter_Oneof() {}
-
func (m *NonBodyParameter) GetOneof() isNonBodyParameter_Oneof {
if m != nil {
return m.Oneof
@@ -2335,408 +2563,470 @@ func (m *NonBodyParameter) GetOneof() isNonBodyParameter_Oneof {
return nil
}
-func (m *NonBodyParameter) GetHeaderParameterSubSchema() *HeaderParameterSubSchema {
- if x, ok := m.GetOneof().(*NonBodyParameter_HeaderParameterSubSchema); ok {
+func (x *NonBodyParameter) GetHeaderParameterSubSchema() *HeaderParameterSubSchema {
+ if x, ok := x.GetOneof().(*NonBodyParameter_HeaderParameterSubSchema); ok {
return x.HeaderParameterSubSchema
}
return nil
}
-func (m *NonBodyParameter) GetFormDataParameterSubSchema() *FormDataParameterSubSchema {
- if x, ok := m.GetOneof().(*NonBodyParameter_FormDataParameterSubSchema); ok {
+func (x *NonBodyParameter) GetFormDataParameterSubSchema() *FormDataParameterSubSchema {
+ if x, ok := x.GetOneof().(*NonBodyParameter_FormDataParameterSubSchema); ok {
return x.FormDataParameterSubSchema
}
return nil
}
-func (m *NonBodyParameter) GetQueryParameterSubSchema() *QueryParameterSubSchema {
- if x, ok := m.GetOneof().(*NonBodyParameter_QueryParameterSubSchema); ok {
+func (x *NonBodyParameter) GetQueryParameterSubSchema() *QueryParameterSubSchema {
+ if x, ok := x.GetOneof().(*NonBodyParameter_QueryParameterSubSchema); ok {
return x.QueryParameterSubSchema
}
return nil
}
-func (m *NonBodyParameter) GetPathParameterSubSchema() *PathParameterSubSchema {
- if x, ok := m.GetOneof().(*NonBodyParameter_PathParameterSubSchema); ok {
+func (x *NonBodyParameter) GetPathParameterSubSchema() *PathParameterSubSchema {
+ if x, ok := x.GetOneof().(*NonBodyParameter_PathParameterSubSchema); ok {
return x.PathParameterSubSchema
}
return nil
}
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*NonBodyParameter) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*NonBodyParameter_HeaderParameterSubSchema)(nil),
- (*NonBodyParameter_FormDataParameterSubSchema)(nil),
- (*NonBodyParameter_QueryParameterSubSchema)(nil),
- (*NonBodyParameter_PathParameterSubSchema)(nil),
- }
+type isNonBodyParameter_Oneof interface {
+ isNonBodyParameter_Oneof()
}
-type Oauth2AccessCodeSecurity struct {
- Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
- Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"`
- Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"`
- AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"`
- TokenUrl string `protobuf:"bytes,5,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"`
- Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"`
- VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Oauth2AccessCodeSecurity) Reset() { *m = Oauth2AccessCodeSecurity{} }
-func (m *Oauth2AccessCodeSecurity) String() string { return proto.CompactTextString(m) }
-func (*Oauth2AccessCodeSecurity) ProtoMessage() {}
-func (*Oauth2AccessCodeSecurity) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{31}
+type NonBodyParameter_HeaderParameterSubSchema struct {
+ HeaderParameterSubSchema *HeaderParameterSubSchema `protobuf:"bytes,1,opt,name=header_parameter_sub_schema,json=headerParameterSubSchema,proto3,oneof"`
}
-func (m *Oauth2AccessCodeSecurity) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Oauth2AccessCodeSecurity.Unmarshal(m, b)
+type NonBodyParameter_FormDataParameterSubSchema struct {
+ FormDataParameterSubSchema *FormDataParameterSubSchema `protobuf:"bytes,2,opt,name=form_data_parameter_sub_schema,json=formDataParameterSubSchema,proto3,oneof"`
+}
+
+type NonBodyParameter_QueryParameterSubSchema struct {
+ QueryParameterSubSchema *QueryParameterSubSchema `protobuf:"bytes,3,opt,name=query_parameter_sub_schema,json=queryParameterSubSchema,proto3,oneof"`
+}
+
+type NonBodyParameter_PathParameterSubSchema struct {
+ PathParameterSubSchema *PathParameterSubSchema `protobuf:"bytes,4,opt,name=path_parameter_sub_schema,json=pathParameterSubSchema,proto3,oneof"`
}
-func (m *Oauth2AccessCodeSecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Oauth2AccessCodeSecurity.Marshal(b, m, deterministic)
+
+func (*NonBodyParameter_HeaderParameterSubSchema) isNonBodyParameter_Oneof() {}
+
+func (*NonBodyParameter_FormDataParameterSubSchema) isNonBodyParameter_Oneof() {}
+
+func (*NonBodyParameter_QueryParameterSubSchema) isNonBodyParameter_Oneof() {}
+
+func (*NonBodyParameter_PathParameterSubSchema) isNonBodyParameter_Oneof() {}
+
+type Oauth2AccessCodeSecurity struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"`
+ Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"`
+ AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"`
+ TokenUrl string `protobuf:"bytes,5,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"`
+ Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"`
+ VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
}
-func (m *Oauth2AccessCodeSecurity) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Oauth2AccessCodeSecurity.Merge(m, src)
+
+func (x *Oauth2AccessCodeSecurity) Reset() {
+ *x = Oauth2AccessCodeSecurity{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[31]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Oauth2AccessCodeSecurity) XXX_Size() int {
- return xxx_messageInfo_Oauth2AccessCodeSecurity.Size(m)
+
+func (x *Oauth2AccessCodeSecurity) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Oauth2AccessCodeSecurity) XXX_DiscardUnknown() {
- xxx_messageInfo_Oauth2AccessCodeSecurity.DiscardUnknown(m)
+
+func (*Oauth2AccessCodeSecurity) ProtoMessage() {}
+
+func (x *Oauth2AccessCodeSecurity) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[31]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Oauth2AccessCodeSecurity proto.InternalMessageInfo
+// Deprecated: Use Oauth2AccessCodeSecurity.ProtoReflect.Descriptor instead.
+func (*Oauth2AccessCodeSecurity) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{31}
+}
-func (m *Oauth2AccessCodeSecurity) GetType() string {
- if m != nil {
- return m.Type
+func (x *Oauth2AccessCodeSecurity) GetType() string {
+ if x != nil {
+ return x.Type
}
return ""
}
-func (m *Oauth2AccessCodeSecurity) GetFlow() string {
- if m != nil {
- return m.Flow
+func (x *Oauth2AccessCodeSecurity) GetFlow() string {
+ if x != nil {
+ return x.Flow
}
return ""
}
-func (m *Oauth2AccessCodeSecurity) GetScopes() *Oauth2Scopes {
- if m != nil {
- return m.Scopes
+func (x *Oauth2AccessCodeSecurity) GetScopes() *Oauth2Scopes {
+ if x != nil {
+ return x.Scopes
}
return nil
}
-func (m *Oauth2AccessCodeSecurity) GetAuthorizationUrl() string {
- if m != nil {
- return m.AuthorizationUrl
+func (x *Oauth2AccessCodeSecurity) GetAuthorizationUrl() string {
+ if x != nil {
+ return x.AuthorizationUrl
}
return ""
}
-func (m *Oauth2AccessCodeSecurity) GetTokenUrl() string {
- if m != nil {
- return m.TokenUrl
+func (x *Oauth2AccessCodeSecurity) GetTokenUrl() string {
+ if x != nil {
+ return x.TokenUrl
}
return ""
}
-func (m *Oauth2AccessCodeSecurity) GetDescription() string {
- if m != nil {
- return m.Description
+func (x *Oauth2AccessCodeSecurity) GetDescription() string {
+ if x != nil {
+ return x.Description
}
return ""
}
-func (m *Oauth2AccessCodeSecurity) GetVendorExtension() []*NamedAny {
- if m != nil {
- return m.VendorExtension
+func (x *Oauth2AccessCodeSecurity) GetVendorExtension() []*NamedAny {
+ if x != nil {
+ return x.VendorExtension
}
return nil
}
type Oauth2ApplicationSecurity struct {
- Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
- Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"`
- Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"`
- TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"`
- Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"`
- VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Oauth2ApplicationSecurity) Reset() { *m = Oauth2ApplicationSecurity{} }
-func (m *Oauth2ApplicationSecurity) String() string { return proto.CompactTextString(m) }
-func (*Oauth2ApplicationSecurity) ProtoMessage() {}
-func (*Oauth2ApplicationSecurity) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{32}
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Oauth2ApplicationSecurity) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Oauth2ApplicationSecurity.Unmarshal(m, b)
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"`
+ Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"`
+ TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"`
+ Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"`
+ VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
}
-func (m *Oauth2ApplicationSecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Oauth2ApplicationSecurity.Marshal(b, m, deterministic)
-}
-func (m *Oauth2ApplicationSecurity) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Oauth2ApplicationSecurity.Merge(m, src)
+
+func (x *Oauth2ApplicationSecurity) Reset() {
+ *x = Oauth2ApplicationSecurity{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[32]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Oauth2ApplicationSecurity) XXX_Size() int {
- return xxx_messageInfo_Oauth2ApplicationSecurity.Size(m)
+
+func (x *Oauth2ApplicationSecurity) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Oauth2ApplicationSecurity) XXX_DiscardUnknown() {
- xxx_messageInfo_Oauth2ApplicationSecurity.DiscardUnknown(m)
+
+func (*Oauth2ApplicationSecurity) ProtoMessage() {}
+
+func (x *Oauth2ApplicationSecurity) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[32]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Oauth2ApplicationSecurity proto.InternalMessageInfo
+// Deprecated: Use Oauth2ApplicationSecurity.ProtoReflect.Descriptor instead.
+func (*Oauth2ApplicationSecurity) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{32}
+}
-func (m *Oauth2ApplicationSecurity) GetType() string {
- if m != nil {
- return m.Type
+func (x *Oauth2ApplicationSecurity) GetType() string {
+ if x != nil {
+ return x.Type
}
return ""
}
-func (m *Oauth2ApplicationSecurity) GetFlow() string {
- if m != nil {
- return m.Flow
+func (x *Oauth2ApplicationSecurity) GetFlow() string {
+ if x != nil {
+ return x.Flow
}
return ""
}
-func (m *Oauth2ApplicationSecurity) GetScopes() *Oauth2Scopes {
- if m != nil {
- return m.Scopes
+func (x *Oauth2ApplicationSecurity) GetScopes() *Oauth2Scopes {
+ if x != nil {
+ return x.Scopes
}
return nil
}
-func (m *Oauth2ApplicationSecurity) GetTokenUrl() string {
- if m != nil {
- return m.TokenUrl
+func (x *Oauth2ApplicationSecurity) GetTokenUrl() string {
+ if x != nil {
+ return x.TokenUrl
}
return ""
}
-func (m *Oauth2ApplicationSecurity) GetDescription() string {
- if m != nil {
- return m.Description
+func (x *Oauth2ApplicationSecurity) GetDescription() string {
+ if x != nil {
+ return x.Description
}
return ""
}
-func (m *Oauth2ApplicationSecurity) GetVendorExtension() []*NamedAny {
- if m != nil {
- return m.VendorExtension
+func (x *Oauth2ApplicationSecurity) GetVendorExtension() []*NamedAny {
+ if x != nil {
+ return x.VendorExtension
}
return nil
}
type Oauth2ImplicitSecurity struct {
- Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
- Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"`
- Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"`
- AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"`
- Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"`
- VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Oauth2ImplicitSecurity) Reset() { *m = Oauth2ImplicitSecurity{} }
-func (m *Oauth2ImplicitSecurity) String() string { return proto.CompactTextString(m) }
-func (*Oauth2ImplicitSecurity) ProtoMessage() {}
-func (*Oauth2ImplicitSecurity) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{33}
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Oauth2ImplicitSecurity) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Oauth2ImplicitSecurity.Unmarshal(m, b)
-}
-func (m *Oauth2ImplicitSecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Oauth2ImplicitSecurity.Marshal(b, m, deterministic)
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"`
+ Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"`
+ AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"`
+ Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"`
+ VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
}
-func (m *Oauth2ImplicitSecurity) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Oauth2ImplicitSecurity.Merge(m, src)
+
+func (x *Oauth2ImplicitSecurity) Reset() {
+ *x = Oauth2ImplicitSecurity{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[33]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Oauth2ImplicitSecurity) XXX_Size() int {
- return xxx_messageInfo_Oauth2ImplicitSecurity.Size(m)
+
+func (x *Oauth2ImplicitSecurity) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Oauth2ImplicitSecurity) XXX_DiscardUnknown() {
- xxx_messageInfo_Oauth2ImplicitSecurity.DiscardUnknown(m)
+
+func (*Oauth2ImplicitSecurity) ProtoMessage() {}
+
+func (x *Oauth2ImplicitSecurity) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[33]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Oauth2ImplicitSecurity proto.InternalMessageInfo
+// Deprecated: Use Oauth2ImplicitSecurity.ProtoReflect.Descriptor instead.
+func (*Oauth2ImplicitSecurity) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{33}
+}
-func (m *Oauth2ImplicitSecurity) GetType() string {
- if m != nil {
- return m.Type
+func (x *Oauth2ImplicitSecurity) GetType() string {
+ if x != nil {
+ return x.Type
}
return ""
}
-func (m *Oauth2ImplicitSecurity) GetFlow() string {
- if m != nil {
- return m.Flow
+func (x *Oauth2ImplicitSecurity) GetFlow() string {
+ if x != nil {
+ return x.Flow
}
return ""
}
-func (m *Oauth2ImplicitSecurity) GetScopes() *Oauth2Scopes {
- if m != nil {
- return m.Scopes
+func (x *Oauth2ImplicitSecurity) GetScopes() *Oauth2Scopes {
+ if x != nil {
+ return x.Scopes
}
return nil
}
-func (m *Oauth2ImplicitSecurity) GetAuthorizationUrl() string {
- if m != nil {
- return m.AuthorizationUrl
+func (x *Oauth2ImplicitSecurity) GetAuthorizationUrl() string {
+ if x != nil {
+ return x.AuthorizationUrl
}
return ""
}
-func (m *Oauth2ImplicitSecurity) GetDescription() string {
- if m != nil {
- return m.Description
+func (x *Oauth2ImplicitSecurity) GetDescription() string {
+ if x != nil {
+ return x.Description
}
return ""
}
-func (m *Oauth2ImplicitSecurity) GetVendorExtension() []*NamedAny {
- if m != nil {
- return m.VendorExtension
+func (x *Oauth2ImplicitSecurity) GetVendorExtension() []*NamedAny {
+ if x != nil {
+ return x.VendorExtension
}
return nil
}
type Oauth2PasswordSecurity struct {
- Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
- Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"`
- Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"`
- TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"`
- Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"`
- VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Oauth2PasswordSecurity) Reset() { *m = Oauth2PasswordSecurity{} }
-func (m *Oauth2PasswordSecurity) String() string { return proto.CompactTextString(m) }
-func (*Oauth2PasswordSecurity) ProtoMessage() {}
-func (*Oauth2PasswordSecurity) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{34}
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Oauth2PasswordSecurity) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Oauth2PasswordSecurity.Unmarshal(m, b)
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"`
+ Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"`
+ TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"`
+ Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"`
+ VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
}
-func (m *Oauth2PasswordSecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Oauth2PasswordSecurity.Marshal(b, m, deterministic)
-}
-func (m *Oauth2PasswordSecurity) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Oauth2PasswordSecurity.Merge(m, src)
+
+func (x *Oauth2PasswordSecurity) Reset() {
+ *x = Oauth2PasswordSecurity{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[34]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Oauth2PasswordSecurity) XXX_Size() int {
- return xxx_messageInfo_Oauth2PasswordSecurity.Size(m)
+
+func (x *Oauth2PasswordSecurity) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Oauth2PasswordSecurity) XXX_DiscardUnknown() {
- xxx_messageInfo_Oauth2PasswordSecurity.DiscardUnknown(m)
+
+func (*Oauth2PasswordSecurity) ProtoMessage() {}
+
+func (x *Oauth2PasswordSecurity) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[34]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Oauth2PasswordSecurity proto.InternalMessageInfo
+// Deprecated: Use Oauth2PasswordSecurity.ProtoReflect.Descriptor instead.
+func (*Oauth2PasswordSecurity) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{34}
+}
-func (m *Oauth2PasswordSecurity) GetType() string {
- if m != nil {
- return m.Type
+func (x *Oauth2PasswordSecurity) GetType() string {
+ if x != nil {
+ return x.Type
}
return ""
}
-func (m *Oauth2PasswordSecurity) GetFlow() string {
- if m != nil {
- return m.Flow
+func (x *Oauth2PasswordSecurity) GetFlow() string {
+ if x != nil {
+ return x.Flow
}
return ""
}
-func (m *Oauth2PasswordSecurity) GetScopes() *Oauth2Scopes {
- if m != nil {
- return m.Scopes
+func (x *Oauth2PasswordSecurity) GetScopes() *Oauth2Scopes {
+ if x != nil {
+ return x.Scopes
}
return nil
}
-func (m *Oauth2PasswordSecurity) GetTokenUrl() string {
- if m != nil {
- return m.TokenUrl
+func (x *Oauth2PasswordSecurity) GetTokenUrl() string {
+ if x != nil {
+ return x.TokenUrl
}
return ""
}
-func (m *Oauth2PasswordSecurity) GetDescription() string {
- if m != nil {
- return m.Description
+func (x *Oauth2PasswordSecurity) GetDescription() string {
+ if x != nil {
+ return x.Description
}
return ""
}
-func (m *Oauth2PasswordSecurity) GetVendorExtension() []*NamedAny {
- if m != nil {
- return m.VendorExtension
+func (x *Oauth2PasswordSecurity) GetVendorExtension() []*NamedAny {
+ if x != nil {
+ return x.VendorExtension
}
return nil
}
type Oauth2Scopes struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
AdditionalProperties []*NamedString `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
}
-func (m *Oauth2Scopes) Reset() { *m = Oauth2Scopes{} }
-func (m *Oauth2Scopes) String() string { return proto.CompactTextString(m) }
-func (*Oauth2Scopes) ProtoMessage() {}
-func (*Oauth2Scopes) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{35}
+func (x *Oauth2Scopes) Reset() {
+ *x = Oauth2Scopes{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[35]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Oauth2Scopes) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Oauth2Scopes.Unmarshal(m, b)
-}
-func (m *Oauth2Scopes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Oauth2Scopes.Marshal(b, m, deterministic)
-}
-func (m *Oauth2Scopes) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Oauth2Scopes.Merge(m, src)
+func (x *Oauth2Scopes) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Oauth2Scopes) XXX_Size() int {
- return xxx_messageInfo_Oauth2Scopes.Size(m)
-}
-func (m *Oauth2Scopes) XXX_DiscardUnknown() {
- xxx_messageInfo_Oauth2Scopes.DiscardUnknown(m)
+
+func (*Oauth2Scopes) ProtoMessage() {}
+
+func (x *Oauth2Scopes) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[35]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Oauth2Scopes proto.InternalMessageInfo
+// Deprecated: Use Oauth2Scopes.ProtoReflect.Descriptor instead.
+func (*Oauth2Scopes) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{35}
+}
-func (m *Oauth2Scopes) GetAdditionalProperties() []*NamedString {
- if m != nil {
- return m.AdditionalProperties
+func (x *Oauth2Scopes) GetAdditionalProperties() []*NamedString {
+ if x != nil {
+ return x.AdditionalProperties
}
return nil
}
type Operation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
Tags []string `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty"`
// A brief summary of the operation.
Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"`
@@ -2753,182 +3043,178 @@ type Operation struct {
Parameters []*ParametersItem `protobuf:"bytes,8,rep,name=parameters,proto3" json:"parameters,omitempty"`
Responses *Responses `protobuf:"bytes,9,opt,name=responses,proto3" json:"responses,omitempty"`
// The transfer protocol of the API.
- Schemes []string `protobuf:"bytes,10,rep,name=schemes,proto3" json:"schemes,omitempty"`
- Deprecated bool `protobuf:"varint,11,opt,name=deprecated,proto3" json:"deprecated,omitempty"`
- Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"`
- VendorExtension []*NamedAny `protobuf:"bytes,13,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Operation) Reset() { *m = Operation{} }
-func (m *Operation) String() string { return proto.CompactTextString(m) }
-func (*Operation) ProtoMessage() {}
-func (*Operation) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{36}
+ Schemes []string `protobuf:"bytes,10,rep,name=schemes,proto3" json:"schemes,omitempty"`
+ Deprecated bool `protobuf:"varint,11,opt,name=deprecated,proto3" json:"deprecated,omitempty"`
+ Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"`
+ VendorExtension []*NamedAny `protobuf:"bytes,13,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
}
-func (m *Operation) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Operation.Unmarshal(m, b)
-}
-func (m *Operation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Operation.Marshal(b, m, deterministic)
-}
-func (m *Operation) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Operation.Merge(m, src)
+func (x *Operation) Reset() {
+ *x = Operation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[36]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Operation) XXX_Size() int {
- return xxx_messageInfo_Operation.Size(m)
+
+func (x *Operation) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Operation) XXX_DiscardUnknown() {
- xxx_messageInfo_Operation.DiscardUnknown(m)
+
+func (*Operation) ProtoMessage() {}
+
+func (x *Operation) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[36]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Operation proto.InternalMessageInfo
+// Deprecated: Use Operation.ProtoReflect.Descriptor instead.
+func (*Operation) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{36}
+}
-func (m *Operation) GetTags() []string {
- if m != nil {
- return m.Tags
+func (x *Operation) GetTags() []string {
+ if x != nil {
+ return x.Tags
}
return nil
}
-func (m *Operation) GetSummary() string {
- if m != nil {
- return m.Summary
+func (x *Operation) GetSummary() string {
+ if x != nil {
+ return x.Summary
}
return ""
}
-func (m *Operation) GetDescription() string {
- if m != nil {
- return m.Description
+func (x *Operation) GetDescription() string {
+ if x != nil {
+ return x.Description
}
return ""
}
-func (m *Operation) GetExternalDocs() *ExternalDocs {
- if m != nil {
- return m.ExternalDocs
+func (x *Operation) GetExternalDocs() *ExternalDocs {
+ if x != nil {
+ return x.ExternalDocs
}
return nil
}
-func (m *Operation) GetOperationId() string {
- if m != nil {
- return m.OperationId
+func (x *Operation) GetOperationId() string {
+ if x != nil {
+ return x.OperationId
}
return ""
}
-func (m *Operation) GetProduces() []string {
- if m != nil {
- return m.Produces
+func (x *Operation) GetProduces() []string {
+ if x != nil {
+ return x.Produces
}
return nil
}
-func (m *Operation) GetConsumes() []string {
- if m != nil {
- return m.Consumes
+func (x *Operation) GetConsumes() []string {
+ if x != nil {
+ return x.Consumes
}
return nil
}
-func (m *Operation) GetParameters() []*ParametersItem {
- if m != nil {
- return m.Parameters
+func (x *Operation) GetParameters() []*ParametersItem {
+ if x != nil {
+ return x.Parameters
}
return nil
}
-func (m *Operation) GetResponses() *Responses {
- if m != nil {
- return m.Responses
+func (x *Operation) GetResponses() *Responses {
+ if x != nil {
+ return x.Responses
}
return nil
}
-func (m *Operation) GetSchemes() []string {
- if m != nil {
- return m.Schemes
+func (x *Operation) GetSchemes() []string {
+ if x != nil {
+ return x.Schemes
}
return nil
}
-func (m *Operation) GetDeprecated() bool {
- if m != nil {
- return m.Deprecated
+func (x *Operation) GetDeprecated() bool {
+ if x != nil {
+ return x.Deprecated
}
return false
}
-func (m *Operation) GetSecurity() []*SecurityRequirement {
- if m != nil {
- return m.Security
+func (x *Operation) GetSecurity() []*SecurityRequirement {
+ if x != nil {
+ return x.Security
}
return nil
}
-func (m *Operation) GetVendorExtension() []*NamedAny {
- if m != nil {
- return m.VendorExtension
+func (x *Operation) GetVendorExtension() []*NamedAny {
+ if x != nil {
+ return x.VendorExtension
}
return nil
}
type Parameter struct {
- // Types that are valid to be assigned to Oneof:
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Oneof:
// *Parameter_BodyParameter
// *Parameter_NonBodyParameter
- Oneof isParameter_Oneof `protobuf_oneof:"oneof"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Oneof isParameter_Oneof `protobuf_oneof:"oneof"`
}
-func (m *Parameter) Reset() { *m = Parameter{} }
-func (m *Parameter) String() string { return proto.CompactTextString(m) }
-func (*Parameter) ProtoMessage() {}
-func (*Parameter) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{37}
+func (x *Parameter) Reset() {
+ *x = Parameter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[37]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Parameter) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Parameter.Unmarshal(m, b)
-}
-func (m *Parameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Parameter.Marshal(b, m, deterministic)
-}
-func (m *Parameter) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Parameter.Merge(m, src)
-}
-func (m *Parameter) XXX_Size() int {
- return xxx_messageInfo_Parameter.Size(m)
-}
-func (m *Parameter) XXX_DiscardUnknown() {
- xxx_messageInfo_Parameter.DiscardUnknown(m)
+func (x *Parameter) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-var xxx_messageInfo_Parameter proto.InternalMessageInfo
-
-type isParameter_Oneof interface {
- isParameter_Oneof()
-}
+func (*Parameter) ProtoMessage() {}
-type Parameter_BodyParameter struct {
- BodyParameter *BodyParameter `protobuf:"bytes,1,opt,name=body_parameter,json=bodyParameter,proto3,oneof"`
+func (x *Parameter) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[37]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-type Parameter_NonBodyParameter struct {
- NonBodyParameter *NonBodyParameter `protobuf:"bytes,2,opt,name=non_body_parameter,json=nonBodyParameter,proto3,oneof"`
+// Deprecated: Use Parameter.ProtoReflect.Descriptor instead.
+func (*Parameter) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{37}
}
-func (*Parameter_BodyParameter) isParameter_Oneof() {}
-
-func (*Parameter_NonBodyParameter) isParameter_Oneof() {}
-
func (m *Parameter) GetOneof() isParameter_Oneof {
if m != nil {
return m.Oneof
@@ -2936,119 +3222,127 @@ func (m *Parameter) GetOneof() isParameter_Oneof {
return nil
}
-func (m *Parameter) GetBodyParameter() *BodyParameter {
- if x, ok := m.GetOneof().(*Parameter_BodyParameter); ok {
+func (x *Parameter) GetBodyParameter() *BodyParameter {
+ if x, ok := x.GetOneof().(*Parameter_BodyParameter); ok {
return x.BodyParameter
}
return nil
}
-func (m *Parameter) GetNonBodyParameter() *NonBodyParameter {
- if x, ok := m.GetOneof().(*Parameter_NonBodyParameter); ok {
+func (x *Parameter) GetNonBodyParameter() *NonBodyParameter {
+ if x, ok := x.GetOneof().(*Parameter_NonBodyParameter); ok {
return x.NonBodyParameter
}
return nil
}
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*Parameter) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*Parameter_BodyParameter)(nil),
- (*Parameter_NonBodyParameter)(nil),
- }
+type isParameter_Oneof interface {
+ isParameter_Oneof()
+}
+
+type Parameter_BodyParameter struct {
+ BodyParameter *BodyParameter `protobuf:"bytes,1,opt,name=body_parameter,json=bodyParameter,proto3,oneof"`
+}
+
+type Parameter_NonBodyParameter struct {
+ NonBodyParameter *NonBodyParameter `protobuf:"bytes,2,opt,name=non_body_parameter,json=nonBodyParameter,proto3,oneof"`
}
+func (*Parameter_BodyParameter) isParameter_Oneof() {}
+
+func (*Parameter_NonBodyParameter) isParameter_Oneof() {}
+
// One or more JSON representations for parameters
type ParameterDefinitions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
AdditionalProperties []*NamedParameter `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
}
-func (m *ParameterDefinitions) Reset() { *m = ParameterDefinitions{} }
-func (m *ParameterDefinitions) String() string { return proto.CompactTextString(m) }
-func (*ParameterDefinitions) ProtoMessage() {}
-func (*ParameterDefinitions) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{38}
+func (x *ParameterDefinitions) Reset() {
+ *x = ParameterDefinitions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[38]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *ParameterDefinitions) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ParameterDefinitions.Unmarshal(m, b)
+func (x *ParameterDefinitions) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *ParameterDefinitions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ParameterDefinitions.Marshal(b, m, deterministic)
-}
-func (m *ParameterDefinitions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ParameterDefinitions.Merge(m, src)
-}
-func (m *ParameterDefinitions) XXX_Size() int {
- return xxx_messageInfo_ParameterDefinitions.Size(m)
-}
-func (m *ParameterDefinitions) XXX_DiscardUnknown() {
- xxx_messageInfo_ParameterDefinitions.DiscardUnknown(m)
+
+func (*ParameterDefinitions) ProtoMessage() {}
+
+func (x *ParameterDefinitions) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[38]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_ParameterDefinitions proto.InternalMessageInfo
+// Deprecated: Use ParameterDefinitions.ProtoReflect.Descriptor instead.
+func (*ParameterDefinitions) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{38}
+}
-func (m *ParameterDefinitions) GetAdditionalProperties() []*NamedParameter {
- if m != nil {
- return m.AdditionalProperties
+func (x *ParameterDefinitions) GetAdditionalProperties() []*NamedParameter {
+ if x != nil {
+ return x.AdditionalProperties
}
return nil
}
type ParametersItem struct {
- // Types that are valid to be assigned to Oneof:
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Oneof:
// *ParametersItem_Parameter
// *ParametersItem_JsonReference
- Oneof isParametersItem_Oneof `protobuf_oneof:"oneof"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Oneof isParametersItem_Oneof `protobuf_oneof:"oneof"`
}
-func (m *ParametersItem) Reset() { *m = ParametersItem{} }
-func (m *ParametersItem) String() string { return proto.CompactTextString(m) }
-func (*ParametersItem) ProtoMessage() {}
-func (*ParametersItem) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{39}
+func (x *ParametersItem) Reset() {
+ *x = ParametersItem{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[39]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *ParametersItem) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ParametersItem.Unmarshal(m, b)
-}
-func (m *ParametersItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ParametersItem.Marshal(b, m, deterministic)
-}
-func (m *ParametersItem) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ParametersItem.Merge(m, src)
-}
-func (m *ParametersItem) XXX_Size() int {
- return xxx_messageInfo_ParametersItem.Size(m)
-}
-func (m *ParametersItem) XXX_DiscardUnknown() {
- xxx_messageInfo_ParametersItem.DiscardUnknown(m)
+func (x *ParametersItem) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-var xxx_messageInfo_ParametersItem proto.InternalMessageInfo
-
-type isParametersItem_Oneof interface {
- isParametersItem_Oneof()
-}
+func (*ParametersItem) ProtoMessage() {}
-type ParametersItem_Parameter struct {
- Parameter *Parameter `protobuf:"bytes,1,opt,name=parameter,proto3,oneof"`
+func (x *ParametersItem) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[39]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-type ParametersItem_JsonReference struct {
- JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,proto3,oneof"`
+// Deprecated: Use ParametersItem.ProtoReflect.Descriptor instead.
+func (*ParametersItem) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{39}
}
-func (*ParametersItem_Parameter) isParametersItem_Oneof() {}
-
-func (*ParametersItem_JsonReference) isParametersItem_Oneof() {}
-
func (m *ParametersItem) GetOneof() isParametersItem_Oneof {
if m != nil {
return m.Oneof
@@ -3056,29 +3350,41 @@ func (m *ParametersItem) GetOneof() isParametersItem_Oneof {
return nil
}
-func (m *ParametersItem) GetParameter() *Parameter {
- if x, ok := m.GetOneof().(*ParametersItem_Parameter); ok {
+func (x *ParametersItem) GetParameter() *Parameter {
+ if x, ok := x.GetOneof().(*ParametersItem_Parameter); ok {
return x.Parameter
}
return nil
}
-func (m *ParametersItem) GetJsonReference() *JsonReference {
- if x, ok := m.GetOneof().(*ParametersItem_JsonReference); ok {
+func (x *ParametersItem) GetJsonReference() *JsonReference {
+ if x, ok := x.GetOneof().(*ParametersItem_JsonReference); ok {
return x.JsonReference
}
return nil
}
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*ParametersItem) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*ParametersItem_Parameter)(nil),
- (*ParametersItem_JsonReference)(nil),
- }
+type isParametersItem_Oneof interface {
+ isParametersItem_Oneof()
+}
+
+type ParametersItem_Parameter struct {
+ Parameter *Parameter `protobuf:"bytes,1,opt,name=parameter,proto3,oneof"`
+}
+
+type ParametersItem_JsonReference struct {
+ JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,proto3,oneof"`
}
+func (*ParametersItem_Parameter) isParametersItem_Oneof() {}
+
+func (*ParametersItem_JsonReference) isParametersItem_Oneof() {}
+
type PathItem struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"`
Get *Operation `protobuf:"bytes,2,opt,name=get,proto3" json:"get,omitempty"`
Put *Operation `protobuf:"bytes,3,opt,name=put,proto3" json:"put,omitempty"`
@@ -3088,109 +3394,117 @@ type PathItem struct {
Head *Operation `protobuf:"bytes,7,opt,name=head,proto3" json:"head,omitempty"`
Patch *Operation `protobuf:"bytes,8,opt,name=patch,proto3" json:"patch,omitempty"`
// The parameters needed to send a valid API call.
- Parameters []*ParametersItem `protobuf:"bytes,9,rep,name=parameters,proto3" json:"parameters,omitempty"`
- VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Parameters []*ParametersItem `protobuf:"bytes,9,rep,name=parameters,proto3" json:"parameters,omitempty"`
+ VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
}
-func (m *PathItem) Reset() { *m = PathItem{} }
-func (m *PathItem) String() string { return proto.CompactTextString(m) }
-func (*PathItem) ProtoMessage() {}
-func (*PathItem) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{40}
+func (x *PathItem) Reset() {
+ *x = PathItem{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[40]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *PathItem) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_PathItem.Unmarshal(m, b)
-}
-func (m *PathItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_PathItem.Marshal(b, m, deterministic)
-}
-func (m *PathItem) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PathItem.Merge(m, src)
+func (x *PathItem) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *PathItem) XXX_Size() int {
- return xxx_messageInfo_PathItem.Size(m)
-}
-func (m *PathItem) XXX_DiscardUnknown() {
- xxx_messageInfo_PathItem.DiscardUnknown(m)
+
+func (*PathItem) ProtoMessage() {}
+
+func (x *PathItem) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[40]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_PathItem proto.InternalMessageInfo
+// Deprecated: Use PathItem.ProtoReflect.Descriptor instead.
+func (*PathItem) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{40}
+}
-func (m *PathItem) GetXRef() string {
- if m != nil {
- return m.XRef
+func (x *PathItem) GetXRef() string {
+ if x != nil {
+ return x.XRef
}
return ""
}
-func (m *PathItem) GetGet() *Operation {
- if m != nil {
- return m.Get
+func (x *PathItem) GetGet() *Operation {
+ if x != nil {
+ return x.Get
}
return nil
}
-func (m *PathItem) GetPut() *Operation {
- if m != nil {
- return m.Put
+func (x *PathItem) GetPut() *Operation {
+ if x != nil {
+ return x.Put
}
return nil
}
-func (m *PathItem) GetPost() *Operation {
- if m != nil {
- return m.Post
+func (x *PathItem) GetPost() *Operation {
+ if x != nil {
+ return x.Post
}
return nil
}
-func (m *PathItem) GetDelete() *Operation {
- if m != nil {
- return m.Delete
+func (x *PathItem) GetDelete() *Operation {
+ if x != nil {
+ return x.Delete
}
return nil
}
-func (m *PathItem) GetOptions() *Operation {
- if m != nil {
- return m.Options
+func (x *PathItem) GetOptions() *Operation {
+ if x != nil {
+ return x.Options
}
return nil
}
-func (m *PathItem) GetHead() *Operation {
- if m != nil {
- return m.Head
+func (x *PathItem) GetHead() *Operation {
+ if x != nil {
+ return x.Head
}
return nil
}
-func (m *PathItem) GetPatch() *Operation {
- if m != nil {
- return m.Patch
+func (x *PathItem) GetPatch() *Operation {
+ if x != nil {
+ return x.Patch
}
return nil
}
-func (m *PathItem) GetParameters() []*ParametersItem {
- if m != nil {
- return m.Parameters
+func (x *PathItem) GetParameters() []*ParametersItem {
+ if x != nil {
+ return x.Parameters
}
return nil
}
-func (m *PathItem) GetVendorExtension() []*NamedAny {
- if m != nil {
- return m.VendorExtension
+func (x *PathItem) GetVendorExtension() []*NamedAny {
+ if x != nil {
+ return x.VendorExtension
}
return nil
}
type PathParameterSubSchema struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
// Determines whether or not this parameter is required or optional.
Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"`
// Determines the location of the parameter.
@@ -3198,472 +3512,504 @@ type PathParameterSubSchema struct {
// A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed.
Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
// The name of the parameter.
- Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
- Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"`
- Format string `protobuf:"bytes,6,opt,name=format,proto3" json:"format,omitempty"`
- Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items,proto3" json:"items,omitempty"`
- CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"`
- Default *Any `protobuf:"bytes,9,opt,name=default,proto3" json:"default,omitempty"`
- Maximum float64 `protobuf:"fixed64,10,opt,name=maximum,proto3" json:"maximum,omitempty"`
- ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"`
- Minimum float64 `protobuf:"fixed64,12,opt,name=minimum,proto3" json:"minimum,omitempty"`
- ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"`
- MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"`
- MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"`
- Pattern string `protobuf:"bytes,16,opt,name=pattern,proto3" json:"pattern,omitempty"`
- MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
- MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
- UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"`
- Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"`
- MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"`
- VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PathParameterSubSchema) Reset() { *m = PathParameterSubSchema{} }
-func (m *PathParameterSubSchema) String() string { return proto.CompactTextString(m) }
-func (*PathParameterSubSchema) ProtoMessage() {}
+ Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
+ Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"`
+ Format string `protobuf:"bytes,6,opt,name=format,proto3" json:"format,omitempty"`
+ Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items,proto3" json:"items,omitempty"`
+ CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"`
+ Default *Any `protobuf:"bytes,9,opt,name=default,proto3" json:"default,omitempty"`
+ Maximum float64 `protobuf:"fixed64,10,opt,name=maximum,proto3" json:"maximum,omitempty"`
+ ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"`
+ Minimum float64 `protobuf:"fixed64,12,opt,name=minimum,proto3" json:"minimum,omitempty"`
+ ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"`
+ MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"`
+ MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"`
+ Pattern string `protobuf:"bytes,16,opt,name=pattern,proto3" json:"pattern,omitempty"`
+ MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
+ MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
+ UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"`
+ Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"`
+ MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"`
+ VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+}
+
+func (x *PathParameterSubSchema) Reset() {
+ *x = PathParameterSubSchema{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[41]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PathParameterSubSchema) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PathParameterSubSchema) ProtoMessage() {}
+
+func (x *PathParameterSubSchema) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[41]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PathParameterSubSchema.ProtoReflect.Descriptor instead.
func (*PathParameterSubSchema) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{41}
-}
-
-func (m *PathParameterSubSchema) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_PathParameterSubSchema.Unmarshal(m, b)
-}
-func (m *PathParameterSubSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_PathParameterSubSchema.Marshal(b, m, deterministic)
-}
-func (m *PathParameterSubSchema) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PathParameterSubSchema.Merge(m, src)
-}
-func (m *PathParameterSubSchema) XXX_Size() int {
- return xxx_messageInfo_PathParameterSubSchema.Size(m)
-}
-func (m *PathParameterSubSchema) XXX_DiscardUnknown() {
- xxx_messageInfo_PathParameterSubSchema.DiscardUnknown(m)
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{41}
}
-var xxx_messageInfo_PathParameterSubSchema proto.InternalMessageInfo
-
-func (m *PathParameterSubSchema) GetRequired() bool {
- if m != nil {
- return m.Required
+func (x *PathParameterSubSchema) GetRequired() bool {
+ if x != nil {
+ return x.Required
}
return false
}
-func (m *PathParameterSubSchema) GetIn() string {
- if m != nil {
- return m.In
+func (x *PathParameterSubSchema) GetIn() string {
+ if x != nil {
+ return x.In
}
return ""
}
-func (m *PathParameterSubSchema) GetDescription() string {
- if m != nil {
- return m.Description
+func (x *PathParameterSubSchema) GetDescription() string {
+ if x != nil {
+ return x.Description
}
return ""
}
-func (m *PathParameterSubSchema) GetName() string {
- if m != nil {
- return m.Name
+func (x *PathParameterSubSchema) GetName() string {
+ if x != nil {
+ return x.Name
}
return ""
}
-func (m *PathParameterSubSchema) GetType() string {
- if m != nil {
- return m.Type
+func (x *PathParameterSubSchema) GetType() string {
+ if x != nil {
+ return x.Type
}
return ""
}
-func (m *PathParameterSubSchema) GetFormat() string {
- if m != nil {
- return m.Format
+func (x *PathParameterSubSchema) GetFormat() string {
+ if x != nil {
+ return x.Format
}
return ""
}
-func (m *PathParameterSubSchema) GetItems() *PrimitivesItems {
- if m != nil {
- return m.Items
+func (x *PathParameterSubSchema) GetItems() *PrimitivesItems {
+ if x != nil {
+ return x.Items
}
return nil
}
-func (m *PathParameterSubSchema) GetCollectionFormat() string {
- if m != nil {
- return m.CollectionFormat
+func (x *PathParameterSubSchema) GetCollectionFormat() string {
+ if x != nil {
+ return x.CollectionFormat
}
return ""
}
-func (m *PathParameterSubSchema) GetDefault() *Any {
- if m != nil {
- return m.Default
+func (x *PathParameterSubSchema) GetDefault() *Any {
+ if x != nil {
+ return x.Default
}
return nil
}
-func (m *PathParameterSubSchema) GetMaximum() float64 {
- if m != nil {
- return m.Maximum
+func (x *PathParameterSubSchema) GetMaximum() float64 {
+ if x != nil {
+ return x.Maximum
}
return 0
}
-func (m *PathParameterSubSchema) GetExclusiveMaximum() bool {
- if m != nil {
- return m.ExclusiveMaximum
+func (x *PathParameterSubSchema) GetExclusiveMaximum() bool {
+ if x != nil {
+ return x.ExclusiveMaximum
}
return false
}
-func (m *PathParameterSubSchema) GetMinimum() float64 {
- if m != nil {
- return m.Minimum
+func (x *PathParameterSubSchema) GetMinimum() float64 {
+ if x != nil {
+ return x.Minimum
}
return 0
}
-func (m *PathParameterSubSchema) GetExclusiveMinimum() bool {
- if m != nil {
- return m.ExclusiveMinimum
+func (x *PathParameterSubSchema) GetExclusiveMinimum() bool {
+ if x != nil {
+ return x.ExclusiveMinimum
}
return false
}
-func (m *PathParameterSubSchema) GetMaxLength() int64 {
- if m != nil {
- return m.MaxLength
+func (x *PathParameterSubSchema) GetMaxLength() int64 {
+ if x != nil {
+ return x.MaxLength
}
return 0
}
-func (m *PathParameterSubSchema) GetMinLength() int64 {
- if m != nil {
- return m.MinLength
+func (x *PathParameterSubSchema) GetMinLength() int64 {
+ if x != nil {
+ return x.MinLength
}
return 0
}
-func (m *PathParameterSubSchema) GetPattern() string {
- if m != nil {
- return m.Pattern
+func (x *PathParameterSubSchema) GetPattern() string {
+ if x != nil {
+ return x.Pattern
}
return ""
}
-func (m *PathParameterSubSchema) GetMaxItems() int64 {
- if m != nil {
- return m.MaxItems
+func (x *PathParameterSubSchema) GetMaxItems() int64 {
+ if x != nil {
+ return x.MaxItems
}
return 0
}
-func (m *PathParameterSubSchema) GetMinItems() int64 {
- if m != nil {
- return m.MinItems
+func (x *PathParameterSubSchema) GetMinItems() int64 {
+ if x != nil {
+ return x.MinItems
}
return 0
}
-func (m *PathParameterSubSchema) GetUniqueItems() bool {
- if m != nil {
- return m.UniqueItems
+func (x *PathParameterSubSchema) GetUniqueItems() bool {
+ if x != nil {
+ return x.UniqueItems
}
return false
}
-func (m *PathParameterSubSchema) GetEnum() []*Any {
- if m != nil {
- return m.Enum
+func (x *PathParameterSubSchema) GetEnum() []*Any {
+ if x != nil {
+ return x.Enum
}
return nil
}
-func (m *PathParameterSubSchema) GetMultipleOf() float64 {
- if m != nil {
- return m.MultipleOf
+func (x *PathParameterSubSchema) GetMultipleOf() float64 {
+ if x != nil {
+ return x.MultipleOf
}
return 0
}
-func (m *PathParameterSubSchema) GetVendorExtension() []*NamedAny {
- if m != nil {
- return m.VendorExtension
+func (x *PathParameterSubSchema) GetVendorExtension() []*NamedAny {
+ if x != nil {
+ return x.VendorExtension
}
return nil
}
// Relative paths to the individual endpoints. They must be relative to the 'basePath'.
type Paths struct {
- VendorExtension []*NamedAny `protobuf:"bytes,1,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
- Path []*NamedPathItem `protobuf:"bytes,2,rep,name=path,proto3" json:"path,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Paths) Reset() { *m = Paths{} }
-func (m *Paths) String() string { return proto.CompactTextString(m) }
-func (*Paths) ProtoMessage() {}
-func (*Paths) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{42}
+ VendorExtension []*NamedAny `protobuf:"bytes,1,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+ Path []*NamedPathItem `protobuf:"bytes,2,rep,name=path,proto3" json:"path,omitempty"`
}
-func (m *Paths) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Paths.Unmarshal(m, b)
-}
-func (m *Paths) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Paths.Marshal(b, m, deterministic)
-}
-func (m *Paths) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Paths.Merge(m, src)
+func (x *Paths) Reset() {
+ *x = Paths{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[42]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Paths) XXX_Size() int {
- return xxx_messageInfo_Paths.Size(m)
+
+func (x *Paths) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Paths) XXX_DiscardUnknown() {
- xxx_messageInfo_Paths.DiscardUnknown(m)
+
+func (*Paths) ProtoMessage() {}
+
+func (x *Paths) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[42]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Paths proto.InternalMessageInfo
+// Deprecated: Use Paths.ProtoReflect.Descriptor instead.
+func (*Paths) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{42}
+}
-func (m *Paths) GetVendorExtension() []*NamedAny {
- if m != nil {
- return m.VendorExtension
+func (x *Paths) GetVendorExtension() []*NamedAny {
+ if x != nil {
+ return x.VendorExtension
}
return nil
}
-func (m *Paths) GetPath() []*NamedPathItem {
- if m != nil {
- return m.Path
+func (x *Paths) GetPath() []*NamedPathItem {
+ if x != nil {
+ return x.Path
}
return nil
}
type PrimitivesItems struct {
- Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
- Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"`
- Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items,proto3" json:"items,omitempty"`
- CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"`
- Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"`
- Maximum float64 `protobuf:"fixed64,6,opt,name=maximum,proto3" json:"maximum,omitempty"`
- ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"`
- Minimum float64 `protobuf:"fixed64,8,opt,name=minimum,proto3" json:"minimum,omitempty"`
- ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"`
- MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"`
- MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"`
- Pattern string `protobuf:"bytes,12,opt,name=pattern,proto3" json:"pattern,omitempty"`
- MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
- MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
- UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"`
- Enum []*Any `protobuf:"bytes,16,rep,name=enum,proto3" json:"enum,omitempty"`
- MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"`
- VendorExtension []*NamedAny `protobuf:"bytes,18,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PrimitivesItems) Reset() { *m = PrimitivesItems{} }
-func (m *PrimitivesItems) String() string { return proto.CompactTextString(m) }
-func (*PrimitivesItems) ProtoMessage() {}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"`
+ Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items,proto3" json:"items,omitempty"`
+ CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"`
+ Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"`
+ Maximum float64 `protobuf:"fixed64,6,opt,name=maximum,proto3" json:"maximum,omitempty"`
+ ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"`
+ Minimum float64 `protobuf:"fixed64,8,opt,name=minimum,proto3" json:"minimum,omitempty"`
+ ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"`
+ MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"`
+ MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"`
+ Pattern string `protobuf:"bytes,12,opt,name=pattern,proto3" json:"pattern,omitempty"`
+ MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
+ MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
+ UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"`
+ Enum []*Any `protobuf:"bytes,16,rep,name=enum,proto3" json:"enum,omitempty"`
+ MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"`
+ VendorExtension []*NamedAny `protobuf:"bytes,18,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+}
+
+func (x *PrimitivesItems) Reset() {
+ *x = PrimitivesItems{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[43]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PrimitivesItems) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PrimitivesItems) ProtoMessage() {}
+
+func (x *PrimitivesItems) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[43]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PrimitivesItems.ProtoReflect.Descriptor instead.
func (*PrimitivesItems) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{43}
-}
-
-func (m *PrimitivesItems) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_PrimitivesItems.Unmarshal(m, b)
-}
-func (m *PrimitivesItems) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_PrimitivesItems.Marshal(b, m, deterministic)
-}
-func (m *PrimitivesItems) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PrimitivesItems.Merge(m, src)
-}
-func (m *PrimitivesItems) XXX_Size() int {
- return xxx_messageInfo_PrimitivesItems.Size(m)
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{43}
}
-func (m *PrimitivesItems) XXX_DiscardUnknown() {
- xxx_messageInfo_PrimitivesItems.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PrimitivesItems proto.InternalMessageInfo
-func (m *PrimitivesItems) GetType() string {
- if m != nil {
- return m.Type
+func (x *PrimitivesItems) GetType() string {
+ if x != nil {
+ return x.Type
}
return ""
}
-func (m *PrimitivesItems) GetFormat() string {
- if m != nil {
- return m.Format
+func (x *PrimitivesItems) GetFormat() string {
+ if x != nil {
+ return x.Format
}
return ""
}
-func (m *PrimitivesItems) GetItems() *PrimitivesItems {
- if m != nil {
- return m.Items
+func (x *PrimitivesItems) GetItems() *PrimitivesItems {
+ if x != nil {
+ return x.Items
}
return nil
}
-func (m *PrimitivesItems) GetCollectionFormat() string {
- if m != nil {
- return m.CollectionFormat
+func (x *PrimitivesItems) GetCollectionFormat() string {
+ if x != nil {
+ return x.CollectionFormat
}
return ""
}
-func (m *PrimitivesItems) GetDefault() *Any {
- if m != nil {
- return m.Default
+func (x *PrimitivesItems) GetDefault() *Any {
+ if x != nil {
+ return x.Default
}
return nil
}
-func (m *PrimitivesItems) GetMaximum() float64 {
- if m != nil {
- return m.Maximum
+func (x *PrimitivesItems) GetMaximum() float64 {
+ if x != nil {
+ return x.Maximum
}
return 0
}
-func (m *PrimitivesItems) GetExclusiveMaximum() bool {
- if m != nil {
- return m.ExclusiveMaximum
+func (x *PrimitivesItems) GetExclusiveMaximum() bool {
+ if x != nil {
+ return x.ExclusiveMaximum
}
return false
}
-func (m *PrimitivesItems) GetMinimum() float64 {
- if m != nil {
- return m.Minimum
+func (x *PrimitivesItems) GetMinimum() float64 {
+ if x != nil {
+ return x.Minimum
}
return 0
}
-func (m *PrimitivesItems) GetExclusiveMinimum() bool {
- if m != nil {
- return m.ExclusiveMinimum
+func (x *PrimitivesItems) GetExclusiveMinimum() bool {
+ if x != nil {
+ return x.ExclusiveMinimum
}
return false
}
-func (m *PrimitivesItems) GetMaxLength() int64 {
- if m != nil {
- return m.MaxLength
+func (x *PrimitivesItems) GetMaxLength() int64 {
+ if x != nil {
+ return x.MaxLength
}
return 0
}
-func (m *PrimitivesItems) GetMinLength() int64 {
- if m != nil {
- return m.MinLength
+func (x *PrimitivesItems) GetMinLength() int64 {
+ if x != nil {
+ return x.MinLength
}
return 0
}
-func (m *PrimitivesItems) GetPattern() string {
- if m != nil {
- return m.Pattern
+func (x *PrimitivesItems) GetPattern() string {
+ if x != nil {
+ return x.Pattern
}
return ""
}
-func (m *PrimitivesItems) GetMaxItems() int64 {
- if m != nil {
- return m.MaxItems
+func (x *PrimitivesItems) GetMaxItems() int64 {
+ if x != nil {
+ return x.MaxItems
}
return 0
}
-func (m *PrimitivesItems) GetMinItems() int64 {
- if m != nil {
- return m.MinItems
+func (x *PrimitivesItems) GetMinItems() int64 {
+ if x != nil {
+ return x.MinItems
}
return 0
}
-func (m *PrimitivesItems) GetUniqueItems() bool {
- if m != nil {
- return m.UniqueItems
+func (x *PrimitivesItems) GetUniqueItems() bool {
+ if x != nil {
+ return x.UniqueItems
}
return false
}
-func (m *PrimitivesItems) GetEnum() []*Any {
- if m != nil {
- return m.Enum
+func (x *PrimitivesItems) GetEnum() []*Any {
+ if x != nil {
+ return x.Enum
}
return nil
}
-func (m *PrimitivesItems) GetMultipleOf() float64 {
- if m != nil {
- return m.MultipleOf
+func (x *PrimitivesItems) GetMultipleOf() float64 {
+ if x != nil {
+ return x.MultipleOf
}
return 0
}
-func (m *PrimitivesItems) GetVendorExtension() []*NamedAny {
- if m != nil {
- return m.VendorExtension
+func (x *PrimitivesItems) GetVendorExtension() []*NamedAny {
+ if x != nil {
+ return x.VendorExtension
}
return nil
}
type Properties struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
}
-func (m *Properties) Reset() { *m = Properties{} }
-func (m *Properties) String() string { return proto.CompactTextString(m) }
-func (*Properties) ProtoMessage() {}
-func (*Properties) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{44}
+func (x *Properties) Reset() {
+ *x = Properties{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[44]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Properties) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Properties.Unmarshal(m, b)
-}
-func (m *Properties) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Properties.Marshal(b, m, deterministic)
-}
-func (m *Properties) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Properties.Merge(m, src)
-}
-func (m *Properties) XXX_Size() int {
- return xxx_messageInfo_Properties.Size(m)
+func (x *Properties) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Properties) XXX_DiscardUnknown() {
- xxx_messageInfo_Properties.DiscardUnknown(m)
+
+func (*Properties) ProtoMessage() {}
+
+func (x *Properties) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[44]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Properties proto.InternalMessageInfo
+// Deprecated: Use Properties.ProtoReflect.Descriptor instead.
+func (*Properties) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{44}
+}
-func (m *Properties) GetAdditionalProperties() []*NamedSchema {
- if m != nil {
- return m.AdditionalProperties
+func (x *Properties) GetAdditionalProperties() []*NamedSchema {
+ if x != nil {
+ return x.AdditionalProperties
}
return nil
}
type QueryParameterSubSchema struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
// Determines whether or not this parameter is required or optional.
Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"`
// Determines the location of the parameter.
@@ -3673,378 +4019,390 @@ type QueryParameterSubSchema struct {
// The name of the parameter.
Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
// allows sending a parameter by name only or with an empty value.
- AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"`
- Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"`
- Format string `protobuf:"bytes,7,opt,name=format,proto3" json:"format,omitempty"`
- Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items,proto3" json:"items,omitempty"`
- CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"`
- Default *Any `protobuf:"bytes,10,opt,name=default,proto3" json:"default,omitempty"`
- Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"`
- ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"`
- Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"`
- ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"`
- MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"`
- MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"`
- Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"`
- MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
- MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
- UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"`
- Enum []*Any `protobuf:"bytes,21,rep,name=enum,proto3" json:"enum,omitempty"`
- MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"`
- VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *QueryParameterSubSchema) Reset() { *m = QueryParameterSubSchema{} }
-func (m *QueryParameterSubSchema) String() string { return proto.CompactTextString(m) }
-func (*QueryParameterSubSchema) ProtoMessage() {}
+ AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"`
+ Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"`
+ Format string `protobuf:"bytes,7,opt,name=format,proto3" json:"format,omitempty"`
+ Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items,proto3" json:"items,omitempty"`
+ CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"`
+ Default *Any `protobuf:"bytes,10,opt,name=default,proto3" json:"default,omitempty"`
+ Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"`
+ ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"`
+ Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"`
+ ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"`
+ MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"`
+ MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"`
+ Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"`
+ MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
+ MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
+ UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"`
+ Enum []*Any `protobuf:"bytes,21,rep,name=enum,proto3" json:"enum,omitempty"`
+ MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"`
+ VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+}
+
+func (x *QueryParameterSubSchema) Reset() {
+ *x = QueryParameterSubSchema{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[45]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *QueryParameterSubSchema) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QueryParameterSubSchema) ProtoMessage() {}
+
+func (x *QueryParameterSubSchema) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[45]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use QueryParameterSubSchema.ProtoReflect.Descriptor instead.
func (*QueryParameterSubSchema) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{45}
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{45}
}
-func (m *QueryParameterSubSchema) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_QueryParameterSubSchema.Unmarshal(m, b)
-}
-func (m *QueryParameterSubSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_QueryParameterSubSchema.Marshal(b, m, deterministic)
-}
-func (m *QueryParameterSubSchema) XXX_Merge(src proto.Message) {
- xxx_messageInfo_QueryParameterSubSchema.Merge(m, src)
-}
-func (m *QueryParameterSubSchema) XXX_Size() int {
- return xxx_messageInfo_QueryParameterSubSchema.Size(m)
-}
-func (m *QueryParameterSubSchema) XXX_DiscardUnknown() {
- xxx_messageInfo_QueryParameterSubSchema.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_QueryParameterSubSchema proto.InternalMessageInfo
-
-func (m *QueryParameterSubSchema) GetRequired() bool {
- if m != nil {
- return m.Required
+func (x *QueryParameterSubSchema) GetRequired() bool {
+ if x != nil {
+ return x.Required
}
return false
}
-func (m *QueryParameterSubSchema) GetIn() string {
- if m != nil {
- return m.In
+func (x *QueryParameterSubSchema) GetIn() string {
+ if x != nil {
+ return x.In
}
return ""
}
-func (m *QueryParameterSubSchema) GetDescription() string {
- if m != nil {
- return m.Description
+func (x *QueryParameterSubSchema) GetDescription() string {
+ if x != nil {
+ return x.Description
}
return ""
}
-func (m *QueryParameterSubSchema) GetName() string {
- if m != nil {
- return m.Name
+func (x *QueryParameterSubSchema) GetName() string {
+ if x != nil {
+ return x.Name
}
return ""
}
-func (m *QueryParameterSubSchema) GetAllowEmptyValue() bool {
- if m != nil {
- return m.AllowEmptyValue
+func (x *QueryParameterSubSchema) GetAllowEmptyValue() bool {
+ if x != nil {
+ return x.AllowEmptyValue
}
return false
}
-func (m *QueryParameterSubSchema) GetType() string {
- if m != nil {
- return m.Type
+func (x *QueryParameterSubSchema) GetType() string {
+ if x != nil {
+ return x.Type
}
return ""
}
-func (m *QueryParameterSubSchema) GetFormat() string {
- if m != nil {
- return m.Format
+func (x *QueryParameterSubSchema) GetFormat() string {
+ if x != nil {
+ return x.Format
}
return ""
}
-func (m *QueryParameterSubSchema) GetItems() *PrimitivesItems {
- if m != nil {
- return m.Items
+func (x *QueryParameterSubSchema) GetItems() *PrimitivesItems {
+ if x != nil {
+ return x.Items
}
return nil
}
-func (m *QueryParameterSubSchema) GetCollectionFormat() string {
- if m != nil {
- return m.CollectionFormat
+func (x *QueryParameterSubSchema) GetCollectionFormat() string {
+ if x != nil {
+ return x.CollectionFormat
}
return ""
}
-func (m *QueryParameterSubSchema) GetDefault() *Any {
- if m != nil {
- return m.Default
+func (x *QueryParameterSubSchema) GetDefault() *Any {
+ if x != nil {
+ return x.Default
}
return nil
}
-func (m *QueryParameterSubSchema) GetMaximum() float64 {
- if m != nil {
- return m.Maximum
+func (x *QueryParameterSubSchema) GetMaximum() float64 {
+ if x != nil {
+ return x.Maximum
}
return 0
}
-func (m *QueryParameterSubSchema) GetExclusiveMaximum() bool {
- if m != nil {
- return m.ExclusiveMaximum
+func (x *QueryParameterSubSchema) GetExclusiveMaximum() bool {
+ if x != nil {
+ return x.ExclusiveMaximum
}
return false
}
-func (m *QueryParameterSubSchema) GetMinimum() float64 {
- if m != nil {
- return m.Minimum
+func (x *QueryParameterSubSchema) GetMinimum() float64 {
+ if x != nil {
+ return x.Minimum
}
return 0
}
-func (m *QueryParameterSubSchema) GetExclusiveMinimum() bool {
- if m != nil {
- return m.ExclusiveMinimum
+func (x *QueryParameterSubSchema) GetExclusiveMinimum() bool {
+ if x != nil {
+ return x.ExclusiveMinimum
}
return false
}
-func (m *QueryParameterSubSchema) GetMaxLength() int64 {
- if m != nil {
- return m.MaxLength
+func (x *QueryParameterSubSchema) GetMaxLength() int64 {
+ if x != nil {
+ return x.MaxLength
}
return 0
}
-func (m *QueryParameterSubSchema) GetMinLength() int64 {
- if m != nil {
- return m.MinLength
+func (x *QueryParameterSubSchema) GetMinLength() int64 {
+ if x != nil {
+ return x.MinLength
}
return 0
}
-func (m *QueryParameterSubSchema) GetPattern() string {
- if m != nil {
- return m.Pattern
+func (x *QueryParameterSubSchema) GetPattern() string {
+ if x != nil {
+ return x.Pattern
}
return ""
}
-func (m *QueryParameterSubSchema) GetMaxItems() int64 {
- if m != nil {
- return m.MaxItems
+func (x *QueryParameterSubSchema) GetMaxItems() int64 {
+ if x != nil {
+ return x.MaxItems
}
return 0
}
-func (m *QueryParameterSubSchema) GetMinItems() int64 {
- if m != nil {
- return m.MinItems
+func (x *QueryParameterSubSchema) GetMinItems() int64 {
+ if x != nil {
+ return x.MinItems
}
return 0
}
-func (m *QueryParameterSubSchema) GetUniqueItems() bool {
- if m != nil {
- return m.UniqueItems
+func (x *QueryParameterSubSchema) GetUniqueItems() bool {
+ if x != nil {
+ return x.UniqueItems
}
return false
}
-func (m *QueryParameterSubSchema) GetEnum() []*Any {
- if m != nil {
- return m.Enum
+func (x *QueryParameterSubSchema) GetEnum() []*Any {
+ if x != nil {
+ return x.Enum
}
return nil
}
-func (m *QueryParameterSubSchema) GetMultipleOf() float64 {
- if m != nil {
- return m.MultipleOf
+func (x *QueryParameterSubSchema) GetMultipleOf() float64 {
+ if x != nil {
+ return x.MultipleOf
}
return 0
}
-func (m *QueryParameterSubSchema) GetVendorExtension() []*NamedAny {
- if m != nil {
- return m.VendorExtension
+func (x *QueryParameterSubSchema) GetVendorExtension() []*NamedAny {
+ if x != nil {
+ return x.VendorExtension
}
return nil
}
type Response struct {
- Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
- Schema *SchemaItem `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"`
- Headers *Headers `protobuf:"bytes,3,opt,name=headers,proto3" json:"headers,omitempty"`
- Examples *Examples `protobuf:"bytes,4,opt,name=examples,proto3" json:"examples,omitempty"`
- VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Response) Reset() { *m = Response{} }
-func (m *Response) String() string { return proto.CompactTextString(m) }
-func (*Response) ProtoMessage() {}
-func (*Response) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{46}
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Response) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Response.Unmarshal(m, b)
-}
-func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Response.Marshal(b, m, deterministic)
+ Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+ Schema *SchemaItem `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"`
+ Headers *Headers `protobuf:"bytes,3,opt,name=headers,proto3" json:"headers,omitempty"`
+ Examples *Examples `protobuf:"bytes,4,opt,name=examples,proto3" json:"examples,omitempty"`
+ VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
}
-func (m *Response) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Response.Merge(m, src)
+
+func (x *Response) Reset() {
+ *x = Response{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[46]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Response) XXX_Size() int {
- return xxx_messageInfo_Response.Size(m)
+
+func (x *Response) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Response) XXX_DiscardUnknown() {
- xxx_messageInfo_Response.DiscardUnknown(m)
+
+func (*Response) ProtoMessage() {}
+
+func (x *Response) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[46]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Response proto.InternalMessageInfo
+// Deprecated: Use Response.ProtoReflect.Descriptor instead.
+func (*Response) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{46}
+}
-func (m *Response) GetDescription() string {
- if m != nil {
- return m.Description
+func (x *Response) GetDescription() string {
+ if x != nil {
+ return x.Description
}
return ""
}
-func (m *Response) GetSchema() *SchemaItem {
- if m != nil {
- return m.Schema
+func (x *Response) GetSchema() *SchemaItem {
+ if x != nil {
+ return x.Schema
}
return nil
}
-func (m *Response) GetHeaders() *Headers {
- if m != nil {
- return m.Headers
+func (x *Response) GetHeaders() *Headers {
+ if x != nil {
+ return x.Headers
}
return nil
}
-func (m *Response) GetExamples() *Examples {
- if m != nil {
- return m.Examples
+func (x *Response) GetExamples() *Examples {
+ if x != nil {
+ return x.Examples
}
return nil
}
-func (m *Response) GetVendorExtension() []*NamedAny {
- if m != nil {
- return m.VendorExtension
+func (x *Response) GetVendorExtension() []*NamedAny {
+ if x != nil {
+ return x.VendorExtension
}
return nil
}
-// One or more JSON representations for parameters
+// One or more JSON representations for responses
type ResponseDefinitions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
AdditionalProperties []*NamedResponse `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
}
-func (m *ResponseDefinitions) Reset() { *m = ResponseDefinitions{} }
-func (m *ResponseDefinitions) String() string { return proto.CompactTextString(m) }
-func (*ResponseDefinitions) ProtoMessage() {}
-func (*ResponseDefinitions) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{47}
+func (x *ResponseDefinitions) Reset() {
+ *x = ResponseDefinitions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[47]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *ResponseDefinitions) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ResponseDefinitions.Unmarshal(m, b)
-}
-func (m *ResponseDefinitions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ResponseDefinitions.Marshal(b, m, deterministic)
+func (x *ResponseDefinitions) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *ResponseDefinitions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResponseDefinitions.Merge(m, src)
-}
-func (m *ResponseDefinitions) XXX_Size() int {
- return xxx_messageInfo_ResponseDefinitions.Size(m)
-}
-func (m *ResponseDefinitions) XXX_DiscardUnknown() {
- xxx_messageInfo_ResponseDefinitions.DiscardUnknown(m)
+
+func (*ResponseDefinitions) ProtoMessage() {}
+
+func (x *ResponseDefinitions) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[47]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_ResponseDefinitions proto.InternalMessageInfo
+// Deprecated: Use ResponseDefinitions.ProtoReflect.Descriptor instead.
+func (*ResponseDefinitions) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{47}
+}
-func (m *ResponseDefinitions) GetAdditionalProperties() []*NamedResponse {
- if m != nil {
- return m.AdditionalProperties
+func (x *ResponseDefinitions) GetAdditionalProperties() []*NamedResponse {
+ if x != nil {
+ return x.AdditionalProperties
}
return nil
}
type ResponseValue struct {
- // Types that are valid to be assigned to Oneof:
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Oneof:
// *ResponseValue_Response
// *ResponseValue_JsonReference
- Oneof isResponseValue_Oneof `protobuf_oneof:"oneof"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Oneof isResponseValue_Oneof `protobuf_oneof:"oneof"`
}
-func (m *ResponseValue) Reset() { *m = ResponseValue{} }
-func (m *ResponseValue) String() string { return proto.CompactTextString(m) }
-func (*ResponseValue) ProtoMessage() {}
-func (*ResponseValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{48}
+func (x *ResponseValue) Reset() {
+ *x = ResponseValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[48]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *ResponseValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ResponseValue.Unmarshal(m, b)
-}
-func (m *ResponseValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ResponseValue.Marshal(b, m, deterministic)
-}
-func (m *ResponseValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResponseValue.Merge(m, src)
-}
-func (m *ResponseValue) XXX_Size() int {
- return xxx_messageInfo_ResponseValue.Size(m)
-}
-func (m *ResponseValue) XXX_DiscardUnknown() {
- xxx_messageInfo_ResponseValue.DiscardUnknown(m)
+func (x *ResponseValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-var xxx_messageInfo_ResponseValue proto.InternalMessageInfo
+func (*ResponseValue) ProtoMessage() {}
-type isResponseValue_Oneof interface {
- isResponseValue_Oneof()
-}
-
-type ResponseValue_Response struct {
- Response *Response `protobuf:"bytes,1,opt,name=response,proto3,oneof"`
+func (x *ResponseValue) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[48]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-type ResponseValue_JsonReference struct {
- JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,proto3,oneof"`
+// Deprecated: Use ResponseValue.ProtoReflect.Descriptor instead.
+func (*ResponseValue) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{48}
}
-func (*ResponseValue_Response) isResponseValue_Oneof() {}
-
-func (*ResponseValue_JsonReference) isResponseValue_Oneof() {}
-
func (m *ResponseValue) GetOneof() isResponseValue_Oneof {
if m != nil {
return m.Oneof
@@ -4052,78 +4410,98 @@ func (m *ResponseValue) GetOneof() isResponseValue_Oneof {
return nil
}
-func (m *ResponseValue) GetResponse() *Response {
- if x, ok := m.GetOneof().(*ResponseValue_Response); ok {
+func (x *ResponseValue) GetResponse() *Response {
+ if x, ok := x.GetOneof().(*ResponseValue_Response); ok {
return x.Response
}
return nil
}
-func (m *ResponseValue) GetJsonReference() *JsonReference {
- if x, ok := m.GetOneof().(*ResponseValue_JsonReference); ok {
+func (x *ResponseValue) GetJsonReference() *JsonReference {
+ if x, ok := x.GetOneof().(*ResponseValue_JsonReference); ok {
return x.JsonReference
}
return nil
}
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*ResponseValue) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*ResponseValue_Response)(nil),
- (*ResponseValue_JsonReference)(nil),
- }
+type isResponseValue_Oneof interface {
+ isResponseValue_Oneof()
}
-// Response objects names can either be any valid HTTP status code or 'default'.
-type Responses struct {
- ResponseCode []*NamedResponseValue `protobuf:"bytes,1,rep,name=response_code,json=responseCode,proto3" json:"response_code,omitempty"`
- VendorExtension []*NamedAny `protobuf:"bytes,2,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+type ResponseValue_Response struct {
+ Response *Response `protobuf:"bytes,1,opt,name=response,proto3,oneof"`
}
-func (m *Responses) Reset() { *m = Responses{} }
-func (m *Responses) String() string { return proto.CompactTextString(m) }
-func (*Responses) ProtoMessage() {}
-func (*Responses) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{49}
+type ResponseValue_JsonReference struct {
+ JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,proto3,oneof"`
}
-func (m *Responses) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Responses.Unmarshal(m, b)
-}
-func (m *Responses) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Responses.Marshal(b, m, deterministic)
+func (*ResponseValue_Response) isResponseValue_Oneof() {}
+
+func (*ResponseValue_JsonReference) isResponseValue_Oneof() {}
+
+// Response objects names can either be any valid HTTP status code or 'default'.
+type Responses struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ResponseCode []*NamedResponseValue `protobuf:"bytes,1,rep,name=response_code,json=responseCode,proto3" json:"response_code,omitempty"`
+ VendorExtension []*NamedAny `protobuf:"bytes,2,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
}
-func (m *Responses) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Responses.Merge(m, src)
+
+func (x *Responses) Reset() {
+ *x = Responses{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[49]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Responses) XXX_Size() int {
- return xxx_messageInfo_Responses.Size(m)
+
+func (x *Responses) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Responses) XXX_DiscardUnknown() {
- xxx_messageInfo_Responses.DiscardUnknown(m)
+
+func (*Responses) ProtoMessage() {}
+
+func (x *Responses) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[49]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Responses proto.InternalMessageInfo
+// Deprecated: Use Responses.ProtoReflect.Descriptor instead.
+func (*Responses) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{49}
+}
-func (m *Responses) GetResponseCode() []*NamedResponseValue {
- if m != nil {
- return m.ResponseCode
+func (x *Responses) GetResponseCode() []*NamedResponseValue {
+ if x != nil {
+ return x.ResponseCode
}
return nil
}
-func (m *Responses) GetVendorExtension() []*NamedAny {
- if m != nil {
- return m.VendorExtension
+func (x *Responses) GetVendorExtension() []*NamedAny {
+ if x != nil {
+ return x.VendorExtension
}
return nil
}
// A deterministic version of a JSON Schema object.
type Schema struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"`
Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"`
Title string `protobuf:"bytes,3,opt,name=title,proto3" json:"title,omitempty"`
@@ -4155,304 +4533,300 @@ type Schema struct {
ExternalDocs *ExternalDocs `protobuf:"bytes,29,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
Example *Any `protobuf:"bytes,30,opt,name=example,proto3" json:"example,omitempty"`
VendorExtension []*NamedAny `protobuf:"bytes,31,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
}
-func (m *Schema) Reset() { *m = Schema{} }
-func (m *Schema) String() string { return proto.CompactTextString(m) }
-func (*Schema) ProtoMessage() {}
-func (*Schema) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{50}
+func (x *Schema) Reset() {
+ *x = Schema{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[50]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Schema) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Schema.Unmarshal(m, b)
-}
-func (m *Schema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Schema.Marshal(b, m, deterministic)
-}
-func (m *Schema) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Schema.Merge(m, src)
-}
-func (m *Schema) XXX_Size() int {
- return xxx_messageInfo_Schema.Size(m)
+func (x *Schema) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Schema) XXX_DiscardUnknown() {
- xxx_messageInfo_Schema.DiscardUnknown(m)
+
+func (*Schema) ProtoMessage() {}
+
+func (x *Schema) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[50]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Schema proto.InternalMessageInfo
+// Deprecated: Use Schema.ProtoReflect.Descriptor instead.
+func (*Schema) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{50}
+}
-func (m *Schema) GetXRef() string {
- if m != nil {
- return m.XRef
+func (x *Schema) GetXRef() string {
+ if x != nil {
+ return x.XRef
}
return ""
}
-func (m *Schema) GetFormat() string {
- if m != nil {
- return m.Format
+func (x *Schema) GetFormat() string {
+ if x != nil {
+ return x.Format
}
return ""
}
-func (m *Schema) GetTitle() string {
- if m != nil {
- return m.Title
+func (x *Schema) GetTitle() string {
+ if x != nil {
+ return x.Title
}
return ""
}
-func (m *Schema) GetDescription() string {
- if m != nil {
- return m.Description
+func (x *Schema) GetDescription() string {
+ if x != nil {
+ return x.Description
}
return ""
}
-func (m *Schema) GetDefault() *Any {
- if m != nil {
- return m.Default
+func (x *Schema) GetDefault() *Any {
+ if x != nil {
+ return x.Default
}
return nil
}
-func (m *Schema) GetMultipleOf() float64 {
- if m != nil {
- return m.MultipleOf
+func (x *Schema) GetMultipleOf() float64 {
+ if x != nil {
+ return x.MultipleOf
}
return 0
}
-func (m *Schema) GetMaximum() float64 {
- if m != nil {
- return m.Maximum
+func (x *Schema) GetMaximum() float64 {
+ if x != nil {
+ return x.Maximum
}
return 0
}
-func (m *Schema) GetExclusiveMaximum() bool {
- if m != nil {
- return m.ExclusiveMaximum
+func (x *Schema) GetExclusiveMaximum() bool {
+ if x != nil {
+ return x.ExclusiveMaximum
}
return false
}
-func (m *Schema) GetMinimum() float64 {
- if m != nil {
- return m.Minimum
+func (x *Schema) GetMinimum() float64 {
+ if x != nil {
+ return x.Minimum
}
return 0
}
-func (m *Schema) GetExclusiveMinimum() bool {
- if m != nil {
- return m.ExclusiveMinimum
+func (x *Schema) GetExclusiveMinimum() bool {
+ if x != nil {
+ return x.ExclusiveMinimum
}
return false
}
-func (m *Schema) GetMaxLength() int64 {
- if m != nil {
- return m.MaxLength
+func (x *Schema) GetMaxLength() int64 {
+ if x != nil {
+ return x.MaxLength
}
return 0
}
-func (m *Schema) GetMinLength() int64 {
- if m != nil {
- return m.MinLength
+func (x *Schema) GetMinLength() int64 {
+ if x != nil {
+ return x.MinLength
}
return 0
}
-func (m *Schema) GetPattern() string {
- if m != nil {
- return m.Pattern
+func (x *Schema) GetPattern() string {
+ if x != nil {
+ return x.Pattern
}
return ""
}
-func (m *Schema) GetMaxItems() int64 {
- if m != nil {
- return m.MaxItems
+func (x *Schema) GetMaxItems() int64 {
+ if x != nil {
+ return x.MaxItems
}
return 0
}
-func (m *Schema) GetMinItems() int64 {
- if m != nil {
- return m.MinItems
+func (x *Schema) GetMinItems() int64 {
+ if x != nil {
+ return x.MinItems
}
return 0
}
-func (m *Schema) GetUniqueItems() bool {
- if m != nil {
- return m.UniqueItems
+func (x *Schema) GetUniqueItems() bool {
+ if x != nil {
+ return x.UniqueItems
}
return false
}
-func (m *Schema) GetMaxProperties() int64 {
- if m != nil {
- return m.MaxProperties
+func (x *Schema) GetMaxProperties() int64 {
+ if x != nil {
+ return x.MaxProperties
}
return 0
}
-func (m *Schema) GetMinProperties() int64 {
- if m != nil {
- return m.MinProperties
+func (x *Schema) GetMinProperties() int64 {
+ if x != nil {
+ return x.MinProperties
}
return 0
}
-func (m *Schema) GetRequired() []string {
- if m != nil {
- return m.Required
+func (x *Schema) GetRequired() []string {
+ if x != nil {
+ return x.Required
}
return nil
}
-func (m *Schema) GetEnum() []*Any {
- if m != nil {
- return m.Enum
+func (x *Schema) GetEnum() []*Any {
+ if x != nil {
+ return x.Enum
}
return nil
}
-func (m *Schema) GetAdditionalProperties() *AdditionalPropertiesItem {
- if m != nil {
- return m.AdditionalProperties
+func (x *Schema) GetAdditionalProperties() *AdditionalPropertiesItem {
+ if x != nil {
+ return x.AdditionalProperties
}
return nil
}
-func (m *Schema) GetType() *TypeItem {
- if m != nil {
- return m.Type
+func (x *Schema) GetType() *TypeItem {
+ if x != nil {
+ return x.Type
}
return nil
}
-func (m *Schema) GetItems() *ItemsItem {
- if m != nil {
- return m.Items
+func (x *Schema) GetItems() *ItemsItem {
+ if x != nil {
+ return x.Items
}
return nil
}
-func (m *Schema) GetAllOf() []*Schema {
- if m != nil {
- return m.AllOf
+func (x *Schema) GetAllOf() []*Schema {
+ if x != nil {
+ return x.AllOf
}
return nil
}
-func (m *Schema) GetProperties() *Properties {
- if m != nil {
- return m.Properties
+func (x *Schema) GetProperties() *Properties {
+ if x != nil {
+ return x.Properties
}
return nil
}
-func (m *Schema) GetDiscriminator() string {
- if m != nil {
- return m.Discriminator
+func (x *Schema) GetDiscriminator() string {
+ if x != nil {
+ return x.Discriminator
}
return ""
}
-func (m *Schema) GetReadOnly() bool {
- if m != nil {
- return m.ReadOnly
+func (x *Schema) GetReadOnly() bool {
+ if x != nil {
+ return x.ReadOnly
}
return false
}
-func (m *Schema) GetXml() *Xml {
- if m != nil {
- return m.Xml
+func (x *Schema) GetXml() *Xml {
+ if x != nil {
+ return x.Xml
}
return nil
}
-func (m *Schema) GetExternalDocs() *ExternalDocs {
- if m != nil {
- return m.ExternalDocs
+func (x *Schema) GetExternalDocs() *ExternalDocs {
+ if x != nil {
+ return x.ExternalDocs
}
return nil
}
-func (m *Schema) GetExample() *Any {
- if m != nil {
- return m.Example
+func (x *Schema) GetExample() *Any {
+ if x != nil {
+ return x.Example
}
return nil
}
-func (m *Schema) GetVendorExtension() []*NamedAny {
- if m != nil {
- return m.VendorExtension
+func (x *Schema) GetVendorExtension() []*NamedAny {
+ if x != nil {
+ return x.VendorExtension
}
return nil
}
type SchemaItem struct {
- // Types that are valid to be assigned to Oneof:
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Oneof:
// *SchemaItem_Schema
// *SchemaItem_FileSchema
- Oneof isSchemaItem_Oneof `protobuf_oneof:"oneof"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Oneof isSchemaItem_Oneof `protobuf_oneof:"oneof"`
}
-func (m *SchemaItem) Reset() { *m = SchemaItem{} }
-func (m *SchemaItem) String() string { return proto.CompactTextString(m) }
-func (*SchemaItem) ProtoMessage() {}
-func (*SchemaItem) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{51}
+func (x *SchemaItem) Reset() {
+ *x = SchemaItem{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[51]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *SchemaItem) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_SchemaItem.Unmarshal(m, b)
-}
-func (m *SchemaItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_SchemaItem.Marshal(b, m, deterministic)
-}
-func (m *SchemaItem) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SchemaItem.Merge(m, src)
+func (x *SchemaItem) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *SchemaItem) XXX_Size() int {
- return xxx_messageInfo_SchemaItem.Size(m)
-}
-func (m *SchemaItem) XXX_DiscardUnknown() {
- xxx_messageInfo_SchemaItem.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SchemaItem proto.InternalMessageInfo
-type isSchemaItem_Oneof interface {
- isSchemaItem_Oneof()
-}
+func (*SchemaItem) ProtoMessage() {}
-type SchemaItem_Schema struct {
- Schema *Schema `protobuf:"bytes,1,opt,name=schema,proto3,oneof"`
+func (x *SchemaItem) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[51]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-type SchemaItem_FileSchema struct {
- FileSchema *FileSchema `protobuf:"bytes,2,opt,name=file_schema,json=fileSchema,proto3,oneof"`
+// Deprecated: Use SchemaItem.ProtoReflect.Descriptor instead.
+func (*SchemaItem) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{51}
}
-func (*SchemaItem_Schema) isSchemaItem_Oneof() {}
-
-func (*SchemaItem_FileSchema) isSchemaItem_Oneof() {}
-
func (m *SchemaItem) GetOneof() isSchemaItem_Oneof {
if m != nil {
return m.Oneof
@@ -4460,105 +4834,178 @@ func (m *SchemaItem) GetOneof() isSchemaItem_Oneof {
return nil
}
-func (m *SchemaItem) GetSchema() *Schema {
- if x, ok := m.GetOneof().(*SchemaItem_Schema); ok {
+func (x *SchemaItem) GetSchema() *Schema {
+ if x, ok := x.GetOneof().(*SchemaItem_Schema); ok {
return x.Schema
}
return nil
}
-func (m *SchemaItem) GetFileSchema() *FileSchema {
- if x, ok := m.GetOneof().(*SchemaItem_FileSchema); ok {
+func (x *SchemaItem) GetFileSchema() *FileSchema {
+ if x, ok := x.GetOneof().(*SchemaItem_FileSchema); ok {
return x.FileSchema
}
return nil
}
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*SchemaItem) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*SchemaItem_Schema)(nil),
- (*SchemaItem_FileSchema)(nil),
- }
+type isSchemaItem_Oneof interface {
+ isSchemaItem_Oneof()
}
-type SecurityDefinitions struct {
- AdditionalProperties []*NamedSecurityDefinitionsItem `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+type SchemaItem_Schema struct {
+ Schema *Schema `protobuf:"bytes,1,opt,name=schema,proto3,oneof"`
}
-func (m *SecurityDefinitions) Reset() { *m = SecurityDefinitions{} }
-func (m *SecurityDefinitions) String() string { return proto.CompactTextString(m) }
-func (*SecurityDefinitions) ProtoMessage() {}
-func (*SecurityDefinitions) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{52}
+type SchemaItem_FileSchema struct {
+ FileSchema *FileSchema `protobuf:"bytes,2,opt,name=file_schema,json=fileSchema,proto3,oneof"`
}
-func (m *SecurityDefinitions) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_SecurityDefinitions.Unmarshal(m, b)
-}
-func (m *SecurityDefinitions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_SecurityDefinitions.Marshal(b, m, deterministic)
+func (*SchemaItem_Schema) isSchemaItem_Oneof() {}
+
+func (*SchemaItem_FileSchema) isSchemaItem_Oneof() {}
+
+type SecurityDefinitions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ AdditionalProperties []*NamedSecurityDefinitionsItem `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"`
}
-func (m *SecurityDefinitions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SecurityDefinitions.Merge(m, src)
+
+func (x *SecurityDefinitions) Reset() {
+ *x = SecurityDefinitions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[52]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *SecurityDefinitions) XXX_Size() int {
- return xxx_messageInfo_SecurityDefinitions.Size(m)
+
+func (x *SecurityDefinitions) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *SecurityDefinitions) XXX_DiscardUnknown() {
- xxx_messageInfo_SecurityDefinitions.DiscardUnknown(m)
+
+func (*SecurityDefinitions) ProtoMessage() {}
+
+func (x *SecurityDefinitions) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[52]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_SecurityDefinitions proto.InternalMessageInfo
+// Deprecated: Use SecurityDefinitions.ProtoReflect.Descriptor instead.
+func (*SecurityDefinitions) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{52}
+}
-func (m *SecurityDefinitions) GetAdditionalProperties() []*NamedSecurityDefinitionsItem {
- if m != nil {
- return m.AdditionalProperties
+func (x *SecurityDefinitions) GetAdditionalProperties() []*NamedSecurityDefinitionsItem {
+ if x != nil {
+ return x.AdditionalProperties
}
return nil
}
type SecurityDefinitionsItem struct {
- // Types that are valid to be assigned to Oneof:
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Oneof:
// *SecurityDefinitionsItem_BasicAuthenticationSecurity
// *SecurityDefinitionsItem_ApiKeySecurity
// *SecurityDefinitionsItem_Oauth2ImplicitSecurity
// *SecurityDefinitionsItem_Oauth2PasswordSecurity
// *SecurityDefinitionsItem_Oauth2ApplicationSecurity
// *SecurityDefinitionsItem_Oauth2AccessCodeSecurity
- Oneof isSecurityDefinitionsItem_Oneof `protobuf_oneof:"oneof"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Oneof isSecurityDefinitionsItem_Oneof `protobuf_oneof:"oneof"`
}
-func (m *SecurityDefinitionsItem) Reset() { *m = SecurityDefinitionsItem{} }
-func (m *SecurityDefinitionsItem) String() string { return proto.CompactTextString(m) }
-func (*SecurityDefinitionsItem) ProtoMessage() {}
+func (x *SecurityDefinitionsItem) Reset() {
+ *x = SecurityDefinitionsItem{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[53]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SecurityDefinitionsItem) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityDefinitionsItem) ProtoMessage() {}
+
+func (x *SecurityDefinitionsItem) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[53]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SecurityDefinitionsItem.ProtoReflect.Descriptor instead.
func (*SecurityDefinitionsItem) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{53}
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{53}
}
-func (m *SecurityDefinitionsItem) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_SecurityDefinitionsItem.Unmarshal(m, b)
+func (m *SecurityDefinitionsItem) GetOneof() isSecurityDefinitionsItem_Oneof {
+ if m != nil {
+ return m.Oneof
+ }
+ return nil
}
-func (m *SecurityDefinitionsItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_SecurityDefinitionsItem.Marshal(b, m, deterministic)
+
+func (x *SecurityDefinitionsItem) GetBasicAuthenticationSecurity() *BasicAuthenticationSecurity {
+ if x, ok := x.GetOneof().(*SecurityDefinitionsItem_BasicAuthenticationSecurity); ok {
+ return x.BasicAuthenticationSecurity
+ }
+ return nil
+}
+
+func (x *SecurityDefinitionsItem) GetApiKeySecurity() *ApiKeySecurity {
+ if x, ok := x.GetOneof().(*SecurityDefinitionsItem_ApiKeySecurity); ok {
+ return x.ApiKeySecurity
+ }
+ return nil
}
-func (m *SecurityDefinitionsItem) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SecurityDefinitionsItem.Merge(m, src)
+
+func (x *SecurityDefinitionsItem) GetOauth2ImplicitSecurity() *Oauth2ImplicitSecurity {
+ if x, ok := x.GetOneof().(*SecurityDefinitionsItem_Oauth2ImplicitSecurity); ok {
+ return x.Oauth2ImplicitSecurity
+ }
+ return nil
}
-func (m *SecurityDefinitionsItem) XXX_Size() int {
- return xxx_messageInfo_SecurityDefinitionsItem.Size(m)
+
+func (x *SecurityDefinitionsItem) GetOauth2PasswordSecurity() *Oauth2PasswordSecurity {
+ if x, ok := x.GetOneof().(*SecurityDefinitionsItem_Oauth2PasswordSecurity); ok {
+ return x.Oauth2PasswordSecurity
+ }
+ return nil
}
-func (m *SecurityDefinitionsItem) XXX_DiscardUnknown() {
- xxx_messageInfo_SecurityDefinitionsItem.DiscardUnknown(m)
+
+func (x *SecurityDefinitionsItem) GetOauth2ApplicationSecurity() *Oauth2ApplicationSecurity {
+ if x, ok := x.GetOneof().(*SecurityDefinitionsItem_Oauth2ApplicationSecurity); ok {
+ return x.Oauth2ApplicationSecurity
+ }
+ return nil
}
-var xxx_messageInfo_SecurityDefinitionsItem proto.InternalMessageInfo
+func (x *SecurityDefinitionsItem) GetOauth2AccessCodeSecurity() *Oauth2AccessCodeSecurity {
+ if x, ok := x.GetOneof().(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity); ok {
+ return x.Oauth2AccessCodeSecurity
+ }
+ return nil
+}
type isSecurityDefinitionsItem_Oneof interface {
isSecurityDefinitionsItem_Oneof()
@@ -4600,627 +5047,2296 @@ func (*SecurityDefinitionsItem_Oauth2ApplicationSecurity) isSecurityDefinitionsI
func (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) isSecurityDefinitionsItem_Oneof() {}
-func (m *SecurityDefinitionsItem) GetOneof() isSecurityDefinitionsItem_Oneof {
- if m != nil {
- return m.Oneof
- }
- return nil
+type SecurityRequirement struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ AdditionalProperties []*NamedStringArray `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"`
}
-func (m *SecurityDefinitionsItem) GetBasicAuthenticationSecurity() *BasicAuthenticationSecurity {
- if x, ok := m.GetOneof().(*SecurityDefinitionsItem_BasicAuthenticationSecurity); ok {
- return x.BasicAuthenticationSecurity
+func (x *SecurityRequirement) Reset() {
+ *x = SecurityRequirement{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[54]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return nil
}
-func (m *SecurityDefinitionsItem) GetApiKeySecurity() *ApiKeySecurity {
- if x, ok := m.GetOneof().(*SecurityDefinitionsItem_ApiKeySecurity); ok {
- return x.ApiKeySecurity
- }
- return nil
+func (x *SecurityRequirement) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *SecurityDefinitionsItem) GetOauth2ImplicitSecurity() *Oauth2ImplicitSecurity {
- if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2ImplicitSecurity); ok {
- return x.Oauth2ImplicitSecurity
+func (*SecurityRequirement) ProtoMessage() {}
+
+func (x *SecurityRequirement) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[54]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return nil
+ return mi.MessageOf(x)
}
-func (m *SecurityDefinitionsItem) GetOauth2PasswordSecurity() *Oauth2PasswordSecurity {
- if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2PasswordSecurity); ok {
- return x.Oauth2PasswordSecurity
- }
- return nil
+// Deprecated: Use SecurityRequirement.ProtoReflect.Descriptor instead.
+func (*SecurityRequirement) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{54}
}
-func (m *SecurityDefinitionsItem) GetOauth2ApplicationSecurity() *Oauth2ApplicationSecurity {
- if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2ApplicationSecurity); ok {
- return x.Oauth2ApplicationSecurity
+func (x *SecurityRequirement) GetAdditionalProperties() []*NamedStringArray {
+ if x != nil {
+ return x.AdditionalProperties
}
return nil
}
-func (m *SecurityDefinitionsItem) GetOauth2AccessCodeSecurity() *Oauth2AccessCodeSecurity {
- if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity); ok {
- return x.Oauth2AccessCodeSecurity
- }
- return nil
+type StringArray struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"`
}
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*SecurityDefinitionsItem) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*SecurityDefinitionsItem_BasicAuthenticationSecurity)(nil),
- (*SecurityDefinitionsItem_ApiKeySecurity)(nil),
- (*SecurityDefinitionsItem_Oauth2ImplicitSecurity)(nil),
- (*SecurityDefinitionsItem_Oauth2PasswordSecurity)(nil),
- (*SecurityDefinitionsItem_Oauth2ApplicationSecurity)(nil),
- (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity)(nil),
+func (x *StringArray) Reset() {
+ *x = StringArray{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[55]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
}
-type SecurityRequirement struct {
- AdditionalProperties []*NamedStringArray `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+func (x *StringArray) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *SecurityRequirement) Reset() { *m = SecurityRequirement{} }
-func (m *SecurityRequirement) String() string { return proto.CompactTextString(m) }
-func (*SecurityRequirement) ProtoMessage() {}
-func (*SecurityRequirement) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{54}
-}
+func (*StringArray) ProtoMessage() {}
-func (m *SecurityRequirement) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_SecurityRequirement.Unmarshal(m, b)
-}
-func (m *SecurityRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_SecurityRequirement.Marshal(b, m, deterministic)
-}
-func (m *SecurityRequirement) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SecurityRequirement.Merge(m, src)
-}
-func (m *SecurityRequirement) XXX_Size() int {
- return xxx_messageInfo_SecurityRequirement.Size(m)
-}
-func (m *SecurityRequirement) XXX_DiscardUnknown() {
- xxx_messageInfo_SecurityRequirement.DiscardUnknown(m)
+func (x *StringArray) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[55]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_SecurityRequirement proto.InternalMessageInfo
+// Deprecated: Use StringArray.ProtoReflect.Descriptor instead.
+func (*StringArray) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{55}
+}
-func (m *SecurityRequirement) GetAdditionalProperties() []*NamedStringArray {
- if m != nil {
- return m.AdditionalProperties
+func (x *StringArray) GetValue() []string {
+ if x != nil {
+ return x.Value
}
return nil
}
-type StringArray struct {
- Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+type Tag struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *StringArray) Reset() { *m = StringArray{} }
-func (m *StringArray) String() string { return proto.CompactTextString(m) }
-func (*StringArray) ProtoMessage() {}
-func (*StringArray) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{55}
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ ExternalDocs *ExternalDocs `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+ VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
}
-func (m *StringArray) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_StringArray.Unmarshal(m, b)
-}
-func (m *StringArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_StringArray.Marshal(b, m, deterministic)
-}
-func (m *StringArray) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StringArray.Merge(m, src)
-}
-func (m *StringArray) XXX_Size() int {
- return xxx_messageInfo_StringArray.Size(m)
+func (x *Tag) Reset() {
+ *x = Tag{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[56]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *StringArray) XXX_DiscardUnknown() {
- xxx_messageInfo_StringArray.DiscardUnknown(m)
+
+func (x *Tag) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-var xxx_messageInfo_StringArray proto.InternalMessageInfo
+func (*Tag) ProtoMessage() {}
-func (m *StringArray) GetValue() []string {
- if m != nil {
- return m.Value
+func (x *Tag) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[56]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return nil
+ return mi.MessageOf(x)
}
-type Tag struct {
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
- ExternalDocs *ExternalDocs `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
- VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Tag) Reset() { *m = Tag{} }
-func (m *Tag) String() string { return proto.CompactTextString(m) }
-func (*Tag) ProtoMessage() {}
+// Deprecated: Use Tag.ProtoReflect.Descriptor instead.
func (*Tag) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{56}
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{56}
}
-func (m *Tag) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Tag.Unmarshal(m, b)
-}
-func (m *Tag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Tag.Marshal(b, m, deterministic)
-}
-func (m *Tag) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Tag.Merge(m, src)
-}
-func (m *Tag) XXX_Size() int {
- return xxx_messageInfo_Tag.Size(m)
-}
-func (m *Tag) XXX_DiscardUnknown() {
- xxx_messageInfo_Tag.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Tag proto.InternalMessageInfo
-
-func (m *Tag) GetName() string {
- if m != nil {
- return m.Name
+func (x *Tag) GetName() string {
+ if x != nil {
+ return x.Name
}
return ""
}
-func (m *Tag) GetDescription() string {
- if m != nil {
- return m.Description
+func (x *Tag) GetDescription() string {
+ if x != nil {
+ return x.Description
}
return ""
}
-func (m *Tag) GetExternalDocs() *ExternalDocs {
- if m != nil {
- return m.ExternalDocs
+func (x *Tag) GetExternalDocs() *ExternalDocs {
+ if x != nil {
+ return x.ExternalDocs
}
return nil
}
-func (m *Tag) GetVendorExtension() []*NamedAny {
- if m != nil {
- return m.VendorExtension
+func (x *Tag) GetVendorExtension() []*NamedAny {
+ if x != nil {
+ return x.VendorExtension
}
return nil
}
type TypeItem struct {
- Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *TypeItem) Reset() { *m = TypeItem{} }
-func (m *TypeItem) String() string { return proto.CompactTextString(m) }
-func (*TypeItem) ProtoMessage() {}
-func (*TypeItem) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{57}
+ Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"`
}
-func (m *TypeItem) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_TypeItem.Unmarshal(m, b)
-}
-func (m *TypeItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_TypeItem.Marshal(b, m, deterministic)
-}
-func (m *TypeItem) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TypeItem.Merge(m, src)
+func (x *TypeItem) Reset() {
+ *x = TypeItem{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[57]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *TypeItem) XXX_Size() int {
- return xxx_messageInfo_TypeItem.Size(m)
+
+func (x *TypeItem) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *TypeItem) XXX_DiscardUnknown() {
- xxx_messageInfo_TypeItem.DiscardUnknown(m)
+
+func (*TypeItem) ProtoMessage() {}
+
+func (x *TypeItem) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[57]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_TypeItem proto.InternalMessageInfo
+// Deprecated: Use TypeItem.ProtoReflect.Descriptor instead.
+func (*TypeItem) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{57}
+}
-func (m *TypeItem) GetValue() []string {
- if m != nil {
- return m.Value
+func (x *TypeItem) GetValue() []string {
+ if x != nil {
+ return x.Value
}
return nil
}
// Any property starting with x- is valid.
type VendorExtension struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
}
-func (m *VendorExtension) Reset() { *m = VendorExtension{} }
-func (m *VendorExtension) String() string { return proto.CompactTextString(m) }
-func (*VendorExtension) ProtoMessage() {}
-func (*VendorExtension) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{58}
+func (x *VendorExtension) Reset() {
+ *x = VendorExtension{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[58]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *VendorExtension) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_VendorExtension.Unmarshal(m, b)
-}
-func (m *VendorExtension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_VendorExtension.Marshal(b, m, deterministic)
+func (x *VendorExtension) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *VendorExtension) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VendorExtension.Merge(m, src)
-}
-func (m *VendorExtension) XXX_Size() int {
- return xxx_messageInfo_VendorExtension.Size(m)
-}
-func (m *VendorExtension) XXX_DiscardUnknown() {
- xxx_messageInfo_VendorExtension.DiscardUnknown(m)
+
+func (*VendorExtension) ProtoMessage() {}
+
+func (x *VendorExtension) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[58]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_VendorExtension proto.InternalMessageInfo
+// Deprecated: Use VendorExtension.ProtoReflect.Descriptor instead.
+func (*VendorExtension) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{58}
+}
-func (m *VendorExtension) GetAdditionalProperties() []*NamedAny {
- if m != nil {
- return m.AdditionalProperties
+func (x *VendorExtension) GetAdditionalProperties() []*NamedAny {
+ if x != nil {
+ return x.AdditionalProperties
}
return nil
}
type Xml struct {
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
- Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix,omitempty"`
- Attribute bool `protobuf:"varint,4,opt,name=attribute,proto3" json:"attribute,omitempty"`
- Wrapped bool `protobuf:"varint,5,opt,name=wrapped,proto3" json:"wrapped,omitempty"`
- VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Xml) Reset() { *m = Xml{} }
-func (m *Xml) String() string { return proto.CompactTextString(m) }
-func (*Xml) ProtoMessage() {}
-func (*Xml) Descriptor() ([]byte, []int) {
- return fileDescriptor_a43d10d209cd31c2, []int{59}
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Xml) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Xml.Unmarshal(m, b)
-}
-func (m *Xml) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Xml.Marshal(b, m, deterministic)
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
+ Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix,omitempty"`
+ Attribute bool `protobuf:"varint,4,opt,name=attribute,proto3" json:"attribute,omitempty"`
+ Wrapped bool `protobuf:"varint,5,opt,name=wrapped,proto3" json:"wrapped,omitempty"`
+ VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
}
-func (m *Xml) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Xml.Merge(m, src)
+
+func (x *Xml) Reset() {
+ *x = Xml{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[59]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Xml) XXX_Size() int {
- return xxx_messageInfo_Xml.Size(m)
+
+func (x *Xml) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Xml) XXX_DiscardUnknown() {
- xxx_messageInfo_Xml.DiscardUnknown(m)
+
+func (*Xml) ProtoMessage() {}
+
+func (x *Xml) ProtoReflect() protoreflect.Message {
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[59]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Xml proto.InternalMessageInfo
+// Deprecated: Use Xml.ProtoReflect.Descriptor instead.
+func (*Xml) Descriptor() ([]byte, []int) {
+ return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{59}
+}
-func (m *Xml) GetName() string {
- if m != nil {
- return m.Name
+func (x *Xml) GetName() string {
+ if x != nil {
+ return x.Name
}
return ""
}
-func (m *Xml) GetNamespace() string {
- if m != nil {
- return m.Namespace
+func (x *Xml) GetNamespace() string {
+ if x != nil {
+ return x.Namespace
}
return ""
}
-func (m *Xml) GetPrefix() string {
- if m != nil {
- return m.Prefix
+func (x *Xml) GetPrefix() string {
+ if x != nil {
+ return x.Prefix
}
return ""
}
-func (m *Xml) GetAttribute() bool {
- if m != nil {
- return m.Attribute
+func (x *Xml) GetAttribute() bool {
+ if x != nil {
+ return x.Attribute
}
return false
}
-func (m *Xml) GetWrapped() bool {
- if m != nil {
- return m.Wrapped
+func (x *Xml) GetWrapped() bool {
+ if x != nil {
+ return x.Wrapped
}
return false
}
-func (m *Xml) GetVendorExtension() []*NamedAny {
- if m != nil {
- return m.VendorExtension
- }
- return nil
-}
-
-func init() {
- proto.RegisterType((*AdditionalPropertiesItem)(nil), "openapi.v2.AdditionalPropertiesItem")
- proto.RegisterType((*Any)(nil), "openapi.v2.Any")
- proto.RegisterType((*ApiKeySecurity)(nil), "openapi.v2.ApiKeySecurity")
- proto.RegisterType((*BasicAuthenticationSecurity)(nil), "openapi.v2.BasicAuthenticationSecurity")
- proto.RegisterType((*BodyParameter)(nil), "openapi.v2.BodyParameter")
- proto.RegisterType((*Contact)(nil), "openapi.v2.Contact")
- proto.RegisterType((*Default)(nil), "openapi.v2.Default")
- proto.RegisterType((*Definitions)(nil), "openapi.v2.Definitions")
- proto.RegisterType((*Document)(nil), "openapi.v2.Document")
- proto.RegisterType((*Examples)(nil), "openapi.v2.Examples")
- proto.RegisterType((*ExternalDocs)(nil), "openapi.v2.ExternalDocs")
- proto.RegisterType((*FileSchema)(nil), "openapi.v2.FileSchema")
- proto.RegisterType((*FormDataParameterSubSchema)(nil), "openapi.v2.FormDataParameterSubSchema")
- proto.RegisterType((*Header)(nil), "openapi.v2.Header")
- proto.RegisterType((*HeaderParameterSubSchema)(nil), "openapi.v2.HeaderParameterSubSchema")
- proto.RegisterType((*Headers)(nil), "openapi.v2.Headers")
- proto.RegisterType((*Info)(nil), "openapi.v2.Info")
- proto.RegisterType((*ItemsItem)(nil), "openapi.v2.ItemsItem")
- proto.RegisterType((*JsonReference)(nil), "openapi.v2.JsonReference")
- proto.RegisterType((*License)(nil), "openapi.v2.License")
- proto.RegisterType((*NamedAny)(nil), "openapi.v2.NamedAny")
- proto.RegisterType((*NamedHeader)(nil), "openapi.v2.NamedHeader")
- proto.RegisterType((*NamedParameter)(nil), "openapi.v2.NamedParameter")
- proto.RegisterType((*NamedPathItem)(nil), "openapi.v2.NamedPathItem")
- proto.RegisterType((*NamedResponse)(nil), "openapi.v2.NamedResponse")
- proto.RegisterType((*NamedResponseValue)(nil), "openapi.v2.NamedResponseValue")
- proto.RegisterType((*NamedSchema)(nil), "openapi.v2.NamedSchema")
- proto.RegisterType((*NamedSecurityDefinitionsItem)(nil), "openapi.v2.NamedSecurityDefinitionsItem")
- proto.RegisterType((*NamedString)(nil), "openapi.v2.NamedString")
- proto.RegisterType((*NamedStringArray)(nil), "openapi.v2.NamedStringArray")
- proto.RegisterType((*NonBodyParameter)(nil), "openapi.v2.NonBodyParameter")
- proto.RegisterType((*Oauth2AccessCodeSecurity)(nil), "openapi.v2.Oauth2AccessCodeSecurity")
- proto.RegisterType((*Oauth2ApplicationSecurity)(nil), "openapi.v2.Oauth2ApplicationSecurity")
- proto.RegisterType((*Oauth2ImplicitSecurity)(nil), "openapi.v2.Oauth2ImplicitSecurity")
- proto.RegisterType((*Oauth2PasswordSecurity)(nil), "openapi.v2.Oauth2PasswordSecurity")
- proto.RegisterType((*Oauth2Scopes)(nil), "openapi.v2.Oauth2Scopes")
- proto.RegisterType((*Operation)(nil), "openapi.v2.Operation")
- proto.RegisterType((*Parameter)(nil), "openapi.v2.Parameter")
- proto.RegisterType((*ParameterDefinitions)(nil), "openapi.v2.ParameterDefinitions")
- proto.RegisterType((*ParametersItem)(nil), "openapi.v2.ParametersItem")
- proto.RegisterType((*PathItem)(nil), "openapi.v2.PathItem")
- proto.RegisterType((*PathParameterSubSchema)(nil), "openapi.v2.PathParameterSubSchema")
- proto.RegisterType((*Paths)(nil), "openapi.v2.Paths")
- proto.RegisterType((*PrimitivesItems)(nil), "openapi.v2.PrimitivesItems")
- proto.RegisterType((*Properties)(nil), "openapi.v2.Properties")
- proto.RegisterType((*QueryParameterSubSchema)(nil), "openapi.v2.QueryParameterSubSchema")
- proto.RegisterType((*Response)(nil), "openapi.v2.Response")
- proto.RegisterType((*ResponseDefinitions)(nil), "openapi.v2.ResponseDefinitions")
- proto.RegisterType((*ResponseValue)(nil), "openapi.v2.ResponseValue")
- proto.RegisterType((*Responses)(nil), "openapi.v2.Responses")
- proto.RegisterType((*Schema)(nil), "openapi.v2.Schema")
- proto.RegisterType((*SchemaItem)(nil), "openapi.v2.SchemaItem")
- proto.RegisterType((*SecurityDefinitions)(nil), "openapi.v2.SecurityDefinitions")
- proto.RegisterType((*SecurityDefinitionsItem)(nil), "openapi.v2.SecurityDefinitionsItem")
- proto.RegisterType((*SecurityRequirement)(nil), "openapi.v2.SecurityRequirement")
- proto.RegisterType((*StringArray)(nil), "openapi.v2.StringArray")
- proto.RegisterType((*Tag)(nil), "openapi.v2.Tag")
- proto.RegisterType((*TypeItem)(nil), "openapi.v2.TypeItem")
- proto.RegisterType((*VendorExtension)(nil), "openapi.v2.VendorExtension")
- proto.RegisterType((*Xml)(nil), "openapi.v2.Xml")
-}
-
-func init() { proto.RegisterFile("openapiv2/OpenAPIv2.proto", fileDescriptor_a43d10d209cd31c2) }
-
-var fileDescriptor_a43d10d209cd31c2 = []byte{
- // 3130 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x3b, 0x4b, 0x73, 0x1c, 0x57,
- 0xd5, 0xf3, 0x7e, 0x1c, 0x69, 0x46, 0xa3, 0x96, 0x2c, 0xb7, 0x24, 0xc7, 0x71, 0xe4, 0x3c, 0x6c,
- 0xe7, 0xb3, 0x9c, 0x4f, 0x29, 0x48, 0x05, 0x2a, 0x05, 0xf2, 0xab, 0xc6, 0xc4, 0x44, 0x4a, 0xcb,
- 0x0e, 0x09, 0x04, 0xba, 0xae, 0x66, 0xee, 0x48, 0x9d, 0x74, 0xf7, 0x6d, 0x77, 0xf7, 0xc8, 0x1a,
- 0x16, 0x2c, 0xa0, 0x8a, 0x35, 0x50, 0x59, 0x53, 0x15, 0x16, 0x14, 0x55, 0x59, 0xb0, 0x62, 0xc5,
- 0x1f, 0x60, 0xc7, 0x3f, 0x60, 0x0d, 0x5b, 0xaa, 0x58, 0x51, 0x3c, 0xea, 0xbe, 0xa6, 0x5f, 0xb7,
- 0xe7, 0x61, 0xb9, 0x80, 0x02, 0xad, 0x66, 0xee, 0x3d, 0xe7, 0x9e, 0x7b, 0xfa, 0xf4, 0x79, 0xdd,
- 0x73, 0x6e, 0xc3, 0x3a, 0xf1, 0xb0, 0x8b, 0x3c, 0xeb, 0x64, 0xe7, 0xd6, 0x9e, 0x87, 0xdd, 0xdd,
- 0xfd, 0x07, 0x27, 0x3b, 0xdb, 0x9e, 0x4f, 0x42, 0xa2, 0x81, 0x00, 0x6d, 0x9f, 0xec, 0x6c, 0xac,
- 0x1f, 0x11, 0x72, 0x64, 0xe3, 0x5b, 0x0c, 0x72, 0x38, 0x1c, 0xdc, 0x42, 0xee, 0x88, 0xa3, 0x6d,
- 0x39, 0xa0, 0xef, 0xf6, 0xfb, 0x56, 0x68, 0x11, 0x17, 0xd9, 0xfb, 0x3e, 0xf1, 0xb0, 0x1f, 0x5a,
- 0x38, 0x78, 0x10, 0x62, 0x47, 0xfb, 0x3f, 0xa8, 0x05, 0xbd, 0x63, 0xec, 0x20, 0xbd, 0x78, 0xa5,
- 0x78, 0x6d, 0x61, 0x47, 0xdb, 0x8e, 0x68, 0x6e, 0x1f, 0x30, 0x48, 0xb7, 0x60, 0x08, 0x1c, 0x6d,
- 0x03, 0xea, 0x87, 0x84, 0xd8, 0x18, 0xb9, 0x7a, 0xe9, 0x4a, 0xf1, 0x5a, 0xa3, 0x5b, 0x30, 0xe4,
- 0xc4, 0xed, 0x3a, 0x54, 0x89, 0x8b, 0xc9, 0x60, 0xeb, 0x1e, 0x94, 0x77, 0xdd, 0x91, 0x76, 0x03,
- 0xaa, 0x27, 0xc8, 0x1e, 0x62, 0x41, 0x78, 0x75, 0x9b, 0x33, 0xb8, 0x2d, 0x19, 0xdc, 0xde, 0x75,
- 0x47, 0x06, 0x47, 0xd1, 0x34, 0xa8, 0x8c, 0x90, 0x63, 0x33, 0xa2, 0x4d, 0x83, 0xfd, 0xdf, 0xfa,
- 0xa2, 0x08, 0xed, 0x5d, 0xcf, 0x7a, 0x17, 0x8f, 0x0e, 0x70, 0x6f, 0xe8, 0x5b, 0xe1, 0x88, 0xa2,
- 0x85, 0x23, 0x8f, 0x53, 0x6c, 0x1a, 0xec, 0x3f, 0x9d, 0x73, 0x91, 0x83, 0xe5, 0x52, 0xfa, 0x5f,
- 0x6b, 0x43, 0xc9, 0x72, 0xf5, 0x32, 0x9b, 0x29, 0x59, 0xae, 0x76, 0x05, 0x16, 0xfa, 0x38, 0xe8,
- 0xf9, 0x96, 0x47, 0x65, 0xa0, 0x57, 0x18, 0x20, 0x3e, 0xa5, 0x7d, 0x0d, 0x3a, 0x27, 0xd8, 0xed,
- 0x13, 0xdf, 0xc4, 0xa7, 0x21, 0x76, 0x03, 0x8a, 0x56, 0xbd, 0x52, 0x66, 0x7c, 0xc7, 0x04, 0xf2,
- 0x1e, 0x72, 0x70, 0x9f, 0xf2, 0xbd, 0xc4, 0xb1, 0xef, 0x49, 0xe4, 0xad, 0xcf, 0x8a, 0xb0, 0x79,
- 0x1b, 0x05, 0x56, 0x6f, 0x77, 0x18, 0x1e, 0x63, 0x37, 0xb4, 0x7a, 0x88, 0x12, 0x9e, 0xc8, 0x7a,
- 0x8a, 0xad, 0xd2, 0x6c, 0x6c, 0x95, 0xe7, 0x61, 0xeb, 0x0f, 0x45, 0x68, 0xdd, 0x26, 0xfd, 0xd1,
- 0x3e, 0xf2, 0x91, 0x83, 0x43, 0xec, 0xa7, 0x37, 0x2d, 0x66, 0x37, 0x9d, 0x45, 0xa2, 0x1b, 0xd0,
- 0xf0, 0xf1, 0x93, 0xa1, 0xe5, 0xe3, 0x3e, 0x13, 0x67, 0xc3, 0x18, 0x8f, 0xb5, 0x1b, 0x63, 0x95,
- 0xaa, 0xe6, 0xa9, 0xd4, 0x58, 0xa1, 0x54, 0x0f, 0x58, 0x9b, 0xe7, 0x01, 0x7f, 0x5c, 0x84, 0xfa,
- 0x1d, 0xe2, 0x86, 0xa8, 0x17, 0x8e, 0x19, 0x2f, 0xc6, 0x18, 0xef, 0x40, 0x79, 0xe8, 0x4b, 0xc5,
- 0xa2, 0x7f, 0xb5, 0x55, 0xa8, 0x62, 0x07, 0x59, 0xb6, 0x78, 0x1a, 0x3e, 0x50, 0x32, 0x52, 0x99,
- 0x87, 0x91, 0x47, 0x50, 0xbf, 0x8b, 0x07, 0x68, 0x68, 0x87, 0xda, 0x03, 0xb8, 0x80, 0xc6, 0xf6,
- 0x66, 0x7a, 0x63, 0x83, 0xd3, 0x8b, 0x13, 0x08, 0xae, 0x22, 0x85, 0x89, 0x6e, 0x7d, 0x07, 0x16,
- 0xee, 0xe2, 0x81, 0xe5, 0x32, 0x48, 0xa0, 0x3d, 0x9c, 0x4c, 0xf9, 0x62, 0x86, 0xb2, 0x10, 0xb7,
- 0x9a, 0xf8, 0x1f, 0xab, 0xd0, 0xb8, 0x4b, 0x7a, 0x43, 0x07, 0xbb, 0xa1, 0xa6, 0x43, 0x3d, 0x78,
- 0x8a, 0x8e, 0x8e, 0xb0, 0x2f, 0xe4, 0x27, 0x87, 0xda, 0xcb, 0x50, 0xb1, 0xdc, 0x01, 0x61, 0x32,
- 0x5c, 0xd8, 0xe9, 0xc4, 0xf7, 0x78, 0xe0, 0x0e, 0x88, 0xc1, 0xa0, 0x54, 0xf8, 0xc7, 0x24, 0x08,
- 0x85, 0x54, 0xd9, 0x7f, 0x6d, 0x13, 0x9a, 0x87, 0x28, 0xc0, 0xa6, 0x87, 0xc2, 0x63, 0x61, 0x75,
- 0x0d, 0x3a, 0xb1, 0x8f, 0xc2, 0x63, 0xb6, 0x21, 0xe5, 0x0e, 0x07, 0xcc, 0xd2, 0xe8, 0x86, 0x7c,
- 0x48, 0x95, 0xab, 0x47, 0xdc, 0x60, 0x48, 0x41, 0x35, 0x06, 0x1a, 0x8f, 0x29, 0xcc, 0xf3, 0x49,
- 0x7f, 0xd8, 0xc3, 0x81, 0x5e, 0xe7, 0x30, 0x39, 0xd6, 0x5e, 0x83, 0x2a, 0xdd, 0x29, 0xd0, 0x1b,
- 0x8c, 0xd3, 0xe5, 0x38, 0xa7, 0x74, 0xcb, 0xc0, 0xe0, 0x70, 0xed, 0x6d, 0x6a, 0x03, 0x63, 0xa9,
- 0xea, 0x4d, 0x86, 0x9e, 0x10, 0x5e, 0x4c, 0xe8, 0x46, 0x1c, 0x57, 0xfb, 0x3a, 0x80, 0x27, 0x6d,
- 0x29, 0xd0, 0x81, 0xad, 0xbc, 0x92, 0xdc, 0x48, 0x40, 0xe3, 0x24, 0x62, 0x6b, 0xb4, 0x77, 0xa0,
- 0xe9, 0xe3, 0xc0, 0x23, 0x6e, 0x80, 0x03, 0x7d, 0x81, 0x11, 0x78, 0x31, 0x4e, 0xc0, 0x10, 0xc0,
- 0xf8, 0xfa, 0x68, 0x85, 0xf6, 0x55, 0x68, 0x04, 0xc2, 0xa9, 0xe8, 0x8b, 0xec, 0xad, 0x27, 0x56,
- 0x4b, 0x87, 0x63, 0x70, 0x6b, 0xa4, 0xaf, 0xd6, 0x18, 0x2f, 0xd0, 0x0c, 0x58, 0x95, 0xff, 0xcd,
- 0xb8, 0x04, 0x5a, 0x59, 0x36, 0x24, 0xa1, 0x38, 0x1b, 0x2b, 0x41, 0x76, 0x52, 0xbb, 0x0a, 0x95,
- 0x10, 0x1d, 0x05, 0x7a, 0x9b, 0x31, 0xb3, 0x14, 0xa7, 0xf1, 0x08, 0x1d, 0x19, 0x0c, 0xa8, 0xbd,
- 0x03, 0x2d, 0x6a, 0x57, 0x3e, 0x55, 0xdb, 0x3e, 0xe9, 0x05, 0xfa, 0x12, 0xdb, 0x51, 0x8f, 0x63,
- 0xdf, 0x13, 0x08, 0x77, 0x49, 0x2f, 0x30, 0x16, 0x71, 0x6c, 0xa4, 0xb4, 0xce, 0xce, 0x3c, 0xd6,
- 0xf9, 0x18, 0x1a, 0xf7, 0x4e, 0x91, 0xe3, 0xd9, 0x38, 0x78, 0x9e, 0xe6, 0xf9, 0xa3, 0x22, 0x2c,
- 0xc6, 0xd9, 0x9e, 0xc1, 0xbb, 0x66, 0x1d, 0xd2, 0x99, 0x9d, 0xfc, 0x3f, 0x4a, 0x00, 0xf7, 0x2d,
- 0x1b, 0x73, 0x63, 0xd7, 0xd6, 0xa0, 0x36, 0x20, 0xbe, 0x83, 0x42, 0xb1, 0xbd, 0x18, 0x51, 0xc7,
- 0x17, 0x5a, 0xa1, 0x2d, 0x1d, 0x3b, 0x1f, 0xa4, 0x39, 0x2e, 0x67, 0x39, 0xbe, 0x0e, 0xf5, 0x3e,
- 0xf7, 0x6c, 0xcc, 0x86, 0x53, 0xef, 0x98, 0x72, 0x24, 0xe1, 0x89, 0xb0, 0xc0, 0x8d, 0x3a, 0x0a,
- 0x0b, 0x32, 0x02, 0xd6, 0x62, 0x11, 0x70, 0x93, 0xda, 0x02, 0xea, 0x9b, 0xc4, 0xb5, 0x47, 0x7a,
- 0x5d, 0xc6, 0x11, 0xd4, 0xdf, 0x73, 0xed, 0x51, 0x56, 0x67, 0x1a, 0x73, 0xe9, 0xcc, 0x75, 0xa8,
- 0x63, 0xfe, 0xca, 0x85, 0x81, 0x67, 0xd9, 0x16, 0x70, 0xe5, 0x1b, 0x80, 0x79, 0xde, 0xc0, 0x17,
- 0x35, 0xd8, 0xb8, 0x4f, 0x7c, 0xe7, 0x2e, 0x0a, 0xd1, 0xd8, 0x01, 0x1c, 0x0c, 0x0f, 0x0f, 0x64,
- 0xda, 0x14, 0x89, 0xa5, 0x98, 0x8a, 0x96, 0x3c, 0xb2, 0x96, 0xf2, 0x72, 0x95, 0x72, 0x7e, 0x7c,
- 0xae, 0xc4, 0xc2, 0xdc, 0x0d, 0x58, 0x46, 0xb6, 0x4d, 0x9e, 0x9a, 0xd8, 0xf1, 0xc2, 0x91, 0xc9,
- 0x13, 0xaf, 0x2a, 0xdb, 0x6a, 0x89, 0x01, 0xee, 0xd1, 0xf9, 0x0f, 0x64, 0xb2, 0x95, 0x79, 0x11,
- 0x91, 0xce, 0xd4, 0x13, 0x3a, 0xf3, 0xff, 0x50, 0xb5, 0x42, 0xec, 0x48, 0xd9, 0x6f, 0x26, 0x3c,
- 0x9d, 0x6f, 0x39, 0x56, 0x68, 0x9d, 0xf0, 0x4c, 0x32, 0x30, 0x38, 0xa6, 0xf6, 0x3a, 0x2c, 0xf7,
- 0x88, 0x6d, 0xe3, 0x1e, 0x65, 0xd6, 0x14, 0x54, 0x9b, 0x8c, 0x6a, 0x27, 0x02, 0xdc, 0xe7, 0xf4,
- 0x63, 0xba, 0x05, 0x53, 0x74, 0x4b, 0x87, 0xba, 0x83, 0x4e, 0x2d, 0x67, 0xe8, 0x30, 0xaf, 0x59,
- 0x34, 0xe4, 0x90, 0xee, 0x88, 0x4f, 0x7b, 0xf6, 0x30, 0xb0, 0x4e, 0xb0, 0x29, 0x71, 0x16, 0xd9,
- 0xc3, 0x77, 0xc6, 0x80, 0x6f, 0x0a, 0x64, 0x4a, 0xc6, 0x72, 0x19, 0x4a, 0x4b, 0x90, 0xe1, 0xc3,
- 0x14, 0x19, 0x81, 0xd3, 0x4e, 0x93, 0x11, 0xc8, 0x2f, 0x00, 0x38, 0xe8, 0xd4, 0xb4, 0xb1, 0x7b,
- 0x14, 0x1e, 0x33, 0x6f, 0x56, 0x36, 0x9a, 0x0e, 0x3a, 0x7d, 0xc8, 0x26, 0x18, 0xd8, 0x72, 0x25,
- 0xb8, 0x23, 0xc0, 0x96, 0x2b, 0xc0, 0x3a, 0xd4, 0x3d, 0x14, 0x52, 0x65, 0xd5, 0x97, 0x79, 0xb0,
- 0x15, 0x43, 0x6a, 0x11, 0x94, 0x2e, 0x17, 0xba, 0xc6, 0xd6, 0x35, 0x1c, 0x74, 0xca, 0x24, 0xcc,
- 0x80, 0x96, 0x2b, 0x80, 0x2b, 0x02, 0x68, 0xb9, 0x1c, 0xf8, 0x12, 0x2c, 0x0e, 0x5d, 0xeb, 0xc9,
- 0x10, 0x0b, 0xf8, 0x2a, 0xe3, 0x7c, 0x81, 0xcf, 0x71, 0x94, 0xab, 0x50, 0xc1, 0xee, 0xd0, 0xd1,
- 0x2f, 0x64, 0x5d, 0x35, 0x15, 0x35, 0x03, 0x6a, 0x2f, 0xc2, 0x82, 0x33, 0xb4, 0x43, 0xcb, 0xb3,
- 0xb1, 0x49, 0x06, 0xfa, 0x1a, 0x13, 0x12, 0xc8, 0xa9, 0xbd, 0x81, 0xd2, 0x5a, 0x2e, 0xce, 0x65,
- 0x2d, 0x55, 0xa8, 0x75, 0x31, 0xea, 0x63, 0x5f, 0x99, 0x16, 0x47, 0xba, 0x58, 0x52, 0xeb, 0x62,
- 0xf9, 0x6c, 0xba, 0x58, 0x99, 0xae, 0x8b, 0xd5, 0xd9, 0x75, 0xb1, 0x36, 0x83, 0x2e, 0xd6, 0xa7,
- 0xeb, 0x62, 0x63, 0x06, 0x5d, 0x6c, 0xce, 0xa4, 0x8b, 0x30, 0x59, 0x17, 0x17, 0x26, 0xe8, 0xe2,
- 0xe2, 0x04, 0x5d, 0x6c, 0x4d, 0xd2, 0xc5, 0xf6, 0x14, 0x5d, 0x5c, 0xca, 0xd7, 0xc5, 0xce, 0x1c,
- 0xba, 0xb8, 0x9c, 0xd1, 0xc5, 0x94, 0xb7, 0xd4, 0x66, 0x3b, 0x42, 0xad, 0xcc, 0xa3, 0xad, 0x7f,
- 0xab, 0x82, 0xce, 0xb5, 0xf5, 0xdf, 0xe2, 0xd9, 0xa5, 0x85, 0x54, 0x95, 0x16, 0x52, 0x53, 0x5b,
- 0x48, 0xfd, 0x6c, 0x16, 0xd2, 0x98, 0x6e, 0x21, 0xcd, 0xd9, 0x2d, 0x04, 0x66, 0xb0, 0x90, 0x85,
- 0xe9, 0x16, 0xb2, 0x38, 0x83, 0x85, 0xb4, 0x66, 0xb2, 0x90, 0xf6, 0x64, 0x0b, 0x59, 0x9a, 0x60,
- 0x21, 0x9d, 0x09, 0x16, 0xb2, 0x3c, 0xc9, 0x42, 0xb4, 0x29, 0x16, 0xb2, 0x92, 0x6f, 0x21, 0xab,
- 0x73, 0x58, 0xc8, 0x85, 0x99, 0xbc, 0xf5, 0xda, 0x3c, 0xfa, 0xff, 0x2d, 0xa8, 0x73, 0xf5, 0x7f,
- 0x86, 0xe3, 0x27, 0x5f, 0x98, 0x93, 0x3c, 0x7f, 0x5e, 0x82, 0x0a, 0x3d, 0x40, 0x46, 0x89, 0x69,
- 0x31, 0x9e, 0x98, 0xea, 0x50, 0x3f, 0xc1, 0x7e, 0x10, 0x55, 0x46, 0xe4, 0x70, 0x06, 0x43, 0xba,
- 0x06, 0x9d, 0x10, 0xfb, 0x4e, 0x60, 0x92, 0x81, 0x19, 0x60, 0xff, 0xc4, 0xea, 0x49, 0xa3, 0x6a,
- 0xb3, 0xf9, 0xbd, 0xc1, 0x01, 0x9f, 0xd5, 0x6e, 0x42, 0xbd, 0xc7, 0xcb, 0x07, 0xc2, 0xe9, 0xaf,
- 0xc4, 0x1f, 0x42, 0x54, 0x16, 0x0c, 0x89, 0x43, 0xd1, 0x6d, 0xab, 0x87, 0xdd, 0x80, 0xa7, 0x4f,
- 0x29, 0xf4, 0x87, 0x1c, 0x64, 0x48, 0x1c, 0xa5, 0xf0, 0xeb, 0xf3, 0x08, 0xff, 0x2d, 0x68, 0x32,
- 0x65, 0x60, 0xb5, 0xba, 0x1b, 0xb1, 0x5a, 0x5d, 0x79, 0x72, 0x61, 0x65, 0xeb, 0x2e, 0xb4, 0xbe,
- 0x11, 0x10, 0xd7, 0xc0, 0x03, 0xec, 0x63, 0xb7, 0x87, 0xb5, 0x65, 0xa8, 0x98, 0x3e, 0x1e, 0x08,
- 0x19, 0x97, 0x0d, 0x3c, 0x98, 0x5e, 0x7f, 0xda, 0xf2, 0xa0, 0x2e, 0x9e, 0x69, 0xc6, 0xe2, 0xca,
- 0x99, 0xcf, 0x32, 0xf7, 0xa0, 0x21, 0x81, 0xca, 0x2d, 0x5f, 0x91, 0x55, 0xc5, 0x92, 0xda, 0x01,
- 0x71, 0xe8, 0xd6, 0xbb, 0xb0, 0x10, 0x53, 0x40, 0x25, 0xa5, 0x6b, 0x49, 0x4a, 0x09, 0x61, 0x0a,
- 0xbd, 0x15, 0xc4, 0xde, 0x87, 0x36, 0x23, 0x16, 0x15, 0xd1, 0x54, 0xf4, 0x5e, 0x4f, 0xd2, 0xbb,
- 0xa0, 0x2c, 0x0a, 0x48, 0x92, 0x7b, 0xd0, 0x12, 0x24, 0xc3, 0x63, 0xf6, 0x6e, 0x55, 0x14, 0x6f,
- 0x24, 0x29, 0xae, 0xa6, 0xeb, 0x19, 0x74, 0x61, 0x9a, 0xa0, 0xac, 0x1e, 0xcc, 0x4d, 0x50, 0x2e,
- 0x94, 0x04, 0x3f, 0x02, 0x2d, 0x41, 0x70, 0x7c, 0x76, 0xc8, 0x50, 0xbd, 0x95, 0xa4, 0xba, 0xae,
- 0xa2, 0xca, 0x56, 0xa7, 0x5f, 0x8e, 0x88, 0xa1, 0xf3, 0xbe, 0x1c, 0xa1, 0xe9, 0x82, 0x98, 0x03,
- 0x97, 0x38, 0xb1, 0x6c, 0x69, 0x22, 0x57, 0xb0, 0x6f, 0x27, 0xa9, 0x5f, 0x9d, 0x52, 0xf7, 0x88,
- 0xcb, 0xf9, 0x2d, 0xc9, 0x7b, 0xe8, 0x5b, 0xee, 0x91, 0x92, 0xfa, 0x6a, 0x9c, 0x7a, 0x53, 0x2e,
- 0x7c, 0x0c, 0x9d, 0xd8, 0xc2, 0x5d, 0xdf, 0x47, 0x6a, 0x05, 0xbf, 0x99, 0xe4, 0x2d, 0xe1, 0x53,
- 0x63, 0x6b, 0x25, 0xd9, 0xdf, 0x94, 0xa1, 0xf3, 0x1e, 0x71, 0x93, 0x35, 0x5e, 0x0c, 0x9b, 0xc7,
- 0x4c, 0x83, 0xcd, 0x71, 0xdd, 0xc9, 0x0c, 0x86, 0x87, 0x66, 0xa2, 0xd2, 0xff, 0x72, 0x56, 0xe1,
- 0xb3, 0x09, 0x4e, 0xb7, 0x60, 0xe8, 0xc7, 0x79, 0xc9, 0x8f, 0x0d, 0x97, 0x69, 0xc2, 0x60, 0xf6,
- 0x51, 0x88, 0xd4, 0x3b, 0xf1, 0x67, 0x78, 0x35, 0xbe, 0x53, 0xfe, 0x31, 0xb9, 0x5b, 0x30, 0x36,
- 0x06, 0xf9, 0x87, 0xe8, 0x43, 0xd8, 0x78, 0x32, 0xc4, 0xfe, 0x48, 0xbd, 0x53, 0x39, 0xfb, 0x26,
- 0xdf, 0xa7, 0xd8, 0xca, 0x6d, 0x2e, 0x3e, 0x51, 0x83, 0x34, 0x13, 0xd6, 0x3d, 0x14, 0x1e, 0xab,
- 0xb7, 0xe0, 0xc5, 0x8f, 0xad, 0xb4, 0x15, 0x2a, 0x77, 0x58, 0xf3, 0x94, 0x90, 0xa8, 0x49, 0xf2,
- 0x79, 0x09, 0xf4, 0x3d, 0x34, 0x0c, 0x8f, 0x77, 0x76, 0x7b, 0x3d, 0x1c, 0x04, 0x77, 0x48, 0x1f,
- 0x4f, 0xeb, 0x73, 0x0c, 0x6c, 0xf2, 0x54, 0x56, 0xe5, 0xe9, 0x7f, 0xed, 0x0d, 0x1a, 0x10, 0x88,
- 0x87, 0xe5, 0x91, 0x28, 0x51, 0x1a, 0xe1, 0xd4, 0x0f, 0x18, 0xdc, 0x10, 0x78, 0x34, 0x6b, 0xa2,
- 0xd3, 0xc4, 0xb7, 0xbe, 0xcf, 0xfa, 0x13, 0x26, 0xf5, 0xdf, 0xe2, 0x40, 0x94, 0x00, 0x3c, 0xf6,
- 0x6d, 0x9a, 0xc0, 0x84, 0xe4, 0x53, 0xcc, 0x91, 0x78, 0xfe, 0xd9, 0x60, 0x13, 0x14, 0x98, 0x0a,
- 0x1e, 0xb5, 0xd9, 0x32, 0xef, 0xb9, 0x82, 0xdf, 0x5f, 0x8a, 0xb0, 0x2e, 0x64, 0xe4, 0x79, 0xf6,
- 0x2c, 0x1d, 0x95, 0xe7, 0x23, 0xa4, 0xc4, 0x73, 0x57, 0x26, 0x3f, 0x77, 0x75, 0xb6, 0xe7, 0x9e,
- 0xab, 0xa7, 0xf1, 0xc3, 0x12, 0xac, 0x71, 0xc6, 0x1e, 0x38, 0xf4, 0xb9, 0xad, 0xf0, 0x3f, 0x4d,
- 0x33, 0xfe, 0x05, 0x42, 0xf8, 0x73, 0x51, 0x0a, 0x61, 0x1f, 0x05, 0xc1, 0x53, 0xe2, 0xf7, 0xff,
- 0x07, 0xde, 0xfc, 0xc7, 0xb0, 0x18, 0xe7, 0xeb, 0x19, 0xfa, 0x3d, 0x2c, 0x42, 0xe4, 0x24, 0xdc,
- 0x3f, 0xaf, 0x40, 0x73, 0xcf, 0xc3, 0x3e, 0x92, 0x87, 0x4d, 0x56, 0xb7, 0x2f, 0xb2, 0x3a, 0x2d,
- 0x2f, 0xd3, 0xeb, 0x50, 0x0f, 0x86, 0x8e, 0x83, 0xfc, 0x91, 0xcc, 0xb9, 0xc5, 0x70, 0x86, 0x9c,
- 0x3b, 0x53, 0xae, 0xad, 0xcc, 0x55, 0xae, 0x7d, 0x09, 0x16, 0x89, 0xe4, 0xcd, 0xb4, 0xfa, 0x52,
- 0xbc, 0xe3, 0xb9, 0x07, 0xfd, 0x44, 0xef, 0xa7, 0x96, 0xea, 0xfd, 0xc4, 0x7b, 0x46, 0xf5, 0x54,
- 0xcf, 0xe8, 0x2b, 0x89, 0x9e, 0x4d, 0x83, 0x89, 0x6e, 0x43, 0x99, 0x9e, 0xf1, 0x50, 0x1f, 0xef,
- 0xd6, 0xbc, 0x19, 0xef, 0xd6, 0x34, 0xb3, 0x99, 0x9d, 0x4c, 0x70, 0x12, 0x3d, 0x9a, 0x58, 0x6b,
- 0x0b, 0x92, 0xad, 0xad, 0xcb, 0x00, 0x7d, 0xec, 0xf9, 0xb8, 0x87, 0x42, 0xdc, 0x17, 0xa7, 0xde,
- 0xd8, 0xcc, 0xd9, 0xba, 0x3b, 0x2a, 0xf5, 0x6b, 0xcd, 0xa3, 0x7e, 0xbf, 0x2c, 0x42, 0x33, 0xca,
- 0x22, 0x6e, 0x43, 0xfb, 0x90, 0xf4, 0x63, 0xf1, 0x56, 0x24, 0x0e, 0x89, 0x04, 0x2f, 0x91, 0x78,
- 0x74, 0x0b, 0x46, 0xeb, 0x30, 0x91, 0x89, 0x3c, 0x04, 0xcd, 0x25, 0xae, 0x99, 0xa2, 0xc3, 0xd3,
- 0x82, 0x4b, 0x09, 0xa6, 0x52, 0x39, 0x4c, 0xb7, 0x60, 0x74, 0xdc, 0xd4, 0x5c, 0x14, 0x3d, 0x8f,
- 0x60, 0x55, 0xd5, 0x67, 0xd3, 0xf6, 0x26, 0xdb, 0xcb, 0x46, 0x46, 0x0c, 0x51, 0x62, 0xae, 0x36,
- 0x99, 0xcf, 0x8a, 0xd0, 0x4e, 0x6a, 0x87, 0xf6, 0x25, 0x68, 0xa6, 0x25, 0xa2, 0xce, 0xf5, 0xbb,
- 0x05, 0x23, 0xc2, 0xa4, 0xd2, 0xfc, 0x24, 0x20, 0x2e, 0x3d, 0x83, 0xf1, 0x13, 0x99, 0x2a, 0x5d,
- 0x4e, 0x1c, 0xd9, 0xa8, 0x34, 0x3f, 0x89, 0x4f, 0x44, 0xcf, 0xff, 0xfb, 0x32, 0x34, 0xc6, 0x47,
- 0x07, 0xc5, 0xc9, 0xee, 0x35, 0x28, 0x1f, 0xe1, 0x50, 0x75, 0x12, 0x19, 0xdb, 0xbf, 0x41, 0x31,
- 0x28, 0xa2, 0x37, 0x0c, 0x85, 0x7f, 0xcc, 0x43, 0xf4, 0x86, 0xa1, 0x76, 0x1d, 0x2a, 0x1e, 0x09,
- 0x64, 0x07, 0x28, 0x07, 0x93, 0xa1, 0x68, 0x37, 0xa1, 0xd6, 0xc7, 0x36, 0x0e, 0xb1, 0x38, 0x51,
- 0xe7, 0x20, 0x0b, 0x24, 0xed, 0x16, 0xd4, 0x89, 0xc7, 0xdb, 0x90, 0xb5, 0x49, 0xf8, 0x12, 0x8b,
- 0xb2, 0x42, 0x53, 0x52, 0x51, 0xe4, 0xca, 0x63, 0x85, 0xa2, 0xd0, 0x33, 0x99, 0x87, 0xc2, 0xde,
- 0xb1, 0x68, 0x5f, 0xe4, 0xe0, 0x72, 0x9c, 0x94, 0x9b, 0x68, 0xce, 0xe5, 0x26, 0xce, 0xdc, 0x41,
- 0xfa, 0x6b, 0x15, 0xd6, 0xd4, 0xd9, 0xe4, 0x79, 0x8d, 0xf1, 0xbc, 0xc6, 0xf8, 0xdf, 0x5e, 0x63,
- 0x7c, 0x0a, 0x55, 0x76, 0x41, 0x43, 0x49, 0xa9, 0x38, 0x07, 0x25, 0xed, 0x26, 0x54, 0xd8, 0x6d,
- 0x93, 0x12, 0x5b, 0xb4, 0xae, 0x70, 0xf8, 0xa2, 0x6e, 0xc2, 0xd0, 0xb6, 0x7e, 0x56, 0x85, 0xa5,
- 0x94, 0xd6, 0x9e, 0xf7, 0xa4, 0xce, 0x7b, 0x52, 0x67, 0xea, 0x49, 0xa9, 0x74, 0x58, 0x9b, 0xc7,
- 0x1a, 0xbe, 0x0d, 0x10, 0xa5, 0x20, 0xcf, 0xf9, 0xce, 0xd7, 0xaf, 0x6a, 0x70, 0x31, 0xa7, 0x30,
- 0x72, 0x7e, 0x4d, 0xe1, 0xfc, 0x9a, 0xc2, 0xf9, 0x35, 0x85, 0xc8, 0x0c, 0xff, 0x5e, 0x84, 0xc6,
- 0xb8, 0x9c, 0x3e, 0xfd, 0x62, 0xd7, 0xf6, 0xb8, 0x3b, 0xc3, 0xd3, 0xee, 0xb5, 0x6c, 0xcd, 0x9a,
- 0x05, 0x1e, 0x79, 0xf5, 0xf5, 0x26, 0xd4, 0x79, 0x65, 0x55, 0x06, 0x8f, 0x95, 0x6c, 0x41, 0x36,
- 0x30, 0x24, 0x8e, 0xf6, 0x06, 0x34, 0xc4, 0x75, 0x25, 0x79, 0xb2, 0x5e, 0x4d, 0x9e, 0xac, 0x39,
- 0xcc, 0x18, 0x63, 0x9d, 0xfd, 0x4e, 0x33, 0x86, 0x15, 0xc5, 0x65, 0x44, 0xed, 0xbd, 0xc9, 0x0e,
- 0x29, 0x1b, 0x73, 0xc7, 0xad, 0x05, 0xb5, 0x4b, 0xfa, 0x49, 0x11, 0x5a, 0xc9, 0x2e, 0xc3, 0x0e,
- 0x75, 0x44, 0x7c, 0x62, 0x7c, 0x7b, 0x5c, 0x71, 0xe6, 0xee, 0x16, 0x8c, 0x31, 0xde, 0xf3, 0x3d,
- 0x5f, 0xfd, 0xb4, 0x08, 0xcd, 0xf1, 0xc9, 0x5e, 0xbb, 0x03, 0x2d, 0xb9, 0x8d, 0xd9, 0x23, 0x7d,
- 0x2c, 0x1e, 0xf4, 0x72, 0xee, 0x83, 0xf2, 0x6e, 0xc7, 0xa2, 0x5c, 0x74, 0x87, 0xf4, 0xd5, 0xad,
- 0xc0, 0xd2, 0x3c, 0x6f, 0xe3, 0xd7, 0x4d, 0xa8, 0x09, 0x47, 0xad, 0x38, 0xf1, 0xe5, 0x25, 0x28,
- 0xe3, 0xde, 0x6a, 0x79, 0xc2, 0xa5, 0xbf, 0xca, 0xc4, 0x4b, 0x7f, 0xd3, 0x12, 0x8f, 0x94, 0x25,
- 0xd6, 0x32, 0x96, 0x18, 0x73, 0x89, 0xf5, 0x19, 0x5c, 0x62, 0x63, 0xba, 0x4b, 0x6c, 0xce, 0xe0,
- 0x12, 0x61, 0x26, 0x97, 0xb8, 0x30, 0xd9, 0x25, 0x2e, 0x4e, 0x70, 0x89, 0xad, 0x09, 0x2e, 0xb1,
- 0x3d, 0xc9, 0x25, 0x2e, 0x4d, 0x71, 0x89, 0x9d, 0xac, 0x4b, 0x7c, 0x05, 0xda, 0x94, 0x78, 0xcc,
- 0xd8, 0xf8, 0x49, 0xa0, 0xe5, 0xa0, 0xd3, 0x58, 0xae, 0x40, 0xd1, 0x2c, 0x37, 0x8e, 0xa6, 0x09,
- 0x34, 0xcb, 0x8d, 0xa1, 0xc5, 0x03, 0xfd, 0x4a, 0xea, 0x9a, 0xe6, 0x4c, 0x27, 0x82, 0x8f, 0xf2,
- 0x5c, 0xc0, 0x85, 0x6c, 0x6b, 0x29, 0xef, 0xd3, 0x13, 0xb5, 0x37, 0xd0, 0xae, 0x89, 0xb0, 0xbf,
- 0x96, 0xb5, 0xfb, 0x47, 0x23, 0x0f, 0xf3, 0xdc, 0x9d, 0x25, 0x03, 0xaf, 0xcb, 0xa0, 0x7f, 0x31,
- 0x7b, 0xb8, 0x1f, 0x37, 0xcd, 0x65, 0xb8, 0xbf, 0x0e, 0x35, 0x64, 0xdb, 0x54, 0x3f, 0xf5, 0xdc,
- 0xde, 0x79, 0x15, 0xd9, 0xf6, 0xde, 0x40, 0xfb, 0x32, 0x40, 0xec, 0x89, 0xd6, 0xb3, 0xce, 0x3c,
- 0xe2, 0xd6, 0x88, 0x61, 0x6a, 0x2f, 0x43, 0xab, 0x6f, 0x51, 0x0b, 0x72, 0x2c, 0x17, 0x85, 0xc4,
- 0xd7, 0x37, 0x98, 0x82, 0x24, 0x27, 0x93, 0x57, 0x5e, 0x37, 0x53, 0x57, 0x5e, 0x5f, 0x82, 0xf2,
- 0xa9, 0x63, 0xeb, 0x97, 0xb2, 0x16, 0xf7, 0xa1, 0x63, 0x1b, 0x14, 0x96, 0x2d, 0xb3, 0xbe, 0xf0,
- 0xac, 0xb7, 0x62, 0x2f, 0x3f, 0xc3, 0xad, 0xd8, 0x17, 0xe7, 0xf1, 0x58, 0x3f, 0x00, 0x88, 0xe2,
- 0xde, 0x9c, 0x5f, 0x1a, 0xbd, 0x0d, 0x0b, 0x03, 0xcb, 0xc6, 0x66, 0x7e, 0x48, 0x8d, 0x6e, 0x3c,
- 0x77, 0x0b, 0x06, 0x0c, 0xc6, 0xa3, 0xc8, 0x8b, 0x87, 0xb0, 0xa2, 0xe8, 0xe6, 0x6a, 0xdf, 0x9d,
- 0x1c, 0xbf, 0xae, 0x65, 0x13, 0xea, 0x9c, 0x96, 0xb0, 0x3a, 0x9c, 0xfd, 0xa9, 0x02, 0x17, 0xf3,
- 0x9a, 0xd1, 0x0e, 0xbc, 0x70, 0x88, 0x02, 0xab, 0x67, 0xa2, 0xc4, 0x57, 0x42, 0xe6, 0xb8, 0xe6,
- 0xcb, 0x45, 0xf3, 0x5a, 0xa2, 0xc2, 0x9a, 0xff, 0x55, 0x51, 0xb7, 0x60, 0x6c, 0x1e, 0x4e, 0xf8,
- 0xe8, 0xe8, 0x3e, 0x74, 0x90, 0x67, 0x99, 0x9f, 0xe2, 0x51, 0xb4, 0x03, 0x97, 0x64, 0xa2, 0xae,
- 0x95, 0xfc, 0xca, 0xaa, 0x5b, 0x30, 0xda, 0x28, 0xf9, 0xdd, 0xd5, 0xf7, 0x40, 0x27, 0xac, 0x2d,
- 0x61, 0x5a, 0xa2, 0x21, 0x15, 0xd1, 0x2b, 0x67, 0xbb, 0xa2, 0xea, 0xde, 0x55, 0xb7, 0x60, 0xac,
- 0x11, 0x75, 0x57, 0x2b, 0xa2, 0xef, 0x89, 0x5e, 0x4f, 0x44, 0xbf, 0x92, 0x47, 0x3f, 0xdd, 0x16,
- 0x8a, 0xe8, 0x67, 0x1a, 0x46, 0x47, 0xb0, 0x29, 0xe8, 0xa3, 0xa8, 0x91, 0x18, 0x6d, 0xc1, 0x03,
- 0xdc, 0x2b, 0xd9, 0x2d, 0x14, 0x6d, 0xc7, 0x6e, 0xc1, 0x58, 0x27, 0xb9, 0x3d, 0x49, 0x1c, 0x6d,
- 0xc4, 0xba, 0xba, 0x2c, 0x5d, 0x88, 0x36, 0xaa, 0x65, 0xbd, 0x63, 0x5e, 0x0f, 0xb8, 0x5b, 0x30,
- 0x84, 0x4c, 0xb2, 0xb0, 0x48, 0xc3, 0x8f, 0x23, 0x0d, 0x8f, 0xb5, 0x04, 0xb4, 0xf7, 0x27, 0x6b,
- 0xf8, 0xa5, 0x9c, 0xb6, 0x11, 0xbf, 0x58, 0xa0, 0xd6, 0xea, 0xab, 0xb0, 0x10, 0xbf, 0xb9, 0xb0,
- 0x1a, 0x7d, 0xdc, 0x57, 0x8e, 0xee, 0x38, 0xfc, 0xb6, 0x08, 0xe5, 0x47, 0x48, 0x7d, 0x2b, 0x62,
- 0xfa, 0xc7, 0x6e, 0x19, 0xcf, 0x56, 0x3e, 0xf3, 0x37, 0x22, 0x73, 0x7d, 0xc1, 0x75, 0x05, 0x1a,
- 0x32, 0xc2, 0xe4, 0x3c, 0xdf, 0xc7, 0xb0, 0xf4, 0x41, 0xaa, 0xde, 0xf4, 0x1c, 0x3f, 0x26, 0xf9,
- 0x5d, 0x11, 0xca, 0x1f, 0x3a, 0xb6, 0x52, 0x7a, 0x97, 0xa0, 0x49, 0x7f, 0x03, 0x0f, 0xf5, 0xe4,
- 0xbd, 0x92, 0x68, 0x82, 0x26, 0x7f, 0x9e, 0x8f, 0x07, 0xd6, 0xa9, 0xc8, 0xf2, 0xc4, 0x88, 0xae,
- 0x42, 0x61, 0xe8, 0x5b, 0x87, 0xc3, 0x10, 0x8b, 0xcf, 0xf4, 0xa2, 0x09, 0x9a, 0xca, 0x3c, 0xf5,
- 0x91, 0xe7, 0xe1, 0xbe, 0x38, 0x82, 0xcb, 0xe1, 0x99, 0xfb, 0x98, 0xb7, 0x5f, 0x85, 0x36, 0xf1,
- 0x8f, 0x24, 0xae, 0x79, 0xb2, 0x73, 0x7b, 0x51, 0x7c, 0xbb, 0xba, 0xef, 0x93, 0x90, 0xec, 0x17,
- 0x7f, 0x51, 0x2a, 0xef, 0xed, 0x1e, 0x1c, 0xd6, 0xd8, 0xc7, 0xa0, 0x6f, 0xfe, 0x33, 0x00, 0x00,
- 0xff, 0xff, 0xdc, 0xb2, 0x46, 0x98, 0xe4, 0x3a, 0x00, 0x00,
+func (x *Xml) GetVendorExtension() []*NamedAny {
+ if x != nil {
+ return x.VendorExtension
+ }
+ return nil
+}
+
+var File_openapiv2_OpenAPIv2_proto protoreflect.FileDescriptor
+
+var file_openapiv2_OpenAPIv2_proto_rawDesc = []byte{
+ 0x0a, 0x19, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x4f, 0x70, 0x65, 0x6e,
+ 0x41, 0x50, 0x49, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6f, 0x70, 0x65,
+ 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x22, 0x6d, 0x0a, 0x18, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c,
+ 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x2c,
+ 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12,
+ 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x61, 0x48, 0x00, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x0a, 0x07,
+ 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52,
+ 0x07, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f,
+ 0x66, 0x22, 0x45, 0x0a, 0x03, 0x41, 0x6e, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x22, 0xab, 0x01, 0x0a, 0x0e, 0x41, 0x70, 0x69,
+ 0x4b, 0x65, 0x79, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74,
+ 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12,
+ 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x02, 0x69, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f,
+ 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d,
+ 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74,
+ 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x94, 0x01, 0x0a, 0x1b, 0x42, 0x61, 0x73, 0x69, 0x63,
+ 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65,
+ 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10,
+ 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69,
+ 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65,
+ 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xde, 0x01,
+ 0x0a, 0x0d, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12,
+ 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65,
+ 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65,
+ 0x64, 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53,
+ 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x3f, 0x0a,
+ 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
+ 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+ 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76,
+ 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x86,
+ 0x01, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10,
+ 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c,
+ 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72,
+ 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61,
+ 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78,
+ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x54, 0x0a, 0x07, 0x44, 0x65, 0x66, 0x61, 0x75,
+ 0x6c, 0x74, 0x12, 0x49, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c,
+ 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e,
+ 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f,
+ 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x5b, 0x0a,
+ 0x0b, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4c, 0x0a, 0x15,
+ 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65,
+ 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70,
+ 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x63,
+ 0x68, 0x65, 0x6d, 0x61, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c,
+ 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xe8, 0x05, 0x0a, 0x08, 0x44,
+ 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67,
+ 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65,
+ 0x72, 0x12, 0x24, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x10, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x6e, 0x66,
+ 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x62,
+ 0x61, 0x73, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
+ 0x62, 0x61, 0x73, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65,
+ 0x6d, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d,
+ 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x06,
+ 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x1a,
+ 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09,
+ 0x52, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x05, 0x70, 0x61,
+ 0x74, 0x68, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x73, 0x52, 0x05, 0x70, 0x61,
+ 0x74, 0x68, 0x73, 0x12, 0x39, 0x0a, 0x0b, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61,
+ 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x40,
+ 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x0a, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e,
+ 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73,
+ 0x12, 0x3d, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x0b, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32,
+ 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x12,
+ 0x3b, 0x0a, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53,
+ 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65,
+ 0x6e, 0x74, 0x52, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x52, 0x0a, 0x14,
+ 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65,
+ 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79,
+ 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x73, 0x65, 0x63,
+ 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x12, 0x23, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f,
+ 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x67, 0x52,
+ 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61,
+ 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e,
+ 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c,
+ 0x44, 0x6f, 0x63, 0x73, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65,
+ 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14,
+ 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65,
+ 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65,
+ 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x55, 0x0a, 0x08, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65,
+ 0x73, 0x12, 0x49, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f,
+ 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61,
+ 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+ 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x83, 0x01, 0x0a,
+ 0x0c, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x20, 0x0a,
+ 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72,
+ 0x6c, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65,
+ 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70,
+ 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e,
+ 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
+ 0x6f, 0x6e, 0x22, 0xff, 0x02, 0x0a, 0x0a, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74,
+ 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12,
+ 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x12, 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e,
+ 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x1a, 0x0a, 0x08,
+ 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08,
+ 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65,
+ 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09,
+ 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74,
+ 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78,
+ 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65,
+ 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x29, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d,
+ 0x70, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d,
+ 0x70, 0x6c, 0x65, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78,
+ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e,
+ 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64,
+ 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e,
+ 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xab, 0x06, 0x0a, 0x1a, 0x46, 0x6f, 0x72, 0x6d, 0x44, 0x61, 0x74,
+ 0x61, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68,
+ 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12,
+ 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12,
+ 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65,
+ 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x0f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18,
+ 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a,
+ 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74,
+ 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73,
+ 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66,
+ 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c,
+ 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x29, 0x0a,
+ 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f,
+ 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52,
+ 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69,
+ 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d,
+ 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f,
+ 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65,
+ 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12,
+ 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x01,
+ 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63,
+ 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0e,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d,
+ 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65,
+ 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c,
+ 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e,
+ 0x67, 0x74, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65,
+ 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18,
+ 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b,
+ 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d,
+ 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08,
+ 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71,
+ 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b,
+ 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65,
+ 0x6e, 0x75, 0x6d, 0x18, 0x15, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d,
+ 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18,
+ 0x16, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f,
+ 0x66, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65,
+ 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x17, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70,
+ 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e,
+ 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
+ 0x6f, 0x6e, 0x22, 0xab, 0x05, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x12, 0x0a,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70,
+ 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, 0x05, 0x69, 0x74, 0x65,
+ 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61,
+ 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73,
+ 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x2b, 0x0a, 0x11,
+ 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61,
+ 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66,
+ 0x61, 0x75, 0x6c, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65,
+ 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x66,
+ 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18,
+ 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b,
+ 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x69,
+ 0x6d, 0x75, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75,
+ 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x6d,
+ 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x69,
+ 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69,
+ 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d,
+ 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68,
+ 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, 0x67, 0x74,
+ 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18,
+ 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68,
+ 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61,
+ 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d,
+ 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69,
+ 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49,
+ 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x69,
+ 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x71,
+ 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18,
+ 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e,
+ 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1f, 0x0a, 0x0b,
+ 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x11, 0x20, 0x01, 0x28,
+ 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, 0x20, 0x0a,
+ 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x12, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73,
+ 0x69, 0x6f, 0x6e, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52,
+ 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x22, 0xfd, 0x05, 0x0a, 0x18, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d,
+ 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x0a,
+ 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
+ 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
+ 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74,
+ 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, 0x05, 0x69,
+ 0x74, 0x65, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, 0x70, 0x65,
+ 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76,
+ 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x2b,
+ 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72,
+ 0x6d, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, 0x6c, 0x65,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x29, 0x0a, 0x07, 0x64,
+ 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64,
+ 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75,
+ 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d,
+ 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61,
+ 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63,
+ 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x18, 0x0a,
+ 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07,
+ 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75,
+ 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0d, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e,
+ 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67,
+ 0x74, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e,
+ 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74,
+ 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x67,
+ 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x10, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, 0x0a, 0x09,
+ 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x03, 0x52,
+ 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e,
+ 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69,
+ 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65,
+ 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x6e,
+ 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, 0x6e, 0x75,
+ 0x6d, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+ 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1f,
+ 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x15, 0x20,
+ 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12,
+ 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73,
+ 0x69, 0x6f, 0x6e, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52,
+ 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x22, 0x57, 0x0a, 0x07, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x4c, 0x0a, 0x15, 0x61,
+ 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72,
+ 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70, 0x65,
+ 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x48, 0x65, 0x61,
+ 0x64, 0x65, 0x72, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50,
+ 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xa1, 0x02, 0x0a, 0x04, 0x49, 0x6e,
+ 0x66, 0x6f, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x65, 0x72, 0x6d, 0x73, 0x5f, 0x6f, 0x66,
+ 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e,
+ 0x74, 0x65, 0x72, 0x6d, 0x73, 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2d,
+ 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x13, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e,
+ 0x74, 0x61, 0x63, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x2d, 0x0a,
+ 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13,
+ 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x63, 0x65,
+ 0x6e, 0x73, 0x65, 0x52, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x10,
+ 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69,
+ 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65,
+ 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x37, 0x0a,
+ 0x09, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x63,
+ 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65,
+ 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x06,
+ 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x44, 0x0a, 0x0d, 0x4a, 0x73, 0x6f, 0x6e, 0x52, 0x65,
+ 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x11, 0x0a, 0x04, 0x5f, 0x72, 0x65, 0x66, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x70, 0x0a, 0x07,
+ 0x4c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75,
+ 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x3f, 0x0a,
+ 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
+ 0x6e, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+ 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76,
+ 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x45,
+ 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x25,
+ 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
+ 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4b, 0x0a, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x48, 0x65,
+ 0x61, 0x64, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+ 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x22, 0x51, 0x0a, 0x0e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x61, 0x72, 0x61, 0x6d,
+ 0x65, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+ 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4f, 0x0a, 0x0d, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x61,
+ 0x74, 0x68, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x49, 0x74, 0x65, 0x6d, 0x52,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4f, 0x0a, 0x0d, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65,
+ 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x59, 0x0a, 0x12, 0x4e, 0x61, 0x6d, 0x65, 0x64,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x12, 0x2f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x22, 0x4b, 0x0a, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76,
+ 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22,
+ 0x6d, 0x0a, 0x1c, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79,
+ 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12,
+ 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e,
+ 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x37,
+ 0x0a, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x55, 0x0a, 0x10, 0x4e, 0x61, 0x6d, 0x65, 0x64,
+ 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x41, 0x72, 0x72, 0x61, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
+ 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17,
+ 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x72, 0x69,
+ 0x6e, 0x67, 0x41, 0x72, 0x72, 0x61, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xb5,
+ 0x03, 0x0a, 0x10, 0x4e, 0x6f, 0x6e, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65,
+ 0x74, 0x65, 0x72, 0x12, 0x65, 0x0a, 0x1b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x70, 0x61,
+ 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x73, 0x63, 0x68, 0x65,
+ 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61,
+ 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61,
+ 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x48, 0x00,
+ 0x52, 0x18, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65,
+ 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x6c, 0x0a, 0x1e, 0x66, 0x6f,
+ 0x72, 0x6d, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65,
+ 0x72, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e,
+ 0x46, 0x6f, 0x72, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65,
+ 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x48, 0x00, 0x52, 0x1a, 0x66, 0x6f,
+ 0x72, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53,
+ 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x62, 0x0a, 0x1a, 0x71, 0x75, 0x65, 0x72,
+ 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x62, 0x5f,
+ 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50,
+ 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x48, 0x00, 0x52, 0x17, 0x71, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65,
+ 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x5f, 0x0a, 0x19,
+ 0x70, 0x61, 0x74, 0x68, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x73,
+ 0x75, 0x62, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x74,
+ 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68,
+ 0x65, 0x6d, 0x61, 0x48, 0x00, 0x52, 0x16, 0x70, 0x61, 0x74, 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d,
+ 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x42, 0x07, 0x0a,
+ 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xa1, 0x02, 0x0a, 0x18, 0x4f, 0x61, 0x75, 0x74, 0x68,
+ 0x32, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x63, 0x75, 0x72,
+ 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x30, 0x0a, 0x06, 0x73,
+ 0x63, 0x6f, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70,
+ 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x53,
+ 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x2b, 0x0a,
+ 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75,
+ 0x72, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72,
+ 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f,
+ 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e,
+ 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32,
+ 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f,
+ 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xf5, 0x01, 0x0a, 0x19, 0x4f,
+ 0x61, 0x75, 0x74, 0x68, 0x32, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04,
+ 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77,
+ 0x12, 0x30, 0x0a, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x61,
+ 0x75, 0x74, 0x68, 0x32, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70,
+ 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12,
+ 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65,
+ 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70,
+ 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e,
+ 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
+ 0x6f, 0x6e, 0x22, 0x82, 0x02, 0x0a, 0x16, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x49, 0x6d, 0x70,
+ 0x6c, 0x69, 0x63, 0x69, 0x74, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70,
+ 0x65, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x30, 0x0a, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e,
+ 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52,
+ 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f,
+ 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x10, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72,
+ 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61,
+ 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78,
+ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xf2, 0x01, 0x0a, 0x16, 0x4f, 0x61, 0x75, 0x74,
+ 0x68, 0x32, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69,
+ 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x30, 0x0a, 0x06, 0x73, 0x63,
+ 0x6f, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65,
+ 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x53, 0x63,
+ 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09,
+ 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x08, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
+ 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76,
+ 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18,
+ 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e,
+ 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e,
+ 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x5c, 0x0a, 0x0c,
+ 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x4c, 0x0a, 0x15,
+ 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65,
+ 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70,
+ 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x74,
+ 0x72, 0x69, 0x6e, 0x67, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c,
+ 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x9e, 0x04, 0x0a, 0x09, 0x4f,
+ 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07,
+ 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73,
+ 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65,
+ 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x74,
+ 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72,
+ 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f,
+ 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72,
+ 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72,
+ 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d,
+ 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d,
+ 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73,
+ 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69,
+ 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x49, 0x74,
+ 0x65, 0x6d, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x33,
+ 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x0a,
+ 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x12, 0x1e, 0x0a,
+ 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x3b, 0x0a,
+ 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x63,
+ 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74,
+ 0x52, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65,
+ 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0d,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76,
+ 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64,
+ 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa6, 0x01, 0x0a, 0x09,
+ 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0e, 0x62, 0x6f, 0x64,
+ 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x42,
+ 0x6f, 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0d,
+ 0x62, 0x6f, 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x4c, 0x0a,
+ 0x12, 0x6e, 0x6f, 0x6e, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65,
+ 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x6e, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x61,
+ 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x10, 0x6e, 0x6f, 0x6e, 0x42, 0x6f,
+ 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x42, 0x07, 0x0a, 0x05, 0x6f,
+ 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x67, 0x0a, 0x14, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65,
+ 0x72, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4f, 0x0a, 0x15,
+ 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65,
+ 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6f, 0x70,
+ 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x61,
+ 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f,
+ 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x94, 0x01,
+ 0x0a, 0x0e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x49, 0x74, 0x65, 0x6d,
+ 0x12, 0x35, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32,
+ 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x09, 0x70, 0x61,
+ 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0e, 0x6a, 0x73, 0x6f, 0x6e, 0x5f,
+ 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4a, 0x73, 0x6f,
+ 0x6e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x6a, 0x73,
+ 0x6f, 0x6e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x6f,
+ 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xcf, 0x03, 0x0a, 0x08, 0x50, 0x61, 0x74, 0x68, 0x49, 0x74, 0x65,
+ 0x6d, 0x12, 0x11, 0x0a, 0x04, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x03, 0x52, 0x65, 0x66, 0x12, 0x27, 0x0a, 0x03, 0x67, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f,
+ 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x67, 0x65, 0x74, 0x12, 0x27, 0x0a,
+ 0x03, 0x70, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65,
+ 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x52, 0x03, 0x70, 0x75, 0x74, 0x12, 0x29, 0x0a, 0x04, 0x70, 0x6f, 0x73, 0x74, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76,
+ 0x32, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x70, 0x6f, 0x73,
+ 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f,
+ 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65,
+ 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f,
+ 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x12, 0x29, 0x0a, 0x04, 0x68, 0x65, 0x61, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x70, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x68, 0x65, 0x61, 0x64, 0x12, 0x2b, 0x0a, 0x05,
+ 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70,
+ 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x3a, 0x0a, 0x0a, 0x70, 0x61, 0x72,
+ 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
+ 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d,
+ 0x65, 0x74, 0x65, 0x72, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d,
+ 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f,
+ 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d,
+ 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74,
+ 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xfb, 0x05, 0x0a, 0x16, 0x50, 0x61, 0x74, 0x68, 0x50,
+ 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x0e, 0x0a,
+ 0x02, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x20, 0x0a,
+ 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61,
+ 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12,
+ 0x31, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b,
+ 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d,
+ 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65,
+ 0x6d, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63,
+ 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12,
+ 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e,
+ 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61,
+ 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78,
+ 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76,
+ 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75,
+ 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01,
+ 0x28, 0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65,
+ 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d,
+ 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76,
+ 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f,
+ 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61,
+ 0x78, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c,
+ 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e,
+ 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72,
+ 0x6e, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e,
+ 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x11, 0x20,
+ 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a,
+ 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e,
+ 0x69, 0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a,
+ 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70,
+ 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e,
+ 0x75, 0x6d, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f,
+ 0x66, 0x18, 0x15, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c,
+ 0x65, 0x4f, 0x66, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78,
+ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e,
+ 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64,
+ 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e,
+ 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x77, 0x0a, 0x05, 0x50, 0x61, 0x74, 0x68, 0x73, 0x12, 0x3f, 0x0a,
+ 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
+ 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+ 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76,
+ 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2d,
+ 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50,
+ 0x61, 0x74, 0x68, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x92, 0x05,
+ 0x0a, 0x0f, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d,
+ 0x73, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a,
+ 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74,
+ 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73,
+ 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66,
+ 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c,
+ 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x29, 0x0a,
+ 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f,
+ 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52,
+ 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69,
+ 0x6d, 0x75, 0x6d, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d,
+ 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f,
+ 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65,
+ 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12,
+ 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01,
+ 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63,
+ 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x09,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d,
+ 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65,
+ 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c,
+ 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e,
+ 0x67, 0x74, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65,
+ 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18,
+ 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b,
+ 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d,
+ 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08,
+ 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71,
+ 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b,
+ 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65,
+ 0x6e, 0x75, 0x6d, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d,
+ 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18,
+ 0x11, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f,
+ 0x66, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65,
+ 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x12, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70,
+ 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e,
+ 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
+ 0x6f, 0x6e, 0x22, 0x5a, 0x0a, 0x0a, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73,
+ 0x12, 0x4c, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70,
+ 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x17, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d,
+ 0x65, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xa8,
+ 0x06, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65,
+ 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65,
+ 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65,
+ 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x11,
+ 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x6d,
+ 0x70, 0x74, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65,
+ 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06,
+ 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f,
+ 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x08, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32,
+ 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73,
+ 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x09, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f,
+ 0x72, 0x6d, 0x61, 0x74, 0x12, 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18,
+ 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e,
+ 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12,
+ 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01,
+ 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63,
+ 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d,
+ 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75,
+ 0x6d, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d,
+ 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69,
+ 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63,
+ 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a,
+ 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a,
+ 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70,
+ 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61,
+ 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65,
+ 0x6d, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65,
+ 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18,
+ 0x13, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12,
+ 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18,
+ 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65,
+ 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x15, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e,
+ 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69,
+ 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x16, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75,
+ 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64,
+ 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x17, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e,
+ 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72,
+ 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xfe, 0x01, 0x0a, 0x08, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65,
+ 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61,
+ 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x49, 0x74, 0x65, 0x6d,
+ 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x2d, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64,
+ 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x52, 0x07,
+ 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x30, 0x0a, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70,
+ 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x52,
+ 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e,
+ 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32,
+ 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f,
+ 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x65, 0x0a, 0x13, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x12, 0x4e, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f,
+ 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61,
+ 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x14, 0x61, 0x64, 0x64,
+ 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65,
+ 0x73, 0x22, 0x90, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e,
+ 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x08, 0x72,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x0e, 0x6a, 0x73, 0x6f, 0x6e, 0x5f,
+ 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4a, 0x73, 0x6f,
+ 0x6e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x6a, 0x73,
+ 0x6f, 0x6e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x6f,
+ 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x91, 0x01, 0x0a, 0x09, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x73, 0x12, 0x43, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63,
+ 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f,
+ 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e,
+ 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45,
+ 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xaf, 0x09, 0x0a, 0x06, 0x53, 0x63, 0x68,
+ 0x65, 0x6d, 0x61, 0x12, 0x11, 0x0a, 0x04, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x14,
+ 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74,
+ 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c,
+ 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+ 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c,
+ 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66,
+ 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65,
+ 0x4f, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x07, 0x20,
+ 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11,
+ 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75,
+ 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69,
+ 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e,
+ 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x09, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69,
+ 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65,
+ 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10,
+ 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d,
+ 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0b,
+ 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12,
+ 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0c, 0x20,
+ 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18,
+ 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f,
+ 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78,
+ 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65,
+ 0x6d, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65,
+ 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65,
+ 0x6d, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65,
+ 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x72, 0x6f,
+ 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x6d,
+ 0x61, 0x78, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e,
+ 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x12,
+ 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74,
+ 0x69, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18,
+ 0x13, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12,
+ 0x23, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
+ 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04,
+ 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x59, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+ 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x15, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32,
+ 0x2e, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65,
+ 0x72, 0x74, 0x69, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12,
+ 0x28, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e,
+ 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x49,
+ 0x74, 0x65, 0x6d, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x05, 0x69, 0x74, 0x65,
+ 0x6d, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61,
+ 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52,
+ 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x29, 0x0a, 0x06, 0x61, 0x6c, 0x6c, 0x5f, 0x6f, 0x66,
+ 0x18, 0x18, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69,
+ 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x61, 0x6c, 0x6c, 0x4f,
+ 0x66, 0x12, 0x36, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18,
+ 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e,
+ 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x52, 0x0a, 0x70,
+ 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x64, 0x69, 0x73,
+ 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0d, 0x64, 0x69, 0x73, 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x12,
+ 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x1b, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x21, 0x0a, 0x03,
+ 0x78, 0x6d, 0x6c, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x58, 0x6d, 0x6c, 0x52, 0x03, 0x78, 0x6d, 0x6c, 0x12,
+ 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73,
+ 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69,
+ 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73,
+ 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x29,
+ 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79,
+ 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e,
+ 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x1f, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32,
+ 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f,
+ 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x7e, 0x0a, 0x0a, 0x53, 0x63,
+ 0x68, 0x65, 0x6d, 0x61, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65,
+ 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61,
+ 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x48, 0x00, 0x52, 0x06,
+ 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x39, 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73,
+ 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70,
+ 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x63, 0x68,
+ 0x65, 0x6d, 0x61, 0x48, 0x00, 0x52, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x74, 0x0a, 0x13, 0x53, 0x65,
+ 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x12, 0x5d, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f,
+ 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61,
+ 0x6d, 0x65, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e,
+ 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69,
+ 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73,
+ 0x22, 0xe9, 0x04, 0x0a, 0x17, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66,
+ 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x6d, 0x0a, 0x1d,
+ 0x62, 0x61, 0x73, 0x69, 0x63, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32,
+ 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, 0x00, 0x52, 0x1b,
+ 0x62, 0x61, 0x73, 0x69, 0x63, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x46, 0x0a, 0x10, 0x61,
+ 0x70, 0x69, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e,
+ 0x76, 0x32, 0x2e, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74,
+ 0x79, 0x48, 0x00, 0x52, 0x0e, 0x61, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x53, 0x65, 0x63, 0x75, 0x72,
+ 0x69, 0x74, 0x79, 0x12, 0x5e, 0x0a, 0x18, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x5f, 0x69, 0x6d,
+ 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e,
+ 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x49, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69,
+ 0x74, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, 0x00, 0x52, 0x16, 0x6f, 0x61, 0x75,
+ 0x74, 0x68, 0x32, 0x49, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x53, 0x65, 0x63, 0x75, 0x72,
+ 0x69, 0x74, 0x79, 0x12, 0x5e, 0x0a, 0x18, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x5f, 0x70, 0x61,
+ 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e,
+ 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72,
+ 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, 0x00, 0x52, 0x16, 0x6f, 0x61, 0x75,
+ 0x74, 0x68, 0x32, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72,
+ 0x69, 0x74, 0x79, 0x12, 0x67, 0x0a, 0x1b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x5f, 0x61, 0x70,
+ 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69,
+ 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61,
+ 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x41, 0x70, 0x70, 0x6c,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48,
+ 0x00, 0x52, 0x19, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x65, 0x0a, 0x1b,
+ 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x63, 0x6f,
+ 0x64, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f,
+ 0x61, 0x75, 0x74, 0x68, 0x32, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x53,
+ 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, 0x00, 0x52, 0x18, 0x6f, 0x61, 0x75, 0x74, 0x68,
+ 0x32, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x63, 0x75, 0x72,
+ 0x69, 0x74, 0x79, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x68, 0x0a, 0x13,
+ 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d,
+ 0x65, 0x6e, 0x74, 0x12, 0x51, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61,
+ 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e,
+ 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x41, 0x72, 0x72, 0x61, 0x79,
+ 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70,
+ 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x23, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67,
+ 0x41, 0x72, 0x72, 0x61, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xbb, 0x01, 0x0a, 0x03,
+ 0x54, 0x61, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74,
+ 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78,
+ 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65,
+ 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64,
+ 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e,
+ 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72,
+ 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x20, 0x0a, 0x08, 0x54, 0x79, 0x70,
+ 0x65, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x5c, 0x0a, 0x0f, 0x56,
+ 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x49,
+ 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f,
+ 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e,
+ 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64,
+ 0x41, 0x6e, 0x79, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50,
+ 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xc8, 0x01, 0x0a, 0x03, 0x58, 0x6d,
+ 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1c, 0x0a, 0x09, 0x61,
+ 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09,
+ 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x77, 0x72, 0x61,
+ 0x70, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x77, 0x72, 0x61, 0x70,
+ 0x70, 0x65, 0x64, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78,
+ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e,
+ 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64,
+ 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e,
+ 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x3e, 0x0a, 0x0e, 0x6f, 0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x42, 0x0c, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x16, 0x2e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+ 0x69, 0x76, 0x32, 0x3b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0xa2, 0x02,
+ 0x03, 0x4f, 0x41, 0x53, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_openapiv2_OpenAPIv2_proto_rawDescOnce sync.Once
+ file_openapiv2_OpenAPIv2_proto_rawDescData = file_openapiv2_OpenAPIv2_proto_rawDesc
+)
+
+func file_openapiv2_OpenAPIv2_proto_rawDescGZIP() []byte {
+ file_openapiv2_OpenAPIv2_proto_rawDescOnce.Do(func() {
+ file_openapiv2_OpenAPIv2_proto_rawDescData = protoimpl.X.CompressGZIP(file_openapiv2_OpenAPIv2_proto_rawDescData)
+ })
+ return file_openapiv2_OpenAPIv2_proto_rawDescData
+}
+
+var file_openapiv2_OpenAPIv2_proto_msgTypes = make([]protoimpl.MessageInfo, 60)
+var file_openapiv2_OpenAPIv2_proto_goTypes = []interface{}{
+ (*AdditionalPropertiesItem)(nil), // 0: openapi.v2.AdditionalPropertiesItem
+ (*Any)(nil), // 1: openapi.v2.Any
+ (*ApiKeySecurity)(nil), // 2: openapi.v2.ApiKeySecurity
+ (*BasicAuthenticationSecurity)(nil), // 3: openapi.v2.BasicAuthenticationSecurity
+ (*BodyParameter)(nil), // 4: openapi.v2.BodyParameter
+ (*Contact)(nil), // 5: openapi.v2.Contact
+ (*Default)(nil), // 6: openapi.v2.Default
+ (*Definitions)(nil), // 7: openapi.v2.Definitions
+ (*Document)(nil), // 8: openapi.v2.Document
+ (*Examples)(nil), // 9: openapi.v2.Examples
+ (*ExternalDocs)(nil), // 10: openapi.v2.ExternalDocs
+ (*FileSchema)(nil), // 11: openapi.v2.FileSchema
+ (*FormDataParameterSubSchema)(nil), // 12: openapi.v2.FormDataParameterSubSchema
+ (*Header)(nil), // 13: openapi.v2.Header
+ (*HeaderParameterSubSchema)(nil), // 14: openapi.v2.HeaderParameterSubSchema
+ (*Headers)(nil), // 15: openapi.v2.Headers
+ (*Info)(nil), // 16: openapi.v2.Info
+ (*ItemsItem)(nil), // 17: openapi.v2.ItemsItem
+ (*JsonReference)(nil), // 18: openapi.v2.JsonReference
+ (*License)(nil), // 19: openapi.v2.License
+ (*NamedAny)(nil), // 20: openapi.v2.NamedAny
+ (*NamedHeader)(nil), // 21: openapi.v2.NamedHeader
+ (*NamedParameter)(nil), // 22: openapi.v2.NamedParameter
+ (*NamedPathItem)(nil), // 23: openapi.v2.NamedPathItem
+ (*NamedResponse)(nil), // 24: openapi.v2.NamedResponse
+ (*NamedResponseValue)(nil), // 25: openapi.v2.NamedResponseValue
+ (*NamedSchema)(nil), // 26: openapi.v2.NamedSchema
+ (*NamedSecurityDefinitionsItem)(nil), // 27: openapi.v2.NamedSecurityDefinitionsItem
+ (*NamedString)(nil), // 28: openapi.v2.NamedString
+ (*NamedStringArray)(nil), // 29: openapi.v2.NamedStringArray
+ (*NonBodyParameter)(nil), // 30: openapi.v2.NonBodyParameter
+ (*Oauth2AccessCodeSecurity)(nil), // 31: openapi.v2.Oauth2AccessCodeSecurity
+ (*Oauth2ApplicationSecurity)(nil), // 32: openapi.v2.Oauth2ApplicationSecurity
+ (*Oauth2ImplicitSecurity)(nil), // 33: openapi.v2.Oauth2ImplicitSecurity
+ (*Oauth2PasswordSecurity)(nil), // 34: openapi.v2.Oauth2PasswordSecurity
+ (*Oauth2Scopes)(nil), // 35: openapi.v2.Oauth2Scopes
+ (*Operation)(nil), // 36: openapi.v2.Operation
+ (*Parameter)(nil), // 37: openapi.v2.Parameter
+ (*ParameterDefinitions)(nil), // 38: openapi.v2.ParameterDefinitions
+ (*ParametersItem)(nil), // 39: openapi.v2.ParametersItem
+ (*PathItem)(nil), // 40: openapi.v2.PathItem
+ (*PathParameterSubSchema)(nil), // 41: openapi.v2.PathParameterSubSchema
+ (*Paths)(nil), // 42: openapi.v2.Paths
+ (*PrimitivesItems)(nil), // 43: openapi.v2.PrimitivesItems
+ (*Properties)(nil), // 44: openapi.v2.Properties
+ (*QueryParameterSubSchema)(nil), // 45: openapi.v2.QueryParameterSubSchema
+ (*Response)(nil), // 46: openapi.v2.Response
+ (*ResponseDefinitions)(nil), // 47: openapi.v2.ResponseDefinitions
+ (*ResponseValue)(nil), // 48: openapi.v2.ResponseValue
+ (*Responses)(nil), // 49: openapi.v2.Responses
+ (*Schema)(nil), // 50: openapi.v2.Schema
+ (*SchemaItem)(nil), // 51: openapi.v2.SchemaItem
+ (*SecurityDefinitions)(nil), // 52: openapi.v2.SecurityDefinitions
+ (*SecurityDefinitionsItem)(nil), // 53: openapi.v2.SecurityDefinitionsItem
+ (*SecurityRequirement)(nil), // 54: openapi.v2.SecurityRequirement
+ (*StringArray)(nil), // 55: openapi.v2.StringArray
+ (*Tag)(nil), // 56: openapi.v2.Tag
+ (*TypeItem)(nil), // 57: openapi.v2.TypeItem
+ (*VendorExtension)(nil), // 58: openapi.v2.VendorExtension
+ (*Xml)(nil), // 59: openapi.v2.Xml
+ (*anypb.Any)(nil), // 60: google.protobuf.Any
+}
+var file_openapiv2_OpenAPIv2_proto_depIdxs = []int32{
+ 50, // 0: openapi.v2.AdditionalPropertiesItem.schema:type_name -> openapi.v2.Schema
+ 60, // 1: openapi.v2.Any.value:type_name -> google.protobuf.Any
+ 20, // 2: openapi.v2.ApiKeySecurity.vendor_extension:type_name -> openapi.v2.NamedAny
+ 20, // 3: openapi.v2.BasicAuthenticationSecurity.vendor_extension:type_name -> openapi.v2.NamedAny
+ 50, // 4: openapi.v2.BodyParameter.schema:type_name -> openapi.v2.Schema
+ 20, // 5: openapi.v2.BodyParameter.vendor_extension:type_name -> openapi.v2.NamedAny
+ 20, // 6: openapi.v2.Contact.vendor_extension:type_name -> openapi.v2.NamedAny
+ 20, // 7: openapi.v2.Default.additional_properties:type_name -> openapi.v2.NamedAny
+ 26, // 8: openapi.v2.Definitions.additional_properties:type_name -> openapi.v2.NamedSchema
+ 16, // 9: openapi.v2.Document.info:type_name -> openapi.v2.Info
+ 42, // 10: openapi.v2.Document.paths:type_name -> openapi.v2.Paths
+ 7, // 11: openapi.v2.Document.definitions:type_name -> openapi.v2.Definitions
+ 38, // 12: openapi.v2.Document.parameters:type_name -> openapi.v2.ParameterDefinitions
+ 47, // 13: openapi.v2.Document.responses:type_name -> openapi.v2.ResponseDefinitions
+ 54, // 14: openapi.v2.Document.security:type_name -> openapi.v2.SecurityRequirement
+ 52, // 15: openapi.v2.Document.security_definitions:type_name -> openapi.v2.SecurityDefinitions
+ 56, // 16: openapi.v2.Document.tags:type_name -> openapi.v2.Tag
+ 10, // 17: openapi.v2.Document.external_docs:type_name -> openapi.v2.ExternalDocs
+ 20, // 18: openapi.v2.Document.vendor_extension:type_name -> openapi.v2.NamedAny
+ 20, // 19: openapi.v2.Examples.additional_properties:type_name -> openapi.v2.NamedAny
+ 20, // 20: openapi.v2.ExternalDocs.vendor_extension:type_name -> openapi.v2.NamedAny
+ 1, // 21: openapi.v2.FileSchema.default:type_name -> openapi.v2.Any
+ 10, // 22: openapi.v2.FileSchema.external_docs:type_name -> openapi.v2.ExternalDocs
+ 1, // 23: openapi.v2.FileSchema.example:type_name -> openapi.v2.Any
+ 20, // 24: openapi.v2.FileSchema.vendor_extension:type_name -> openapi.v2.NamedAny
+ 43, // 25: openapi.v2.FormDataParameterSubSchema.items:type_name -> openapi.v2.PrimitivesItems
+ 1, // 26: openapi.v2.FormDataParameterSubSchema.default:type_name -> openapi.v2.Any
+ 1, // 27: openapi.v2.FormDataParameterSubSchema.enum:type_name -> openapi.v2.Any
+ 20, // 28: openapi.v2.FormDataParameterSubSchema.vendor_extension:type_name -> openapi.v2.NamedAny
+ 43, // 29: openapi.v2.Header.items:type_name -> openapi.v2.PrimitivesItems
+ 1, // 30: openapi.v2.Header.default:type_name -> openapi.v2.Any
+ 1, // 31: openapi.v2.Header.enum:type_name -> openapi.v2.Any
+ 20, // 32: openapi.v2.Header.vendor_extension:type_name -> openapi.v2.NamedAny
+ 43, // 33: openapi.v2.HeaderParameterSubSchema.items:type_name -> openapi.v2.PrimitivesItems
+ 1, // 34: openapi.v2.HeaderParameterSubSchema.default:type_name -> openapi.v2.Any
+ 1, // 35: openapi.v2.HeaderParameterSubSchema.enum:type_name -> openapi.v2.Any
+ 20, // 36: openapi.v2.HeaderParameterSubSchema.vendor_extension:type_name -> openapi.v2.NamedAny
+ 21, // 37: openapi.v2.Headers.additional_properties:type_name -> openapi.v2.NamedHeader
+ 5, // 38: openapi.v2.Info.contact:type_name -> openapi.v2.Contact
+ 19, // 39: openapi.v2.Info.license:type_name -> openapi.v2.License
+ 20, // 40: openapi.v2.Info.vendor_extension:type_name -> openapi.v2.NamedAny
+ 50, // 41: openapi.v2.ItemsItem.schema:type_name -> openapi.v2.Schema
+ 20, // 42: openapi.v2.License.vendor_extension:type_name -> openapi.v2.NamedAny
+ 1, // 43: openapi.v2.NamedAny.value:type_name -> openapi.v2.Any
+ 13, // 44: openapi.v2.NamedHeader.value:type_name -> openapi.v2.Header
+ 37, // 45: openapi.v2.NamedParameter.value:type_name -> openapi.v2.Parameter
+ 40, // 46: openapi.v2.NamedPathItem.value:type_name -> openapi.v2.PathItem
+ 46, // 47: openapi.v2.NamedResponse.value:type_name -> openapi.v2.Response
+ 48, // 48: openapi.v2.NamedResponseValue.value:type_name -> openapi.v2.ResponseValue
+ 50, // 49: openapi.v2.NamedSchema.value:type_name -> openapi.v2.Schema
+ 53, // 50: openapi.v2.NamedSecurityDefinitionsItem.value:type_name -> openapi.v2.SecurityDefinitionsItem
+ 55, // 51: openapi.v2.NamedStringArray.value:type_name -> openapi.v2.StringArray
+ 14, // 52: openapi.v2.NonBodyParameter.header_parameter_sub_schema:type_name -> openapi.v2.HeaderParameterSubSchema
+ 12, // 53: openapi.v2.NonBodyParameter.form_data_parameter_sub_schema:type_name -> openapi.v2.FormDataParameterSubSchema
+ 45, // 54: openapi.v2.NonBodyParameter.query_parameter_sub_schema:type_name -> openapi.v2.QueryParameterSubSchema
+ 41, // 55: openapi.v2.NonBodyParameter.path_parameter_sub_schema:type_name -> openapi.v2.PathParameterSubSchema
+ 35, // 56: openapi.v2.Oauth2AccessCodeSecurity.scopes:type_name -> openapi.v2.Oauth2Scopes
+ 20, // 57: openapi.v2.Oauth2AccessCodeSecurity.vendor_extension:type_name -> openapi.v2.NamedAny
+ 35, // 58: openapi.v2.Oauth2ApplicationSecurity.scopes:type_name -> openapi.v2.Oauth2Scopes
+ 20, // 59: openapi.v2.Oauth2ApplicationSecurity.vendor_extension:type_name -> openapi.v2.NamedAny
+ 35, // 60: openapi.v2.Oauth2ImplicitSecurity.scopes:type_name -> openapi.v2.Oauth2Scopes
+ 20, // 61: openapi.v2.Oauth2ImplicitSecurity.vendor_extension:type_name -> openapi.v2.NamedAny
+ 35, // 62: openapi.v2.Oauth2PasswordSecurity.scopes:type_name -> openapi.v2.Oauth2Scopes
+ 20, // 63: openapi.v2.Oauth2PasswordSecurity.vendor_extension:type_name -> openapi.v2.NamedAny
+ 28, // 64: openapi.v2.Oauth2Scopes.additional_properties:type_name -> openapi.v2.NamedString
+ 10, // 65: openapi.v2.Operation.external_docs:type_name -> openapi.v2.ExternalDocs
+ 39, // 66: openapi.v2.Operation.parameters:type_name -> openapi.v2.ParametersItem
+ 49, // 67: openapi.v2.Operation.responses:type_name -> openapi.v2.Responses
+ 54, // 68: openapi.v2.Operation.security:type_name -> openapi.v2.SecurityRequirement
+ 20, // 69: openapi.v2.Operation.vendor_extension:type_name -> openapi.v2.NamedAny
+ 4, // 70: openapi.v2.Parameter.body_parameter:type_name -> openapi.v2.BodyParameter
+ 30, // 71: openapi.v2.Parameter.non_body_parameter:type_name -> openapi.v2.NonBodyParameter
+ 22, // 72: openapi.v2.ParameterDefinitions.additional_properties:type_name -> openapi.v2.NamedParameter
+ 37, // 73: openapi.v2.ParametersItem.parameter:type_name -> openapi.v2.Parameter
+ 18, // 74: openapi.v2.ParametersItem.json_reference:type_name -> openapi.v2.JsonReference
+ 36, // 75: openapi.v2.PathItem.get:type_name -> openapi.v2.Operation
+ 36, // 76: openapi.v2.PathItem.put:type_name -> openapi.v2.Operation
+ 36, // 77: openapi.v2.PathItem.post:type_name -> openapi.v2.Operation
+ 36, // 78: openapi.v2.PathItem.delete:type_name -> openapi.v2.Operation
+ 36, // 79: openapi.v2.PathItem.options:type_name -> openapi.v2.Operation
+ 36, // 80: openapi.v2.PathItem.head:type_name -> openapi.v2.Operation
+ 36, // 81: openapi.v2.PathItem.patch:type_name -> openapi.v2.Operation
+ 39, // 82: openapi.v2.PathItem.parameters:type_name -> openapi.v2.ParametersItem
+ 20, // 83: openapi.v2.PathItem.vendor_extension:type_name -> openapi.v2.NamedAny
+ 43, // 84: openapi.v2.PathParameterSubSchema.items:type_name -> openapi.v2.PrimitivesItems
+ 1, // 85: openapi.v2.PathParameterSubSchema.default:type_name -> openapi.v2.Any
+ 1, // 86: openapi.v2.PathParameterSubSchema.enum:type_name -> openapi.v2.Any
+ 20, // 87: openapi.v2.PathParameterSubSchema.vendor_extension:type_name -> openapi.v2.NamedAny
+ 20, // 88: openapi.v2.Paths.vendor_extension:type_name -> openapi.v2.NamedAny
+ 23, // 89: openapi.v2.Paths.path:type_name -> openapi.v2.NamedPathItem
+ 43, // 90: openapi.v2.PrimitivesItems.items:type_name -> openapi.v2.PrimitivesItems
+ 1, // 91: openapi.v2.PrimitivesItems.default:type_name -> openapi.v2.Any
+ 1, // 92: openapi.v2.PrimitivesItems.enum:type_name -> openapi.v2.Any
+ 20, // 93: openapi.v2.PrimitivesItems.vendor_extension:type_name -> openapi.v2.NamedAny
+ 26, // 94: openapi.v2.Properties.additional_properties:type_name -> openapi.v2.NamedSchema
+ 43, // 95: openapi.v2.QueryParameterSubSchema.items:type_name -> openapi.v2.PrimitivesItems
+ 1, // 96: openapi.v2.QueryParameterSubSchema.default:type_name -> openapi.v2.Any
+ 1, // 97: openapi.v2.QueryParameterSubSchema.enum:type_name -> openapi.v2.Any
+ 20, // 98: openapi.v2.QueryParameterSubSchema.vendor_extension:type_name -> openapi.v2.NamedAny
+ 51, // 99: openapi.v2.Response.schema:type_name -> openapi.v2.SchemaItem
+ 15, // 100: openapi.v2.Response.headers:type_name -> openapi.v2.Headers
+ 9, // 101: openapi.v2.Response.examples:type_name -> openapi.v2.Examples
+ 20, // 102: openapi.v2.Response.vendor_extension:type_name -> openapi.v2.NamedAny
+ 24, // 103: openapi.v2.ResponseDefinitions.additional_properties:type_name -> openapi.v2.NamedResponse
+ 46, // 104: openapi.v2.ResponseValue.response:type_name -> openapi.v2.Response
+ 18, // 105: openapi.v2.ResponseValue.json_reference:type_name -> openapi.v2.JsonReference
+ 25, // 106: openapi.v2.Responses.response_code:type_name -> openapi.v2.NamedResponseValue
+ 20, // 107: openapi.v2.Responses.vendor_extension:type_name -> openapi.v2.NamedAny
+ 1, // 108: openapi.v2.Schema.default:type_name -> openapi.v2.Any
+ 1, // 109: openapi.v2.Schema.enum:type_name -> openapi.v2.Any
+ 0, // 110: openapi.v2.Schema.additional_properties:type_name -> openapi.v2.AdditionalPropertiesItem
+ 57, // 111: openapi.v2.Schema.type:type_name -> openapi.v2.TypeItem
+ 17, // 112: openapi.v2.Schema.items:type_name -> openapi.v2.ItemsItem
+ 50, // 113: openapi.v2.Schema.all_of:type_name -> openapi.v2.Schema
+ 44, // 114: openapi.v2.Schema.properties:type_name -> openapi.v2.Properties
+ 59, // 115: openapi.v2.Schema.xml:type_name -> openapi.v2.Xml
+ 10, // 116: openapi.v2.Schema.external_docs:type_name -> openapi.v2.ExternalDocs
+ 1, // 117: openapi.v2.Schema.example:type_name -> openapi.v2.Any
+ 20, // 118: openapi.v2.Schema.vendor_extension:type_name -> openapi.v2.NamedAny
+ 50, // 119: openapi.v2.SchemaItem.schema:type_name -> openapi.v2.Schema
+ 11, // 120: openapi.v2.SchemaItem.file_schema:type_name -> openapi.v2.FileSchema
+ 27, // 121: openapi.v2.SecurityDefinitions.additional_properties:type_name -> openapi.v2.NamedSecurityDefinitionsItem
+ 3, // 122: openapi.v2.SecurityDefinitionsItem.basic_authentication_security:type_name -> openapi.v2.BasicAuthenticationSecurity
+ 2, // 123: openapi.v2.SecurityDefinitionsItem.api_key_security:type_name -> openapi.v2.ApiKeySecurity
+ 33, // 124: openapi.v2.SecurityDefinitionsItem.oauth2_implicit_security:type_name -> openapi.v2.Oauth2ImplicitSecurity
+ 34, // 125: openapi.v2.SecurityDefinitionsItem.oauth2_password_security:type_name -> openapi.v2.Oauth2PasswordSecurity
+ 32, // 126: openapi.v2.SecurityDefinitionsItem.oauth2_application_security:type_name -> openapi.v2.Oauth2ApplicationSecurity
+ 31, // 127: openapi.v2.SecurityDefinitionsItem.oauth2_access_code_security:type_name -> openapi.v2.Oauth2AccessCodeSecurity
+ 29, // 128: openapi.v2.SecurityRequirement.additional_properties:type_name -> openapi.v2.NamedStringArray
+ 10, // 129: openapi.v2.Tag.external_docs:type_name -> openapi.v2.ExternalDocs
+ 20, // 130: openapi.v2.Tag.vendor_extension:type_name -> openapi.v2.NamedAny
+ 20, // 131: openapi.v2.VendorExtension.additional_properties:type_name -> openapi.v2.NamedAny
+ 20, // 132: openapi.v2.Xml.vendor_extension:type_name -> openapi.v2.NamedAny
+ 133, // [133:133] is the sub-list for method output_type
+ 133, // [133:133] is the sub-list for method input_type
+ 133, // [133:133] is the sub-list for extension type_name
+ 133, // [133:133] is the sub-list for extension extendee
+ 0, // [0:133] is the sub-list for field type_name
+}
+
+func init() { file_openapiv2_OpenAPIv2_proto_init() }
+func file_openapiv2_OpenAPIv2_proto_init() {
+ if File_openapiv2_OpenAPIv2_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_openapiv2_OpenAPIv2_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AdditionalPropertiesItem); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Any); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ApiKeySecurity); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BasicAuthenticationSecurity); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BodyParameter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Contact); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Default); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Definitions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Document); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Examples); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ExternalDocs); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FileSchema); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FormDataParameterSubSchema); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Header); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HeaderParameterSubSchema); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Headers); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Info); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ItemsItem); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*JsonReference); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*License); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NamedAny); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NamedHeader); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NamedParameter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NamedPathItem); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NamedResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NamedResponseValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NamedSchema); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NamedSecurityDefinitionsItem); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NamedString); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NamedStringArray); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NonBodyParameter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Oauth2AccessCodeSecurity); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Oauth2ApplicationSecurity); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Oauth2ImplicitSecurity); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Oauth2PasswordSecurity); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Oauth2Scopes); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Operation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Parameter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ParameterDefinitions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ParametersItem); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PathItem); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PathParameterSubSchema); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Paths); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PrimitivesItems); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Properties); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*QueryParameterSubSchema); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Response); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ResponseDefinitions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ResponseValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Responses); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Schema); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SchemaItem); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SecurityDefinitions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SecurityDefinitionsItem); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SecurityRequirement); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StringArray); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Tag); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TypeItem); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VendorExtension); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Xml); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*AdditionalPropertiesItem_Schema)(nil),
+ (*AdditionalPropertiesItem_Boolean)(nil),
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[30].OneofWrappers = []interface{}{
+ (*NonBodyParameter_HeaderParameterSubSchema)(nil),
+ (*NonBodyParameter_FormDataParameterSubSchema)(nil),
+ (*NonBodyParameter_QueryParameterSubSchema)(nil),
+ (*NonBodyParameter_PathParameterSubSchema)(nil),
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[37].OneofWrappers = []interface{}{
+ (*Parameter_BodyParameter)(nil),
+ (*Parameter_NonBodyParameter)(nil),
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[39].OneofWrappers = []interface{}{
+ (*ParametersItem_Parameter)(nil),
+ (*ParametersItem_JsonReference)(nil),
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[48].OneofWrappers = []interface{}{
+ (*ResponseValue_Response)(nil),
+ (*ResponseValue_JsonReference)(nil),
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[51].OneofWrappers = []interface{}{
+ (*SchemaItem_Schema)(nil),
+ (*SchemaItem_FileSchema)(nil),
+ }
+ file_openapiv2_OpenAPIv2_proto_msgTypes[53].OneofWrappers = []interface{}{
+ (*SecurityDefinitionsItem_BasicAuthenticationSecurity)(nil),
+ (*SecurityDefinitionsItem_ApiKeySecurity)(nil),
+ (*SecurityDefinitionsItem_Oauth2ImplicitSecurity)(nil),
+ (*SecurityDefinitionsItem_Oauth2PasswordSecurity)(nil),
+ (*SecurityDefinitionsItem_Oauth2ApplicationSecurity)(nil),
+ (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_openapiv2_OpenAPIv2_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 60,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_openapiv2_OpenAPIv2_proto_goTypes,
+ DependencyIndexes: file_openapiv2_OpenAPIv2_proto_depIdxs,
+ MessageInfos: file_openapiv2_OpenAPIv2_proto_msgTypes,
+ }.Build()
+ File_openapiv2_OpenAPIv2_proto = out.File
+ file_openapiv2_OpenAPIv2_proto_rawDesc = nil
+ file_openapiv2_OpenAPIv2_proto_goTypes = nil
+ file_openapiv2_OpenAPIv2_proto_depIdxs = nil
}
diff --git a/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.proto b/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.proto
index 557c88072..1c59b2f4a 100644
--- a/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.proto
+++ b/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.proto
@@ -1,4 +1,4 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
+// Copyright 2020 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -41,6 +41,9 @@ option java_package = "org.openapi_v2";
// the future. 'GPB' is reserved for the protocol buffer implementation itself.
option objc_class_prefix = "OAS";
+// The Go package name.
+option go_package = "./openapiv2;openapi_v2";
+
message AdditionalPropertiesItem {
oneof oneof {
Schema schema = 1;
@@ -553,7 +556,7 @@ message Response {
repeated NamedAny vendor_extension = 5;
}
-// One or more JSON representations for parameters
+// One or more JSON representations for responses
message ResponseDefinitions {
repeated NamedResponse additional_properties = 1;
}
diff --git a/vendor/github.com/googleapis/gnostic/openapiv2/README.md b/vendor/github.com/googleapis/gnostic/openapiv2/README.md
index 836fb32a7..5276128d3 100644
--- a/vendor/github.com/googleapis/gnostic/openapiv2/README.md
+++ b/vendor/github.com/googleapis/gnostic/openapiv2/README.md
@@ -1,16 +1,14 @@
# OpenAPI v2 Protocol Buffer Models
-This directory contains a Protocol Buffer-language model
-and related code for supporting OpenAPI v2.
+This directory contains a Protocol Buffer-language model and related code for
+supporting OpenAPI v2.
-Gnostic applications and plugins can use OpenAPIv2.proto
-to generate Protocol Buffer support code for their preferred languages.
+Gnostic applications and plugins can use OpenAPIv2.proto to generate Protocol
+Buffer support code for their preferred languages.
-OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI
-descriptions into the Protocol Buffer-based datastructures
-generated from OpenAPIv2.proto.
+OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI descriptions into
+the Protocol Buffer-based datastructures generated from OpenAPIv2.proto.
-OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic
-compiler generator, and OpenAPIv2.pb.go is generated by
-protoc, the Protocol Buffer compiler, and protoc-gen-go, the
-Protocol Buffer Go code generation plugin.
+OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic compiler
+generator, and OpenAPIv2.pb.go is generated by protoc, the Protocol Buffer
+compiler, and protoc-gen-go, the Protocol Buffer Go code generation plugin.
diff --git a/vendor/github.com/googleapis/gnostic/openapiv2/document.go b/vendor/github.com/googleapis/gnostic/openapiv2/document.go
new file mode 100644
index 000000000..56e5966b4
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/openapiv2/document.go
@@ -0,0 +1,41 @@
+// Copyright 2020 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package openapi_v2
+
+import (
+ "github.com/googleapis/gnostic/compiler"
+ "gopkg.in/yaml.v3"
+)
+
+// ParseDocument reads an OpenAPI v2 description from a YAML/JSON representation.
+func ParseDocument(b []byte) (*Document, error) {
+ info, err := compiler.ReadInfoFromBytes("", b)
+ if err != nil {
+ return nil, err
+ }
+ root := info.Content[0]
+ return NewDocument(root, compiler.NewContextWithExtensions("$root", root, nil, nil))
+}
+
+// YAMLValue produces a serialized YAML representation of the document.
+func (d *Document) YAMLValue(comment string) ([]byte, error) {
+ rawInfo := d.ToRawInfo()
+ rawInfo = &yaml.Node{
+ Kind: yaml.DocumentNode,
+ Content: []*yaml.Node{rawInfo},
+ HeadComment: comment,
+ }
+ return yaml.Marshal(rawInfo)
+}
diff --git a/vendor/github.com/googleapis/gnostic/openapiv2/openapi-2.0.json b/vendor/github.com/googleapis/gnostic/openapiv2/openapi-2.0.json
index 2815a26ea..afa12b79b 100644
--- a/vendor/github.com/googleapis/gnostic/openapiv2/openapi-2.0.json
+++ b/vendor/github.com/googleapis/gnostic/openapiv2/openapi-2.0.json
@@ -203,7 +203,7 @@
"additionalProperties": {
"$ref": "#/definitions/response"
},
- "description": "One or more JSON representations for parameters"
+ "description": "One or more JSON representations for responses"
},
"externalDocs": {
"type": "object",
@@ -1607,4 +1607,4 @@
}
}
}
-}
+}
\ No newline at end of file
diff --git a/vendor/github.com/gophercloud/gophercloud/.zuul.yaml b/vendor/github.com/gophercloud/gophercloud/.zuul.yaml
index 5da898291..8cba23cb0 100644
--- a/vendor/github.com/gophercloud/gophercloud/.zuul.yaml
+++ b/vendor/github.com/gophercloud/gophercloud/.zuul.yaml
@@ -13,7 +13,7 @@
Run gophercloud acceptance test on master branch
run: .zuul/playbooks/gophercloud-acceptance-test/run.yaml
timeout: 18000 # 5 hours
- nodeset: ubuntu-bionic
+ nodeset: ubuntu-focal
- job:
name: gophercloud-acceptance-test-ironic
@@ -21,7 +21,7 @@
description: |
Run gophercloud ironic acceptance test on master branch
run: .zuul/playbooks/gophercloud-acceptance-test-ironic/run.yaml
- nodeset: ubuntu-bionic
+ nodeset: ubuntu-focal
- job:
name: gophercloud-acceptance-test-ussuri
diff --git a/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md b/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md
index b436f0076..b28cfe686 100644
--- a/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md
+++ b/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md
@@ -1,4 +1,137 @@
-## 0.16.0 (Unreleased)
+## 0.21.0 (Unreleased)
+
+## 0.20.0 (August 10, 2021)
+
+IMPROVEMENTS
+
+* Added `RetryFunc` to enable custom retry functions. [GH-2194](https://github.com/gophercloud/gophercloud/pull/2194)
+* Added `openstack/baremetal/v1/nodes.GetVendorPassthruMethods` [GH-2201](https://github.com/gophercloud/gophercloud/pull/2201)
+* Added `openstack/baremetal/v1/nodes.GetAllSubscriptions` [GH-2201](https://github.com/gophercloud/gophercloud/pull/2201)
+* Added `openstack/baremetal/v1/nodes.GetSubscription` [GH-2201](https://github.com/gophercloud/gophercloud/pull/2201)
+* Added `openstack/baremetal/v1/nodes.DeleteSubscription` [GH-2201](https://github.com/gophercloud/gophercloud/pull/2201)
+* Added `openstack/baremetal/v1/nodes.CreateSubscription` [GH-2201](https://github.com/gophercloud/gophercloud/pull/2201)
+
+## 0.19.0 (July 22, 2021)
+
+NOTES / BREAKING CHANGES
+
+* `compute/v2/extensions/keypairs.List` now takes a `ListOptsBuilder` argument [GH-2186](https://github.com/gophercloud/gophercloud/pull/2186)
+* `compute/v2/extensions/keypairs.Get` now takes a `GetOptsBuilder` argument [GH-2186](https://github.com/gophercloud/gophercloud/pull/2186)
+* `compute/v2/extensions/keypairs.Delete` now takes a `DeleteOptsBuilder` argument [GH-2186](https://github.com/gophercloud/gophercloud/pull/2186)
+* `compute/v2/extensions/hypervisors.List` now takes a `ListOptsBuilder` argument [GH-2187](https://github.com/gophercloud/gophercloud/pull/2187)
+
+IMPROVEMENTS
+
+* Added `blockstorage/v3/qos.List` [GH-2167](https://github.com/gophercloud/gophercloud/pull/2167)
+* Added `compute/v2/extensions/volumeattach.CreateOpts.Tag` [GH-2177](https://github.com/gophercloud/gophercloud/pull/2177)
+* Added `compute/v2/extensions/volumeattach.CreateOpts.DeleteOnTermination` [GH-2177](https://github.com/gophercloud/gophercloud/pull/2177)
+* Added `compute/v2/extensions/volumeattach.VolumeAttachment.Tag` [GH-2177](https://github.com/gophercloud/gophercloud/pull/2177)
+* Added `compute/v2/extensions/volumeattach.VolumeAttachment.DeleteOnTermination` [GH-2177](https://github.com/gophercloud/gophercloud/pull/2177)
+* Added `db/v1/instances.Instance.Address` [GH-2179](https://github.com/gophercloud/gophercloud/pull/2179)
+* Added `compute/v2/servers.ListOpts.AvailabilityZone` [GH-2098](https://github.com/gophercloud/gophercloud/pull/2098)
+* Added `compute/v2/extensions/keypairs.ListOpts` [GH-2186](https://github.com/gophercloud/gophercloud/pull/2186)
+* Added `compute/v2/extensions/keypairs.GetOpts` [GH-2186](https://github.com/gophercloud/gophercloud/pull/2186)
+* Added `compute/v2/extensions/keypairs.DeleteOpts` [GH-2186](https://github.com/gophercloud/gophercloud/pull/2186)
+* Added `objectstorage/v2/containers.GetHeader.Timestamp` [GH-2185](https://github.com/gophercloud/gophercloud/pull/2185)
+* Added `compute/v2/extensions.ListOpts` [GH-2187](https://github.com/gophercloud/gophercloud/pull/2187)
+* Added `sharedfilesystems/v2/shares.Share.CreateShareFromSnapshotSupport` [GH-2191](https://github.com/gophercloud/gophercloud/pull/2191)
+* Added `compute/v2/servers.Network.Tag` for use in `CreateOpts` [GH-2193](https://github.com/gophercloud/gophercloud/pull/2193)
+
+## 0.18.0 (June 11, 2021)
+
+NOTES / BREAKING CHANGES
+
+* As of [GH-2160](https://github.com/gophercloud/gophercloud/pull/2160), Gophercloud no longer URL encodes Object Storage containers and object names. You can still encode them yourself before passing the names to the Object Storage functions.
+
+* `baremetal/v1/nodes.ListBIOSSettings` now takes three parameters. The third, new, parameter is `ListBIOSSettingsOptsBuilder` [GH-2174](https://github.com/gophercloud/gophercloud/pull/2174)
+
+BUG FIXES
+
+* Fixed expected OK codes to use default codes [GH-2173](https://github.com/gophercloud/gophercloud/pull/2173)
+* Fixed inability to create sub-containers (objects with `/` in their name) [GH-2160](https://github.com/gophercloud/gophercloud/pull/2160)
+
+IMPROVEMENTS
+
+* Added `orchestration/v1/stacks.ListOpts.ShowHidden` [GH-2104](https://github.com/gophercloud/gophercloud/pull/2104)
+* Added `loadbalancer/v2/listeners.ProtocolSCTP` [GH-2149](https://github.com/gophercloud/gophercloud/pull/2149)
+* Added `loadbalancer/v2/listeners.CreateOpts.TLSVersions` [GH-2150](https://github.com/gophercloud/gophercloud/pull/2150)
+* Added `loadbalancer/v2/listeners.UpdateOpts.TLSVersions` [GH-2150](https://github.com/gophercloud/gophercloud/pull/2150)
+* Added `baremetal/v1/nodes.CreateOpts.NetworkData` [GH-2154](https://github.com/gophercloud/gophercloud/pull/2154)
+* Added `baremetal/v1/nodes.Node.NetworkData` [GH-2154](https://github.com/gophercloud/gophercloud/pull/2154)
+* Added `loadbalancer/v2/pools.ProtocolPROXYV2` [GH-2158](https://github.com/gophercloud/gophercloud/pull/2158)
+* Added `loadbalancer/v2/pools.ProtocolSCTP` [GH-2158](https://github.com/gophercloud/gophercloud/pull/2158)
+* Added `placement/v1/resourceproviders.GetAllocations` [GH-2162](https://github.com/gophercloud/gophercloud/pull/2162)
+* Added `baremetal/v1/nodes.CreateOpts.BIOSInterface` [GH-2164](https://github.com/gophercloud/gophercloud/pull/2164)
+* Added `baremetal/v1/nodes.Node.BIOSInterface` [GH-2164](https://github.com/gophercloud/gophercloud/pull/2164)
+* Added `baremetal/v1/nodes.NodeValidation.BIOS` [GH-2164](https://github.com/gophercloud/gophercloud/pull/2164)
+* Added `baremetal/v1/nodes.ListBIOSSettings` [GH-2171](https://github.com/gophercloud/gophercloud/pull/2171)
+* Added `baremetal/v1/nodes.GetBIOSSetting` [GH-2171](https://github.com/gophercloud/gophercloud/pull/2171)
+* Added `baremetal/v1/nodes.ListBIOSSettingsOpts` [GH-2174](https://github.com/gophercloud/gophercloud/pull/2174)
+* Added `baremetal/v1/nodes.BIOSSetting.AttributeType` [GH-2174](https://github.com/gophercloud/gophercloud/pull/2174)
+* Added `baremetal/v1/nodes.BIOSSetting.AllowableValues` [GH-2174](https://github.com/gophercloud/gophercloud/pull/2174)
+* Added `baremetal/v1/nodes.BIOSSetting.LowerBound` [GH-2174](https://github.com/gophercloud/gophercloud/pull/2174)
+* Added `baremetal/v1/nodes.BIOSSetting.UpperBound` [GH-2174](https://github.com/gophercloud/gophercloud/pull/2174)
+* Added `baremetal/v1/nodes.BIOSSetting.MinLength` [GH-2174](https://github.com/gophercloud/gophercloud/pull/2174)
+* Added `baremetal/v1/nodes.BIOSSetting.MaxLength` [GH-2174](https://github.com/gophercloud/gophercloud/pull/2174)
+* Added `baremetal/v1/nodes.BIOSSetting.ReadOnly` [GH-2174](https://github.com/gophercloud/gophercloud/pull/2174)
+* Added `baremetal/v1/nodes.BIOSSetting.ResetRequired` [GH-2174](https://github.com/gophercloud/gophercloud/pull/2174)
+* Added `baremetal/v1/nodes.BIOSSetting.Unique` [GH-2174](https://github.com/gophercloud/gophercloud/pull/2174)
+
+## 0.17.0 (April 9, 2021)
+
+IMPROVEMENTS
+
+* `networking/v2/extensions/quotas.QuotaDetail.Reserved` can handle both `int` and `string` values [GH-2126](https://github.com/gophercloud/gophercloud/pull/2126)
+* Added `blockstorage/v3/volumetypes.ListExtraSpecs` [GH-2123](https://github.com/gophercloud/gophercloud/pull/2123)
+* Added `blockstorage/v3/volumetypes.GetExtraSpec` [GH-2123](https://github.com/gophercloud/gophercloud/pull/2123)
+* Added `blockstorage/v3/volumetypes.CreateExtraSpecs` [GH-2123](https://github.com/gophercloud/gophercloud/pull/2123)
+* Added `blockstorage/v3/volumetypes.UpdateExtraSpec` [GH-2123](https://github.com/gophercloud/gophercloud/pull/2123)
+* Added `blockstorage/v3/volumetypes.DeleteExtraSpec` [GH-2123](https://github.com/gophercloud/gophercloud/pull/2123)
+* Added `identity/v3/roles.ListAssignmentOpts.IncludeNames` [GH-2133](https://github.com/gophercloud/gophercloud/pull/2133)
+* Added `identity/v3/roles.AssignedRoles.Name` [GH-2133](https://github.com/gophercloud/gophercloud/pull/2133)
+* Added `identity/v3/roles.Domain.Name` [GH-2133](https://github.com/gophercloud/gophercloud/pull/2133)
+* Added `identity/v3/roles.Project.Name` [GH-2133](https://github.com/gophercloud/gophercloud/pull/2133)
+* Added `identity/v3/roles.User.Name` [GH-2133](https://github.com/gophercloud/gophercloud/pull/2133)
+* Added `identity/v3/roles.Group.Name` [GH-2133](https://github.com/gophercloud/gophercloud/pull/2133)
+* Added `blockstorage/extensions/availabilityzones.List` [GH-2135](https://github.com/gophercloud/gophercloud/pull/2135)
+* Added `blockstorage/v3/volumetypes.ListAccesses` [GH-2138](https://github.com/gophercloud/gophercloud/pull/2138)
+* Added `blockstorage/v3/volumetypes.AddAccess` [GH-2138](https://github.com/gophercloud/gophercloud/pull/2138)
+* Added `blockstorage/v3/volumetypes.RemoveAccess` [GH-2138](https://github.com/gophercloud/gophercloud/pull/2138)
+* Added `blockstorage/v3/qos.Create` [GH-2140](https://github.com/gophercloud/gophercloud/pull/2140)
+* Added `blockstorage/v3/qos.Delete` [GH-2140](https://github.com/gophercloud/gophercloud/pull/2140)
+
+## 0.16.0 (February 23, 2021)
+
+UPGRADE NOTES
+
+* `baremetal/v1/nodes.CleanStep.Interface` has changed from `string` to `StepInterface` [GH-2120](https://github.com/gophercloud/gophercloud/pull/2120)
+
+BUG FIXES
+
+* Fixed `xor` logic issues in `loadbalancers/v2/l7policies.CreateOpts` [GH-2087](https://github.com/gophercloud/gophercloud/pull/2087)
+* Fixed `xor` logic issues in `loadbalancers/v2/listeners.CreateOpts` [GH-2087](https://github.com/gophercloud/gophercloud/pull/2087)
+* Fixed `If-Modified-Since` so it's correctly sent in a `objectstorage/v1/objects.Download` request [GH-2108](https://github.com/gophercloud/gophercloud/pull/2108)
+* Fixed `If-Unmodified-Since` so it's correctly sent in a `objectstorage/v1/objects.Download` request [GH-2108](https://github.com/gophercloud/gophercloud/pull/2108)
+
+IMPROVEMENTS
+
+* Added `blockstorage/extensions/limits.Get` [GH-2084](https://github.com/gophercloud/gophercloud/pull/2084)
+* `clustering/v1/clusters.RemoveNodes` now returns an `ActionResult` [GH-2089](https://github.com/gophercloud/gophercloud/pull/2089)
+* Added `identity/v3/projects.ListAvailable` [GH-2090](https://github.com/gophercloud/gophercloud/pull/2090)
+* Added `blockstorage/extensions/backups.ListDetail` [GH-2085](https://github.com/gophercloud/gophercloud/pull/2085)
+* Allow all ports to be removed in `networking/v2/extensions/fwaas_v2/groups.UpdateOpts` [GH-2073]
+* Added `imageservice/v2/images.ListOpts.Hidden` [GH-2094](https://github.com/gophercloud/gophercloud/pull/2094)
+* Added `imageservice/v2/images.CreateOpts.Hidden` [GH-2094](https://github.com/gophercloud/gophercloud/pull/2094)
+* Added `imageservice/v2/images.ReplaceImageHidden` [GH-2094](https://github.com/gophercloud/gophercloud/pull/2094)
+* Added `imageservice/v2/images.Image.Hidden` [GH-2094](https://github.com/gophercloud/gophercloud/pull/2094)
+* Added `containerinfra/v1/clusters.CreateOpts.MasterLBEnabled` [GH-2102](https://github.com/gophercloud/gophercloud/pull/2102)
+* Added the ability to define a custom function to handle "Retry-After" (429) responses [GH-2097](https://github.com/gophercloud/gophercloud/pull/2097)
+* Added `baremetal/v1/nodes.JBOD` constant for the `RAIDLevel` type [GH-2103](https://github.com/gophercloud/gophercloud/pull/2103)
+* Added support for Block Storage quotas of volume typed resources [GH-2109](https://github.com/gophercloud/gophercloud/pull/2109)
+* Added `blockstorage/extensions/volumeactions.ChangeType` [GH-2113](https://github.com/gophercloud/gophercloud/pull/2113)
+* Added `baremetal/v1/nodes.DeployStep` [GH-2120](https://github.com/gophercloud/gophercloud/pull/2120)
+* Added `baremetal/v1/nodes.ProvisionStateOpts.DeploySteps` [GH-2120](https://github.com/gophercloud/gophercloud/pull/2120)
+* Added `baremetal/v1/nodes.CreateOpts.AutomatedClean` [GH-2122](https://github.com/gophercloud/gophercloud/pull/2122)
## 0.15.0 (December 27, 2020)
diff --git a/vendor/github.com/gophercloud/gophercloud/doc.go b/vendor/github.com/gophercloud/gophercloud/doc.go
index 953ca822a..e2623b44e 100644
--- a/vendor/github.com/gophercloud/gophercloud/doc.go
+++ b/vendor/github.com/gophercloud/gophercloud/doc.go
@@ -106,5 +106,44 @@ intermediary processing on each page, you can use the AllPages method:
This top-level package contains utility functions and data types that are used
throughout the provider and service packages. Of particular note for end users
are the AuthOptions and EndpointOpts structs.
+
+An example retry backoff function, which respects the 429 HTTP response code and a "Retry-After" header:
+
+ endpoint := "http://localhost:5000"
+ provider, err := openstack.NewClient(endpoint)
+ if err != nil {
+ panic(err)
+ }
+ provider.MaxBackoffRetries = 3 // max three retries
+ provider.RetryBackoffFunc = func(ctx context.Context, respErr *ErrUnexpectedResponseCode, e error, retries uint) error {
+ retryAfter := respErr.ResponseHeader.Get("Retry-After")
+ if retryAfter == "" {
+ return e
+ }
+
+ var sleep time.Duration
+
+ // Parse delay seconds or HTTP date
+ if v, err := strconv.ParseUint(retryAfter, 10, 32); err == nil {
+ sleep = time.Duration(v) * time.Second
+ } else if v, err := time.Parse(http.TimeFormat, retryAfter); err == nil {
+ sleep = time.Until(v)
+ } else {
+ return e
+ }
+
+ if ctx != nil {
+ select {
+ case <-time.After(sleep):
+ case <-ctx.Done():
+ return e
+ }
+ } else {
+ time.Sleep(sleep)
+ }
+
+ return nil
+ }
+
*/
package gophercloud
diff --git a/vendor/github.com/gophercloud/gophercloud/go.mod b/vendor/github.com/gophercloud/gophercloud/go.mod
index 64e2a0fb4..8c83df234 100644
--- a/vendor/github.com/gophercloud/gophercloud/go.mod
+++ b/vendor/github.com/gophercloud/gophercloud/go.mod
@@ -3,9 +3,7 @@ module github.com/gophercloud/gophercloud
go 1.13
require (
- github.com/kr/pretty v0.2.1 // indirect
- golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e
- golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9 // indirect
- gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
+ golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad
+ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect
gopkg.in/yaml.v2 v2.3.0
)
diff --git a/vendor/github.com/gophercloud/gophercloud/go.sum b/vendor/github.com/gophercloud/gophercloud/go.sum
index 311ab0449..c91f7ee22 100644
--- a/vendor/github.com/gophercloud/gophercloud/go.sum
+++ b/vendor/github.com/gophercloud/gophercloud/go.sum
@@ -1,19 +1,18 @@
-github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e h1:egKlR8l7Nu9vHGWbcUV8lqR4987UfUbBd7GbhqGzNYU=
-golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY=
+golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9 h1:ZBzSG/7F4eNKz2L3GE9o300RX0Az1Bw5HF7PDraD+qU=
-golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/term v0.0.0-20201117132131-f5c789dd3221 h1:/ZHdbVpdR/jk3g30/d4yUL0JU9kksj8+F/bnQUVLGDM=
+golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/doc.go
index 8f7d773e7..cb50014e8 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/doc.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/doc.go
@@ -12,19 +12,44 @@ Example of Show Hypervisor Details
fmt.Printf("%+v\n", hypervisor)
-Example of Show Hypervisor Details with Compute API microversion greater than 2.53
+Example of Show Hypervisor Details when using Compute API microversion greater than 2.53
- hypervisorID := "c48f6247-abe4-4a24-824e-ea39e108874f"
- hypervisor, err := hypervisors.Get(computeClient, hypervisorID).Extract()
- if err != nil {
- panic(err)
- }
+ computeClient.Microversion = "2.53"
+
+ hypervisorID := "c48f6247-abe4-4a24-824e-ea39e108874f"
+ hypervisor, err := hypervisors.Get(computeClient, hypervisorID).Extract()
+ if err != nil {
+ panic(err)
+ }
fmt.Printf("%+v\n", hypervisor)
Example of Retrieving Details of All Hypervisors
- allPages, err := hypervisors.List(computeClient).AllPages()
+ allPages, err := hypervisors.List(computeClient, nil).AllPages()
+ if err != nil {
+ panic(err)
+ }
+
+ allHypervisors, err := hypervisors.ExtractHypervisors(allPages)
+ if err != nil {
+ panic(err)
+ }
+
+ for _, hypervisor := range allHypervisors {
+ fmt.Printf("%+v\n", hypervisor)
+ }
+
+Example of Retrieving Details of All Hypervisors when using Compute API microversion 2.53 or greater.
+
+ computeClient.Microversion = "2.53"
+
+ iTrue := true
+ listOpts := hypervisors.ListOpts{
+ WithServers: &iTrue,
+ }
+
+ allPages, err := hypervisors.List(computeClient, listOpts).AllPages()
if err != nil {
panic(err)
}
@@ -59,11 +84,13 @@ Example of Show Hypervisor Uptime
Example of Show Hypervisor Uptime with Compute API microversion greater than 2.53
- hypervisorID := "c48f6247-abe4-4a24-824e-ea39e108874f"
- hypervisorUptime, err := hypervisors.GetUptime(computeClient, hypervisorID).Extract()
- if err != nil {
- panic(err)
- }
+ computeClient.Microversion = "2.53"
+
+ hypervisorID := "c48f6247-abe4-4a24-824e-ea39e108874f"
+ hypervisorUptime, err := hypervisors.GetUptime(computeClient, hypervisorID).Extract()
+ if err != nil {
+ panic(err)
+ }
fmt.Printf("%+v\n", hypervisorUptime)
*/
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/requests.go
index a7047d98d..eac01cd46 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/requests.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/requests.go
@@ -5,9 +5,52 @@ import (
"github.com/gophercloud/gophercloud/pagination"
)
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+ ToHypervisorListQuery() (string, error)
+}
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the server attributes you want to see returned. Marker and Limit are used
+// for pagination.
+type ListOpts struct {
+ // Limit is an integer value for the limit of values to return.
+ // This requires microversion 2.33 or later.
+ Limit *int `q:"limit"`
+
+ // Marker is the ID of the last-seen item as a UUID.
+ // This requires microversion 2.53 or later.
+ Marker *string `q:"marker"`
+
+ // HypervisorHostnamePattern is the hypervisor hostname or a portion of it.
+ // This requires microversion 2.53 or later
+ HypervisorHostnamePattern *string `q:"hypervisor_hostname_pattern"`
+
+ // WithServers is a bool to include all servers which belong to each hypervisor
+ // This requires microversion 2.53 or later
+ WithServers *bool `q:"with_servers"`
+}
+
+// ToHypervisorListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToHypervisorListQuery() (string, error) {
+ q, err := gophercloud.BuildQueryString(opts)
+ return q.String(), err
+}
+
// List makes a request against the API to list hypervisors.
-func List(client *gophercloud.ServiceClient) pagination.Pager {
- return pagination.NewPager(client, hypervisorsListDetailURL(client), func(r pagination.PageResult) pagination.Page {
+func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+ url := hypervisorsListDetailURL(client)
+ if opts != nil {
+ query, err := opts.ToHypervisorListQuery()
+ if err != nil {
+ return pagination.Pager{Err: err}
+ }
+ url += query
+ }
+
+ return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
return HypervisorPage{pagination.SinglePageBase(r)}
})
}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/results.go
index 5dab0f3d3..6c172b4ca 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/results.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/results.go
@@ -62,6 +62,12 @@ func (r *Service) UnmarshalJSON(b []byte) error {
return nil
}
+// Server represents an instance running on the hypervisor
+type Server struct {
+ Name string `json:"name"`
+ UUID string `json:"uuid"`
+}
+
// Hypervisor represents a hypervisor in the OpenStack cloud.
type Hypervisor struct {
// A structure that contains cpu information like arch, model, vendor,
@@ -123,6 +129,10 @@ type Hypervisor struct {
// Service is the service this hypervisor represents.
Service Service `json:"service"`
+ // Servers is a list of Server object.
+ // This requires microversion 2.53 or later.
+ Servers *[]Server `json:"servers"`
+
// VCPUs is the total number of vcpus on the hypervisor.
VCPUs int `json:"vcpus"`
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go
index 4e6042409..72ec69e50 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go
@@ -83,6 +83,9 @@ type ListOpts struct {
// This requires the client to be set to microversion 2.26 or later.
// NotTagsAny filters on specific server tags. At least one of the tags must be absent for the server.
NotTagsAny string `q:"not-tags-any"`
+
+ // Display servers based on their availability zone (Admin only until microversion 2.82).
+ AvailabilityZone string `q:"availability_zone"`
}
// ToServerListQuery formats a ListOpts into a query string.
@@ -125,6 +128,13 @@ type Network struct {
// FixedIP specifies a fixed IPv4 address to be used on this network.
FixedIP string
+
+ // Tag may contain an optional device role tag for the server's virtual
+ // network interface. This can be used to identify network interfaces when
+ // multiple networks are connected to one server.
+ //
+ // Requires microversion 2.32 through 2.36 or 2.42 or later.
+ Tag string
}
// Personality is an array of files that are injected into the server at launch.
@@ -258,6 +268,9 @@ func (opts CreateOpts) ToServerCreateMap() (map[string]interface{}, error) {
if net.FixedIP != "" {
networks[i]["fixed_ip"] = net.FixedIP
}
+ if net.Tag != "" {
+ networks[i]["tag"] = net.Tag
+ }
}
b["networks"] = networks
}
diff --git a/vendor/github.com/gophercloud/gophercloud/provider_client.go b/vendor/github.com/gophercloud/gophercloud/provider_client.go
index 53b3ecf27..916e59ae4 100644
--- a/vendor/github.com/gophercloud/gophercloud/provider_client.go
+++ b/vendor/github.com/gophercloud/gophercloud/provider_client.go
@@ -13,7 +13,10 @@ import (
)
// DefaultUserAgent is the default User-Agent string set in the request header.
-const DefaultUserAgent = "gophercloud/2.0.0"
+const (
+ DefaultUserAgent = "gophercloud/2.0.0"
+ DefaultMaxBackoffRetries = 60
+)
// UserAgent represents a User-Agent header.
type UserAgent struct {
@@ -22,6 +25,14 @@ type UserAgent struct {
prepend []string
}
+type RetryBackoffFunc func(context.Context, *ErrUnexpectedResponseCode, error, uint) error
+
+// RetryFunc is a catch-all function for retrying failed API requests.
+// If it returns nil, the request will be retried. If it returns an error,
+// the request method will exit with that error. failCount is the number of
+// times the request has failed (starting at 1).
+type RetryFunc func(context context.Context, method, url string, options *RequestOpts, err error, failCount uint) error
+
// Prepend prepends a user-defined string to the default User-Agent string. Users
// may pass in one or more strings to prepend.
func (ua *UserAgent) Prepend(s ...string) {
@@ -80,6 +91,16 @@ type ProviderClient struct {
// Context is the context passed to the HTTP request.
Context context.Context
+ // Retry backoff func is called when rate limited.
+ RetryBackoffFunc RetryBackoffFunc
+
+ // MaxBackoffRetries set the maximum number of backoffs. When not set, defaults to DefaultMaxBackoffRetries
+ MaxBackoffRetries uint
+
+ // A general failed request handler method - this is always called in the end if a request failed. Leave as nil
+ // to abort when an error is encountered.
+ RetryFunc RetryFunc
+
// mut is a mutex for the client. It protects read and write access to client attributes such as getting
// and setting the TokenID.
mut *sync.RWMutex
@@ -323,6 +344,8 @@ type requestState struct {
// reauthenticate, but keep getting 401 responses with the fresh token, reauthenticating some more
// will just get us into an infinite loop.
hasReauthenticated bool
+ // Retry-After backoff counter, increments during each backoff call
+ retries uint
}
var applicationJSON = "application/json"
@@ -403,6 +426,16 @@ func (client *ProviderClient) doRequest(method, url string, options *RequestOpts
// Issue the request.
resp, err := client.HTTPClient.Do(req)
if err != nil {
+ if client.RetryFunc != nil {
+ var e error
+ state.retries = state.retries + 1
+ e = client.RetryFunc(client.Context, method, url, options, err, state.retries)
+ if e != nil {
+ return nil, e
+ }
+
+ return client.doRequest(method, url, options, state)
+ }
return nil, err
}
@@ -427,7 +460,7 @@ func (client *ProviderClient) doRequest(method, url string, options *RequestOpts
respErr := ErrUnexpectedResponseCode{
URL: url,
Method: method,
- Expected: options.OkCodes,
+ Expected: okc,
Actual: resp.StatusCode,
Body: body,
ResponseHeader: resp.Header,
@@ -498,11 +531,29 @@ func (client *ProviderClient) doRequest(method, url string, options *RequestOpts
if error409er, ok := errType.(Err409er); ok {
err = error409er.Error409(respErr)
}
- case 429:
+ case http.StatusTooManyRequests, 498:
err = ErrDefault429{respErr}
if error429er, ok := errType.(Err429er); ok {
err = error429er.Error429(respErr)
}
+
+ maxTries := client.MaxBackoffRetries
+ if maxTries == 0 {
+ maxTries = DefaultMaxBackoffRetries
+ }
+
+ if f := client.RetryBackoffFunc; f != nil && state.retries < maxTries {
+ var e error
+
+ state.retries = state.retries + 1
+ e = f(client.Context, &respErr, err, state.retries)
+
+ if e != nil {
+ return resp, e
+ }
+
+ return client.doRequest(method, url, options, state)
+ }
case http.StatusInternalServerError:
err = ErrDefault500{respErr}
if error500er, ok := errType.(Err500er); ok {
@@ -519,6 +570,17 @@ func (client *ProviderClient) doRequest(method, url string, options *RequestOpts
err = respErr
}
+ if err != nil && client.RetryFunc != nil {
+ var e error
+ state.retries = state.retries + 1
+ e = client.RetryFunc(client.Context, method, url, options, err, state.retries)
+ if e != nil {
+ return resp, e
+ }
+
+ return client.doRequest(method, url, options, state)
+ }
+
return resp, err
}
@@ -532,6 +594,16 @@ func (client *ProviderClient) doRequest(method, url string, options *RequestOpts
return resp, err
}
if err := json.NewDecoder(resp.Body).Decode(options.JSONResponse); err != nil {
+ if client.RetryFunc != nil {
+ var e error
+ state.retries = state.retries + 1
+ e = client.RetryFunc(client.Context, method, url, options, err, state.retries)
+ if e != nil {
+ return resp, e
+ }
+
+ return client.doRequest(method, url, options, state)
+ }
return nil, err
}
}
diff --git a/vendor/github.com/hashicorp/consul/api/acl.go b/vendor/github.com/hashicorp/consul/api/acl.go
index 7453feb8a..465e256e2 100644
--- a/vendor/github.com/hashicorp/consul/api/acl.go
+++ b/vendor/github.com/hashicorp/consul/api/acl.go
@@ -58,6 +58,7 @@ type ACLTokenListEntry struct {
CreateIndex uint64
ModifyIndex uint64
AccessorID string
+ SecretID string
Description string
Policies []*ACLTokenPolicyLink `json:",omitempty"`
Roles []*ACLTokenRoleLink `json:",omitempty"`
@@ -270,16 +271,61 @@ type ACLAuthMethodNamespaceRule struct {
type ACLAuthMethodListEntry struct {
Name string
Type string
- DisplayName string `json:",omitempty"`
- Description string `json:",omitempty"`
- CreateIndex uint64
- ModifyIndex uint64
+ DisplayName string `json:",omitempty"`
+ Description string `json:",omitempty"`
+ MaxTokenTTL time.Duration `json:",omitempty"`
+
+ // TokenLocality defines the kind of token that this auth method produces.
+ // This can be either 'local' or 'global'. If empty 'local' is assumed.
+ TokenLocality string `json:",omitempty"`
+ CreateIndex uint64
+ ModifyIndex uint64
// Namespace is the namespace the ACLAuthMethodListEntry is associated with.
// Namespacing is a Consul Enterprise feature.
Namespace string `json:",omitempty"`
}
+// This is nearly identical to the ACLAuthMethod MarshalJSON
+func (m *ACLAuthMethodListEntry) MarshalJSON() ([]byte, error) {
+ type Alias ACLAuthMethodListEntry
+ exported := &struct {
+ MaxTokenTTL string `json:",omitempty"`
+ *Alias
+ }{
+ MaxTokenTTL: m.MaxTokenTTL.String(),
+ Alias: (*Alias)(m),
+ }
+ if m.MaxTokenTTL == 0 {
+ exported.MaxTokenTTL = ""
+ }
+
+ return json.Marshal(exported)
+}
+
+// This is nearly identical to the ACLAuthMethod UnmarshalJSON
+func (m *ACLAuthMethodListEntry) UnmarshalJSON(data []byte) error {
+ type Alias ACLAuthMethodListEntry
+ aux := &struct {
+ MaxTokenTTL string
+ *Alias
+ }{
+ Alias: (*Alias)(m),
+ }
+
+ if err := json.Unmarshal(data, &aux); err != nil {
+ return err
+ }
+ var err error
+ if aux.MaxTokenTTL != "" {
+ if m.MaxTokenTTL, err = time.ParseDuration(aux.MaxTokenTTL); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
// ParseKubernetesAuthMethodConfig takes a raw config map and returns a parsed
// KubernetesAuthMethodConfig.
func ParseKubernetesAuthMethodConfig(raw map[string]interface{}) (*KubernetesAuthMethodConfig, error) {
@@ -404,7 +450,7 @@ func (a *ACL) Bootstrap() (*ACLToken, *WriteMeta, error) {
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
wm := &WriteMeta{RequestTime: rtt}
var out ACLToken
@@ -425,7 +471,7 @@ func (a *ACL) Create(acl *ACLEntry, q *WriteOptions) (string, *WriteMeta, error)
if err != nil {
return "", nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
wm := &WriteMeta{RequestTime: rtt}
var out struct{ ID string }
@@ -446,7 +492,7 @@ func (a *ACL) Update(acl *ACLEntry, q *WriteOptions) (*WriteMeta, error) {
if err != nil {
return nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
wm := &WriteMeta{RequestTime: rtt}
return wm, nil
@@ -462,7 +508,7 @@ func (a *ACL) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
if err != nil {
return nil, err
}
- resp.Body.Close()
+ closeResponseBody(resp)
wm := &WriteMeta{RequestTime: rtt}
return wm, nil
@@ -478,7 +524,7 @@ func (a *ACL) Clone(id string, q *WriteOptions) (string, *WriteMeta, error) {
if err != nil {
return "", nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
wm := &WriteMeta{RequestTime: rtt}
var out struct{ ID string }
@@ -498,7 +544,7 @@ func (a *ACL) Info(id string, q *QueryOptions) (*ACLEntry, *QueryMeta, error) {
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -524,7 +570,7 @@ func (a *ACL) List(q *QueryOptions) ([]*ACLEntry, *QueryMeta, error) {
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -545,7 +591,7 @@ func (a *ACL) Replication(q *QueryOptions) (*ACLReplicationStatus, *QueryMeta, e
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -568,7 +614,7 @@ func (a *ACL) TokenCreate(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteMe
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
wm := &WriteMeta{RequestTime: rtt}
var out ACLToken
@@ -593,7 +639,7 @@ func (a *ACL) TokenUpdate(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteMe
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
wm := &WriteMeta{RequestTime: rtt}
var out ACLToken
@@ -620,7 +666,7 @@ func (a *ACL) TokenClone(tokenID string, description string, q *WriteOptions) (*
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
wm := &WriteMeta{RequestTime: rtt}
var out ACLToken
@@ -640,7 +686,7 @@ func (a *ACL) TokenDelete(tokenID string, q *WriteOptions) (*WriteMeta, error) {
if err != nil {
return nil, err
}
- resp.Body.Close()
+ closeResponseBody(resp)
wm := &WriteMeta{RequestTime: rtt}
return wm, nil
@@ -655,7 +701,7 @@ func (a *ACL) TokenRead(tokenID string, q *QueryOptions) (*ACLToken, *QueryMeta,
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -679,7 +725,7 @@ func (a *ACL) TokenReadSelf(q *QueryOptions) (*ACLToken, *QueryMeta, error) {
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -702,7 +748,7 @@ func (a *ACL) TokenList(q *QueryOptions) ([]*ACLTokenListEntry, *QueryMeta, erro
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -728,7 +774,7 @@ func (a *ACL) PolicyCreate(policy *ACLPolicy, q *WriteOptions) (*ACLPolicy, *Wri
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
wm := &WriteMeta{RequestTime: rtt}
var out ACLPolicy
@@ -753,7 +799,7 @@ func (a *ACL) PolicyUpdate(policy *ACLPolicy, q *WriteOptions) (*ACLPolicy, *Wri
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
wm := &WriteMeta{RequestTime: rtt}
var out ACLPolicy
@@ -772,7 +818,7 @@ func (a *ACL) PolicyDelete(policyID string, q *WriteOptions) (*WriteMeta, error)
if err != nil {
return nil, err
}
- resp.Body.Close()
+ closeResponseBody(resp)
wm := &WriteMeta{RequestTime: rtt}
return wm, nil
@@ -786,7 +832,7 @@ func (a *ACL) PolicyRead(policyID string, q *QueryOptions) (*ACLPolicy, *QueryMe
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -808,7 +854,7 @@ func (a *ACL) PolicyReadByName(policyName string, q *QueryOptions) (*ACLPolicy,
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -835,7 +881,7 @@ func (a *ACL) PolicyList(q *QueryOptions) ([]*ACLPolicyListEntry, *QueryMeta, er
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -855,11 +901,12 @@ func (a *ACL) PolicyList(q *QueryOptions) ([]*ACLPolicyListEntry, *QueryMeta, er
func (a *ACL) RulesTranslate(rules io.Reader) (string, error) {
r := a.c.newRequest("POST", "/v1/acl/rules/translate")
r.body = rules
+ r.header.Set("Content-Type", "text/plain")
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return "", err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
@@ -883,7 +930,7 @@ func (a *ACL) RulesTranslateToken(tokenID string) (string, error) {
if err != nil {
return "", err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
@@ -910,7 +957,7 @@ func (a *ACL) RoleCreate(role *ACLRole, q *WriteOptions) (*ACLRole, *WriteMeta,
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
wm := &WriteMeta{RequestTime: rtt}
var out ACLRole
@@ -935,7 +982,7 @@ func (a *ACL) RoleUpdate(role *ACLRole, q *WriteOptions) (*ACLRole, *WriteMeta,
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
wm := &WriteMeta{RequestTime: rtt}
var out ACLRole
@@ -954,7 +1001,7 @@ func (a *ACL) RoleDelete(roleID string, q *WriteOptions) (*WriteMeta, error) {
if err != nil {
return nil, err
}
- resp.Body.Close()
+ closeResponseBody(resp)
wm := &WriteMeta{RequestTime: rtt}
return wm, nil
@@ -968,7 +1015,7 @@ func (a *ACL) RoleRead(roleID string, q *QueryOptions) (*ACLRole, *QueryMeta, er
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -994,7 +1041,7 @@ func (a *ACL) RoleReadByName(roleName string, q *QueryOptions) (*ACLRole, *Query
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -1022,7 +1069,7 @@ func (a *ACL) RoleList(q *QueryOptions) ([]*ACLRole, *QueryMeta, error) {
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -1048,7 +1095,7 @@ func (a *ACL) AuthMethodCreate(method *ACLAuthMethod, q *WriteOptions) (*ACLAuth
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
wm := &WriteMeta{RequestTime: rtt}
var out ACLAuthMethod
@@ -1072,7 +1119,7 @@ func (a *ACL) AuthMethodUpdate(method *ACLAuthMethod, q *WriteOptions) (*ACLAuth
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
wm := &WriteMeta{RequestTime: rtt}
var out ACLAuthMethod
@@ -1095,7 +1142,7 @@ func (a *ACL) AuthMethodDelete(methodName string, q *WriteOptions) (*WriteMeta,
if err != nil {
return nil, err
}
- resp.Body.Close()
+ closeResponseBody(resp)
wm := &WriteMeta{RequestTime: rtt}
return wm, nil
@@ -1113,7 +1160,7 @@ func (a *ACL) AuthMethodRead(methodName string, q *QueryOptions) (*ACLAuthMethod
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -1141,7 +1188,7 @@ func (a *ACL) AuthMethodList(q *QueryOptions) ([]*ACLAuthMethodListEntry, *Query
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -1169,7 +1216,7 @@ func (a *ACL) BindingRuleCreate(rule *ACLBindingRule, q *WriteOptions) (*ACLBind
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
wm := &WriteMeta{RequestTime: rtt}
var out ACLBindingRule
@@ -1194,7 +1241,7 @@ func (a *ACL) BindingRuleUpdate(rule *ACLBindingRule, q *WriteOptions) (*ACLBind
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
wm := &WriteMeta{RequestTime: rtt}
var out ACLBindingRule
@@ -1213,7 +1260,7 @@ func (a *ACL) BindingRuleDelete(bindingRuleID string, q *WriteOptions) (*WriteMe
if err != nil {
return nil, err
}
- resp.Body.Close()
+ closeResponseBody(resp)
wm := &WriteMeta{RequestTime: rtt}
return wm, nil
@@ -1227,7 +1274,7 @@ func (a *ACL) BindingRuleRead(bindingRuleID string, q *QueryOptions) (*ACLBindin
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -1256,7 +1303,7 @@ func (a *ACL) BindingRuleList(methodName string, q *QueryOptions) ([]*ACLBinding
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -1279,7 +1326,7 @@ func (a *ACL) Login(auth *ACLLoginParams, q *WriteOptions) (*ACLToken, *WriteMet
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
wm := &WriteMeta{RequestTime: rtt}
var out ACLToken
@@ -1297,7 +1344,7 @@ func (a *ACL) Logout(q *WriteOptions) (*WriteMeta, error) {
if err != nil {
return nil, err
}
- resp.Body.Close()
+ closeResponseBody(resp)
wm := &WriteMeta{RequestTime: rtt}
return wm, nil
@@ -1317,7 +1364,7 @@ func (a *ACL) OIDCAuthURL(auth *ACLOIDCAuthURLParams, q *WriteOptions) (string,
if err != nil {
return "", nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
wm := &WriteMeta{RequestTime: rtt}
var out aclOIDCAuthURLResponse
@@ -1352,7 +1399,7 @@ func (a *ACL) OIDCCallback(auth *ACLOIDCCallbackParams, q *WriteOptions) (*ACLTo
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
wm := &WriteMeta{RequestTime: rtt}
var out ACLToken
diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go
index a4cc143f0..2d1366019 100644
--- a/vendor/github.com/hashicorp/consul/api/agent.go
+++ b/vendor/github.com/hashicorp/consul/api/agent.go
@@ -3,6 +3,7 @@ package api
import (
"bufio"
"bytes"
+ "context"
"fmt"
"io"
"net/http"
@@ -62,6 +63,7 @@ type AgentCheck struct {
ServiceID string
ServiceName string
Type string
+ ExposedPort int
Definition HealthCheckDefinition
Namespace string `json:",omitempty"`
}
@@ -81,6 +83,7 @@ type AgentService struct {
Meta map[string]string
Port int
Address string
+ SocketPath string
TaggedAddresses map[string]ServiceAddress `json:",omitempty"`
Weights AgentWeights
EnableTagOverride bool
@@ -113,14 +116,17 @@ type AgentServiceConnect struct {
// AgentServiceConnectProxyConfig is the proxy configuration in a connect-proxy
// ServiceDefinition or response.
type AgentServiceConnectProxyConfig struct {
- DestinationServiceName string `json:",omitempty"`
- DestinationServiceID string `json:",omitempty"`
- LocalServiceAddress string `json:",omitempty"`
- LocalServicePort int `json:",omitempty"`
- Config map[string]interface{} `json:",omitempty" bexpr:"-"`
- Upstreams []Upstream `json:",omitempty"`
- MeshGateway MeshGatewayConfig `json:",omitempty"`
- Expose ExposeConfig `json:",omitempty"`
+ DestinationServiceName string `json:",omitempty"`
+ DestinationServiceID string `json:",omitempty"`
+ LocalServiceAddress string `json:",omitempty"`
+ LocalServicePort int `json:",omitempty"`
+ LocalServiceSocketPath string `json:",omitempty"`
+ Mode ProxyMode `json:",omitempty"`
+ TransparentProxy *TransparentProxyConfig `json:",omitempty"`
+ Config map[string]interface{} `json:",omitempty" bexpr:"-"`
+ Upstreams []Upstream `json:",omitempty"`
+ MeshGateway MeshGatewayConfig `json:",omitempty"`
+ Expose ExposeConfig `json:",omitempty"`
}
const (
@@ -255,6 +261,7 @@ type AgentServiceRegistration struct {
Tags []string `json:",omitempty"`
Port int `json:",omitempty"`
Address string `json:",omitempty"`
+ SocketPath string `json:",omitempty"`
TaggedAddresses map[string]ServiceAddress `json:",omitempty"`
EnableTagOverride bool `json:",omitempty"`
Meta map[string]string `json:",omitempty"`
@@ -266,12 +273,23 @@ type AgentServiceRegistration struct {
Namespace string `json:",omitempty" bexpr:"-" hash:"ignore"`
}
-//ServiceRegisterOpts is used to pass extra options to the service register.
+// ServiceRegisterOpts is used to pass extra options to the service register.
type ServiceRegisterOpts struct {
//Missing healthchecks will be deleted from the agent.
//Using this parameter allows to idempotently register a service and its checks without
//having to manually deregister checks.
ReplaceExistingChecks bool
+
+ // ctx is an optional context pass through to the underlying HTTP
+ // request layer. Use WithContext() to set the context.
+ ctx context.Context
+}
+
+// WithContext sets the context to be used for the request on a new ServiceRegisterOpts,
+// and returns the opts.
+func (o ServiceRegisterOpts) WithContext(ctx context.Context) ServiceRegisterOpts {
+ o.ctx = ctx
+ return o
}
// AgentCheckRegistration is used to register a new check
@@ -301,6 +319,7 @@ type AgentServiceCheck struct {
TCP string `json:",omitempty"`
Status string `json:",omitempty"`
Notes string `json:",omitempty"`
+ TLSServerName string `json:",omitempty"`
TLSSkipVerify bool `json:",omitempty"`
GRPC string `json:",omitempty"`
GRPCUseTLS bool `json:",omitempty"`
@@ -392,8 +411,11 @@ type Upstream struct {
Datacenter string `json:",omitempty"`
LocalBindAddress string `json:",omitempty"`
LocalBindPort int `json:",omitempty"`
+ LocalBindSocketPath string `json:",omitempty"`
+ LocalBindSocketMode string `json:",omitempty"`
Config map[string]interface{} `json:",omitempty" bexpr:"-"`
MeshGateway MeshGatewayConfig `json:",omitempty"`
+ CentrallyConfigured bool `json:",omitempty" bexpr:"-"`
}
// Agent can be used to query the Agent endpoints
@@ -417,7 +439,7 @@ func (a *Agent) Self() (map[string]map[string]interface{}, error) {
if err != nil {
return nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
var out map[string]map[string]interface{}
if err := decodeBody(resp, &out); err != nil {
@@ -435,7 +457,7 @@ func (a *Agent) Host() (map[string]interface{}, error) {
if err != nil {
return nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
var out map[string]interface{}
if err := decodeBody(resp, &out); err != nil {
@@ -452,7 +474,7 @@ func (a *Agent) Metrics() (*MetricsInfo, error) {
if err != nil {
return nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
var out *MetricsInfo
if err := decodeBody(resp, &out); err != nil {
@@ -468,7 +490,7 @@ func (a *Agent) Reload() error {
if err != nil {
return err
}
- resp.Body.Close()
+ closeResponseBody(resp)
return nil
}
@@ -494,13 +516,20 @@ func (a *Agent) Checks() (map[string]*AgentCheck, error) {
// ChecksWithFilter returns a subset of the locally registered checks that match
// the given filter expression
func (a *Agent) ChecksWithFilter(filter string) (map[string]*AgentCheck, error) {
+ return a.ChecksWithFilterOpts(filter, nil)
+}
+
+// ChecksWithFilterOpts returns a subset of the locally registered checks that match
+// the given filter expression and QueryOptions.
+func (a *Agent) ChecksWithFilterOpts(filter string, q *QueryOptions) (map[string]*AgentCheck, error) {
r := a.c.newRequest("GET", "/v1/agent/checks")
+ r.setQueryOptions(q)
r.filterQuery(filter)
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
var out map[string]*AgentCheck
if err := decodeBody(resp, &out); err != nil {
@@ -517,13 +546,20 @@ func (a *Agent) Services() (map[string]*AgentService, error) {
// ServicesWithFilter returns a subset of the locally registered services that match
// the given filter expression
func (a *Agent) ServicesWithFilter(filter string) (map[string]*AgentService, error) {
+ return a.ServicesWithFilterOpts(filter, nil)
+}
+
+// ServicesWithFilterOpts returns a subset of the locally registered services that match
+// the given filter expression and QueryOptions.
+func (a *Agent) ServicesWithFilterOpts(filter string, q *QueryOptions) (map[string]*AgentService, error) {
r := a.c.newRequest("GET", "/v1/agent/services")
+ r.setQueryOptions(q)
r.filterQuery(filter)
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
var out map[string]*AgentService
if err := decodeBody(resp, &out); err != nil {
@@ -546,7 +582,7 @@ func (a *Agent) AgentHealthServiceByID(serviceID string) (string, *AgentServiceC
if err != nil {
return "", nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
// Service not Found
if resp.StatusCode == http.StatusNotFound {
return HealthCritical, nil, nil
@@ -580,7 +616,7 @@ func (a *Agent) AgentHealthServiceByName(service string) (string, []AgentService
if err != nil {
return "", nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
// Service not Found
if resp.StatusCode == http.StatusNotFound {
return HealthCritical, nil, nil
@@ -613,7 +649,7 @@ func (a *Agent) Service(serviceID string, q *QueryOptions) (*AgentService, *Quer
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -638,7 +674,7 @@ func (a *Agent) Members(wan bool) ([]*AgentMember, error) {
if err != nil {
return nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
var out []*AgentMember
if err := decodeBody(resp, &out); err != nil {
@@ -660,7 +696,7 @@ func (a *Agent) MembersOpts(opts MembersOpts) ([]*AgentMember, error) {
if err != nil {
return nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
var out []*AgentMember
if err := decodeBody(resp, &out); err != nil {
@@ -688,6 +724,7 @@ func (a *Agent) ServiceRegisterOpts(service *AgentServiceRegistration, opts Serv
func (a *Agent) serviceRegister(service *AgentServiceRegistration, opts ServiceRegisterOpts) error {
r := a.c.newRequest("PUT", "/v1/agent/service/register")
r.obj = service
+ r.ctx = opts.ctx
if opts.ReplaceExistingChecks {
r.params.Set("replace-existing-checks", "true")
}
@@ -695,7 +732,7 @@ func (a *Agent) serviceRegister(service *AgentServiceRegistration, opts ServiceR
if err != nil {
return err
}
- resp.Body.Close()
+ closeResponseBody(resp)
return nil
}
@@ -707,7 +744,20 @@ func (a *Agent) ServiceDeregister(serviceID string) error {
if err != nil {
return err
}
- resp.Body.Close()
+ closeResponseBody(resp)
+ return nil
+}
+
+// ServiceDeregisterOpts is used to deregister a service with
+// the local agent with QueryOptions.
+func (a *Agent) ServiceDeregisterOpts(serviceID string, q *QueryOptions) error {
+ r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID)
+ r.setQueryOptions(q)
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ closeResponseBody(resp)
return nil
}
@@ -762,7 +812,7 @@ func (a *Agent) updateTTL(checkID, note, status string) error {
if err != nil {
return err
}
- resp.Body.Close()
+ closeResponseBody(resp)
return nil
}
@@ -785,6 +835,10 @@ type checkUpdate struct {
// strings for compatibility (though a newer version of Consul will still be
// required to use this API).
func (a *Agent) UpdateTTL(checkID, output, status string) error {
+ return a.UpdateTTLOpts(checkID, output, status, nil)
+}
+
+func (a *Agent) UpdateTTLOpts(checkID, output, status string, q *QueryOptions) error {
switch status {
case "pass", HealthPassing:
status = HealthPassing
@@ -798,6 +852,7 @@ func (a *Agent) UpdateTTL(checkID, output, status string) error {
endpoint := fmt.Sprintf("/v1/agent/check/update/%s", checkID)
r := a.c.newRequest("PUT", endpoint)
+ r.setQueryOptions(q)
r.obj = &checkUpdate{
Status: status,
Output: output,
@@ -807,7 +862,7 @@ func (a *Agent) UpdateTTL(checkID, output, status string) error {
if err != nil {
return err
}
- resp.Body.Close()
+ closeResponseBody(resp)
return nil
}
@@ -820,19 +875,26 @@ func (a *Agent) CheckRegister(check *AgentCheckRegistration) error {
if err != nil {
return err
}
- resp.Body.Close()
+ closeResponseBody(resp)
return nil
}
// CheckDeregister is used to deregister a check with
// the local agent
func (a *Agent) CheckDeregister(checkID string) error {
+ return a.CheckDeregisterOpts(checkID, nil)
+}
+
+// CheckDeregisterOpts is used to deregister a check with
+// the local agent using query options
+func (a *Agent) CheckDeregisterOpts(checkID string, q *QueryOptions) error {
r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID)
+ r.setQueryOptions(q)
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
- resp.Body.Close()
+ closeResponseBody(resp)
return nil
}
@@ -847,7 +909,7 @@ func (a *Agent) Join(addr string, wan bool) error {
if err != nil {
return err
}
- resp.Body.Close()
+ closeResponseBody(resp)
return nil
}
@@ -858,7 +920,7 @@ func (a *Agent) Leave() error {
if err != nil {
return err
}
- resp.Body.Close()
+ closeResponseBody(resp)
return nil
}
@@ -869,7 +931,7 @@ func (a *Agent) ForceLeave(node string) error {
if err != nil {
return err
}
- resp.Body.Close()
+ closeResponseBody(resp)
return nil
}
@@ -882,7 +944,7 @@ func (a *Agent) ForceLeavePrune(node string) error {
if err != nil {
return err
}
- resp.Body.Close()
+ closeResponseBody(resp)
return nil
}
@@ -895,7 +957,7 @@ func (a *Agent) ConnectAuthorize(auth *AgentAuthorizeParams) (*AgentAuthorize, e
if err != nil {
return nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
var out AgentAuthorize
if err := decodeBody(resp, &out); err != nil {
@@ -912,7 +974,7 @@ func (a *Agent) ConnectCARoots(q *QueryOptions) (*CARootList, *QueryMeta, error)
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -933,7 +995,7 @@ func (a *Agent) ConnectCALeaf(serviceID string, q *QueryOptions) (*LeafCert, *Qu
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -956,7 +1018,7 @@ func (a *Agent) EnableServiceMaintenance(serviceID, reason string) error {
if err != nil {
return err
}
- resp.Body.Close()
+ closeResponseBody(resp)
return nil
}
@@ -969,7 +1031,7 @@ func (a *Agent) DisableServiceMaintenance(serviceID string) error {
if err != nil {
return err
}
- resp.Body.Close()
+ closeResponseBody(resp)
return nil
}
@@ -983,7 +1045,7 @@ func (a *Agent) EnableNodeMaintenance(reason string) error {
if err != nil {
return err
}
- resp.Body.Close()
+ closeResponseBody(resp)
return nil
}
@@ -996,7 +1058,7 @@ func (a *Agent) DisableNodeMaintenance() error {
if err != nil {
return err
}
- resp.Body.Close()
+ closeResponseBody(resp)
return nil
}
@@ -1027,7 +1089,7 @@ func (a *Agent) monitor(loglevel string, logJSON bool, stopCh <-chan struct{}, q
}
logCh := make(chan string, 64)
go func() {
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
scanner := bufio.NewScanner(resp.Body)
for {
select {
@@ -1135,7 +1197,7 @@ func (a *Agent) updateTokenOnce(target, token string, q *WriteOptions) (*WriteMe
if err != nil {
return nil, 0, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
wm := &WriteMeta{RequestTime: rtt}
diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go
index 08f00c406..a35980a9a 100644
--- a/vendor/github.com/hashicorp/consul/api/api.go
+++ b/vendor/github.com/hashicorp/consul/api/api.go
@@ -14,6 +14,7 @@ import (
"os"
"strconv"
"strings"
+ "sync"
"time"
"github.com/hashicorp/go-cleanhttp"
@@ -548,9 +549,48 @@ func (c *Config) GenerateEnv() []string {
// Client provides a client to the Consul API
type Client struct {
+ modifyLock sync.RWMutex
+ headers http.Header
+
config Config
}
+// Headers gets the current set of headers used for requests. This returns a
+// copy; to modify it call AddHeader or SetHeaders.
+func (c *Client) Headers() http.Header {
+ c.modifyLock.RLock()
+ defer c.modifyLock.RUnlock()
+
+ if c.headers == nil {
+ return nil
+ }
+
+ ret := make(http.Header)
+ for k, v := range c.headers {
+ for _, val := range v {
+ ret[k] = append(ret[k], val)
+ }
+ }
+
+ return ret
+}
+
+// AddHeader allows a single header key/value pair to be added
+// in a race-safe fashion.
+func (c *Client) AddHeader(key, value string) {
+ c.modifyLock.Lock()
+ defer c.modifyLock.Unlock()
+ c.headers.Add(key, value)
+}
+
+// SetHeaders clears all previous headers and uses only the given
+// ones going forward.
+func (c *Client) SetHeaders(headers http.Header) {
+ c.modifyLock.Lock()
+ defer c.modifyLock.Unlock()
+ c.headers = headers
+}
+
// NewClient returns a new client
func NewClient(config *Config) (*Client, error) {
// bootstrap the config
@@ -640,7 +680,7 @@ func NewClient(config *Config) (*Client, error) {
config.Token = defConfig.Token
}
- return &Client{config: *config}, nil
+ return &Client{config: *config, headers: make(http.Header)}, nil
}
// NewHttpClient returns an http client configured with the given Transport and TLS
@@ -831,6 +871,12 @@ func (r *request) toHTTP() (*http.Request, error) {
req.Host = r.url.Host
req.Header = r.header
+ // Content-Type must always be set when a body is present
+ // See https://github.com/hashicorp/consul/issues/10011
+ if req.Body != nil && req.Header.Get("Content-Type") == "" {
+ req.Header.Set("Content-Type", "application/json")
+ }
+
// Setup auth
if r.config.HttpAuth != nil {
req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password)
@@ -853,8 +899,9 @@ func (c *Client) newRequest(method, path string) *request {
Path: path,
},
params: make(map[string][]string),
- header: make(http.Header),
+ header: c.Headers(),
}
+
if c.config.Datacenter != "" {
r.params.Set("dc", c.config.Datacenter)
}
@@ -892,7 +939,7 @@ func (c *Client) query(endpoint string, out interface{}, q *QueryOptions) (*Quer
if err != nil {
return nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -914,7 +961,7 @@ func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (*
if err != nil {
return nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
wm := &WriteMeta{RequestTime: rtt}
if out != nil {
@@ -1008,7 +1055,7 @@ func encodeBody(obj interface{}) (io.Reader, error) {
func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) {
if e != nil {
if resp != nil {
- resp.Body.Close()
+ closeResponseBody(resp)
}
return d, nil, e
}
@@ -1018,6 +1065,14 @@ func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *h
return d, resp, nil
}
+// closeResponseBody reads resp.Body until EOF, and then closes it. The read
+// is necessary to ensure that the http.Client's underlying RoundTripper is able
+// to re-use the TCP connection. See godoc on net/http.Client.Do.
+func closeResponseBody(resp *http.Response) error {
+ _, _ = io.Copy(ioutil.Discard, resp.Body)
+ return resp.Body.Close()
+}
+
func (req *request) filterQuery(filter string) {
if filter == "" {
return
@@ -1032,14 +1087,14 @@ func (req *request) filterQuery(filter string) {
func generateUnexpectedResponseCodeError(resp *http.Response) error {
var buf bytes.Buffer
io.Copy(&buf, resp.Body)
- resp.Body.Close()
+ closeResponseBody(resp)
return fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes())
}
func requireNotFoundOrOK(d time.Duration, resp *http.Response, e error) (bool, time.Duration, *http.Response, error) {
if e != nil {
if resp != nil {
- resp.Body.Close()
+ closeResponseBody(resp)
}
return false, d, nil, e
}
diff --git a/vendor/github.com/hashicorp/consul/api/catalog.go b/vendor/github.com/hashicorp/consul/api/catalog.go
index 607d5d065..b8588d828 100644
--- a/vendor/github.com/hashicorp/consul/api/catalog.go
+++ b/vendor/github.com/hashicorp/consul/api/catalog.go
@@ -122,7 +122,7 @@ func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMet
if err != nil {
return nil, err
}
- resp.Body.Close()
+ closeResponseBody(resp)
wm := &WriteMeta{}
wm.RequestTime = rtt
@@ -138,7 +138,7 @@ func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*Wr
if err != nil {
return nil, err
}
- resp.Body.Close()
+ closeResponseBody(resp)
wm := &WriteMeta{}
wm.RequestTime = rtt
@@ -153,7 +153,7 @@ func (c *Catalog) Datacenters() ([]string, error) {
if err != nil {
return nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
var out []string
if err := decodeBody(resp, &out); err != nil {
@@ -170,7 +170,7 @@ func (c *Catalog) Nodes(q *QueryOptions) ([]*Node, *QueryMeta, error) {
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -191,7 +191,7 @@ func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, er
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -248,7 +248,7 @@ func (c *Catalog) service(service string, tags []string, q *QueryOptions, connec
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -269,7 +269,7 @@ func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta,
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -293,7 +293,7 @@ func (c *Catalog) NodeServiceList(node string, q *QueryOptions) (*CatalogNodeSer
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -314,7 +314,7 @@ func (c *Catalog) GatewayServices(gateway string, q *QueryOptions) ([]*GatewaySe
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
diff --git a/vendor/github.com/hashicorp/consul/api/config_entry.go b/vendor/github.com/hashicorp/consul/api/config_entry.go
index f5ef60e29..e28c7dc18 100644
--- a/vendor/github.com/hashicorp/consul/api/config_entry.go
+++ b/vendor/github.com/hashicorp/consul/api/config_entry.go
@@ -21,8 +21,10 @@ const (
IngressGateway string = "ingress-gateway"
TerminatingGateway string = "terminating-gateway"
ServiceIntentions string = "service-intentions"
+ MeshConfig string = "mesh"
ProxyConfigGlobal string = "global"
+ MeshConfigMesh string = "mesh"
)
type ConfigEntry interface {
@@ -46,8 +48,8 @@ const (
// should be direct and not flow through a mesh gateway.
MeshGatewayModeNone MeshGatewayMode = "none"
- // MeshGatewayModeLocal represents that the Upstrea Connect connections
- // should be made to a mesh gateway in the local datacenter. This is
+ // MeshGatewayModeLocal represents that the Upstream Connect connections
+ // should be made to a mesh gateway in the local datacenter.
MeshGatewayModeLocal MeshGatewayMode = "local"
// MeshGatewayModeRemote represents that the Upstream Connect connections
@@ -62,6 +64,33 @@ type MeshGatewayConfig struct {
Mode MeshGatewayMode `json:",omitempty"`
}
+type ProxyMode string
+
+const (
+ // ProxyModeDefault represents no specific mode and should
+ // be used to indicate that a different layer of the configuration
+ // chain should take precedence
+ ProxyModeDefault ProxyMode = ""
+
+ // ProxyModeTransparent represents that inbound and outbound application
+ // traffic is being captured and redirected through the proxy.
+ ProxyModeTransparent ProxyMode = "transparent"
+
+ // ProxyModeDirect represents that the proxy's listeners must be dialed directly
+ // by the local application and other proxies.
+ ProxyModeDirect ProxyMode = "direct"
+)
+
+type TransparentProxyConfig struct {
+ // The port of the listener where outbound application traffic is being redirected to.
+ OutboundListenerPort int `json:",omitempty" alias:"outbound_listener_port"`
+
+ // DialedDirectly indicates whether transparent proxies can dial this proxy instance directly.
+ // The discovery chain is not considered when dialing a service instance directly.
+ // This setting is useful when addressing stateful services, such as a database cluster with a leader node.
+ DialedDirectly bool `json:",omitempty" alias:"dialed_directly"`
+}
+
// ExposeConfig describes HTTP paths to expose through Envoy outside of Connect.
// Users can expose individual paths and/or all HTTP/GRPC paths for checks.
type ExposeConfig struct {
@@ -91,14 +120,100 @@ type ExposePath struct {
ParsedFromCheck bool
}
+type UpstreamConfiguration struct {
+ // Overrides is a slice of per-service configuration. The name field is
+ // required.
+ Overrides []*UpstreamConfig `json:",omitempty"`
+
+ // Defaults contains default configuration for all upstreams of a given
+ // service. The name field must be empty.
+ Defaults *UpstreamConfig `json:",omitempty"`
+}
+
+type UpstreamConfig struct {
+ // Name is only accepted within a service-defaults config entry.
+ Name string `json:",omitempty"`
+ // Namespace is only accepted within a service-defaults config entry.
+ Namespace string `json:",omitempty"`
+
+ // EnvoyListenerJSON is a complete override ("escape hatch") for the upstream's
+ // listener.
+ //
+ // Note: This escape hatch is NOT compatible with the discovery chain and
+ // will be ignored if a discovery chain is active.
+ EnvoyListenerJSON string `json:",omitempty" alias:"envoy_listener_json"`
+
+ // EnvoyClusterJSON is a complete override ("escape hatch") for the upstream's
+ // cluster. The Connect client TLS certificate and context will be injected
+ // overriding any TLS settings present.
+ //
+ // Note: This escape hatch is NOT compatible with the discovery chain and
+ // will be ignored if a discovery chain is active.
+ EnvoyClusterJSON string `json:",omitempty" alias:"envoy_cluster_json"`
+
+ // Protocol describes the upstream's service protocol. Valid values are "tcp",
+ // "http" and "grpc". Anything else is treated as tcp. This enables protocol
+ // aware features like per-request metrics and connection pooling, tracing,
+ // routing etc.
+ Protocol string `json:",omitempty"`
+
+ // ConnectTimeoutMs is the number of milliseconds to timeout making a new
+ // connection to this upstream. Defaults to 5000 (5 seconds) if not set.
+ ConnectTimeoutMs int `json:",omitempty" alias:"connect_timeout_ms"`
+
+ // Limits are the set of limits that are applied to the proxy for a specific upstream of a
+ // service instance.
+ Limits *UpstreamLimits `json:",omitempty"`
+
+ // PassiveHealthCheck configuration determines how upstream proxy instances will
+ // be monitored for removal from the load balancing pool.
+ PassiveHealthCheck *PassiveHealthCheck `json:",omitempty" alias:"passive_health_check"`
+
+ // MeshGatewayConfig controls how Mesh Gateways are configured and used
+ MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway" `
+}
+
+type PassiveHealthCheck struct {
+ // Interval between health check analysis sweeps. Each sweep may remove
+ // hosts or return hosts to the pool.
+ Interval time.Duration `json:",omitempty"`
+
+ // MaxFailures is the count of consecutive failures that results in a host
+ // being removed from the pool.
+ MaxFailures uint32 `alias:"max_failures"`
+}
+
+// UpstreamLimits describes the limits that are associated with a specific
+// upstream of a service instance.
+type UpstreamLimits struct {
+ // MaxConnections is the maximum number of connections the local proxy can
+ // make to the upstream service.
+ MaxConnections *int `alias:"max_connections"`
+
+ // MaxPendingRequests is the maximum number of requests that will be queued
+ // waiting for an available connection. This is mostly applicable to HTTP/1.1
+ // clusters since all HTTP/2 requests are streamed over a single
+ // connection.
+ MaxPendingRequests *int `alias:"max_pending_requests"`
+
+ // MaxConcurrentRequests is the maximum number of in-flight requests that will be allowed
+ // to the upstream cluster at a point in time. This is mostly applicable to HTTP/2
+ // clusters since all HTTP/1.1 requests are limited by MaxConnections.
+ MaxConcurrentRequests *int `alias:"max_concurrent_requests"`
+}
+
type ServiceConfigEntry struct {
- Kind string
- Name string
- Namespace string `json:",omitempty"`
- Protocol string `json:",omitempty"`
- MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"`
- Expose ExposeConfig `json:",omitempty"`
- ExternalSNI string `json:",omitempty" alias:"external_sni"`
+ Kind string
+ Name string
+ Namespace string `json:",omitempty"`
+ Protocol string `json:",omitempty"`
+ Mode ProxyMode `json:",omitempty"`
+ TransparentProxy *TransparentProxyConfig `json:",omitempty" alias:"transparent_proxy"`
+ MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"`
+ Expose ExposeConfig `json:",omitempty"`
+ ExternalSNI string `json:",omitempty" alias:"external_sni"`
+ UpstreamConfig *UpstreamConfiguration `json:",omitempty" alias:"upstream_config"`
+
Meta map[string]string `json:",omitempty"`
CreateIndex uint64
ModifyIndex uint64
@@ -129,15 +244,17 @@ func (s *ServiceConfigEntry) GetModifyIndex() uint64 {
}
type ProxyConfigEntry struct {
- Kind string
- Name string
- Namespace string `json:",omitempty"`
- Config map[string]interface{} `json:",omitempty"`
- MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"`
- Expose ExposeConfig `json:",omitempty"`
- Meta map[string]string `json:",omitempty"`
- CreateIndex uint64
- ModifyIndex uint64
+ Kind string
+ Name string
+ Namespace string `json:",omitempty"`
+ Mode ProxyMode `json:",omitempty"`
+ TransparentProxy *TransparentProxyConfig `json:",omitempty" alias:"transparent_proxy"`
+ Config map[string]interface{} `json:",omitempty"`
+ MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"`
+ Expose ExposeConfig `json:",omitempty"`
+ Meta map[string]string `json:",omitempty"`
+ CreateIndex uint64
+ ModifyIndex uint64
}
func (p *ProxyConfigEntry) GetKind() string {
@@ -182,6 +299,8 @@ func makeConfigEntry(kind, name string) (ConfigEntry, error) {
return &TerminatingGatewayConfigEntry{Kind: kind, Name: name}, nil
case ServiceIntentions:
return &ServiceIntentionsConfigEntry{Kind: kind, Name: name}, nil
+ case MeshConfig:
+ return &MeshConfigEntry{}, nil
default:
return nil, fmt.Errorf("invalid config entry kind: %s", kind)
}
@@ -288,7 +407,7 @@ func (conf *ConfigEntries) Get(kind string, name string, q *QueryOptions) (Confi
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -313,7 +432,7 @@ func (conf *ConfigEntries) List(kind string, q *QueryOptions) ([]ConfigEntry, *Q
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -351,7 +470,7 @@ func (conf *ConfigEntries) set(entry ConfigEntry, params map[string]string, w *W
if err != nil {
return false, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
var buf bytes.Buffer
if _, err := io.Copy(&buf, resp.Body); err != nil {
@@ -374,7 +493,7 @@ func (conf *ConfigEntries) Delete(kind string, name string, w *WriteOptions) (*W
if err != nil {
return nil, err
}
- resp.Body.Close()
+ closeResponseBody(resp)
wm := &WriteMeta{RequestTime: rtt}
return wm, nil
}
diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_cluster.go b/vendor/github.com/hashicorp/consul/api/config_entry_cluster.go
new file mode 100644
index 000000000..9ec18ea67
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/config_entry_cluster.go
@@ -0,0 +1,53 @@
+package api
+
+import "encoding/json"
+
+type MeshConfigEntry struct {
+ Namespace string `json:",omitempty"`
+ TransparentProxy TransparentProxyMeshConfig `alias:"transparent_proxy"`
+ Meta map[string]string `json:",omitempty"`
+ CreateIndex uint64
+ ModifyIndex uint64
+}
+
+type TransparentProxyMeshConfig struct {
+ MeshDestinationsOnly bool `alias:"mesh_destinations_only"`
+}
+
+func (e *MeshConfigEntry) GetKind() string {
+ return MeshConfig
+}
+
+func (e *MeshConfigEntry) GetName() string {
+ return MeshConfigMesh
+}
+
+func (e *MeshConfigEntry) GetNamespace() string {
+ return e.Namespace
+}
+
+func (e *MeshConfigEntry) GetMeta() map[string]string {
+ return e.Meta
+}
+
+func (e *MeshConfigEntry) GetCreateIndex() uint64 {
+ return e.CreateIndex
+}
+
+func (e *MeshConfigEntry) GetModifyIndex() uint64 {
+ return e.ModifyIndex
+}
+
+// MarshalJSON adds the Kind field so that the JSON can be decoded back into the
+// correct type.
+func (e *MeshConfigEntry) MarshalJSON() ([]byte, error) {
+ type Alias MeshConfigEntry
+ source := &struct {
+ Kind string
+ *Alias
+ }{
+ Kind: MeshConfig,
+ Alias: (*Alias)(e),
+ }
+ return json.Marshal(source)
+}
diff --git a/vendor/github.com/hashicorp/consul/api/connect_ca.go b/vendor/github.com/hashicorp/consul/api/connect_ca.go
index 26a7bfb1d..37e53d96e 100644
--- a/vendor/github.com/hashicorp/consul/api/connect_ca.go
+++ b/vendor/github.com/hashicorp/consul/api/connect_ca.go
@@ -23,6 +23,14 @@ type CAConfig struct {
// configuration is an error.
State map[string]string
+ // ForceWithoutCrossSigning indicates that the CA reconfiguration should go
+ // ahead even if the current CA is unable to cross sign certificates. This
+ // risks temporary connection failures during the rollout as new leafs will be
+ // rejected by proxies that have not yet observed the new root cert but is the
+ // only option if a CA that doesn't support cross signing needs to be
+ // reconfigured or migrated away from.
+ ForceWithoutCrossSigning bool
+
CreateIndex uint64
ModifyIndex uint64
}
@@ -41,7 +49,6 @@ type ConsulCAProviderConfig struct {
PrivateKey string
RootCert string
- RotationPeriod time.Duration
IntermediateCertTTL time.Duration
}
@@ -130,7 +137,7 @@ func (h *Connect) CARoots(q *QueryOptions) (*CARootList, *QueryMeta, error) {
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -151,7 +158,7 @@ func (h *Connect) CAGetConfig(q *QueryOptions) (*CAConfig, *QueryMeta, error) {
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -173,7 +180,7 @@ func (h *Connect) CASetConfig(conf *CAConfig, q *WriteOptions) (*WriteMeta, erro
if err != nil {
return nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
wm := &WriteMeta{}
wm.RequestTime = rtt
diff --git a/vendor/github.com/hashicorp/consul/api/connect_intention.go b/vendor/github.com/hashicorp/consul/api/connect_intention.go
index 26fb6cc4b..d1f0b6530 100644
--- a/vendor/github.com/hashicorp/consul/api/connect_intention.go
+++ b/vendor/github.com/hashicorp/consul/api/connect_intention.go
@@ -170,7 +170,7 @@ func (h *Connect) Intentions(q *QueryOptions) ([]*Intention, *QueryMeta, error)
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -194,7 +194,7 @@ func (h *Connect) IntentionGetExact(source, destination string, q *QueryOptions)
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -226,7 +226,7 @@ func (h *Connect) IntentionGet(id string, q *QueryOptions) (*Intention, *QueryMe
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -259,7 +259,7 @@ func (h *Connect) IntentionDeleteExact(source, destination string, q *WriteOptio
if err != nil {
return nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &WriteMeta{}
qm.RequestTime = rtt
@@ -277,7 +277,7 @@ func (h *Connect) IntentionDelete(id string, q *WriteOptions) (*WriteMeta, error
if err != nil {
return nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &WriteMeta{}
qm.RequestTime = rtt
@@ -303,7 +303,7 @@ func (h *Connect) IntentionMatch(args *IntentionMatch, q *QueryOptions) (map[str
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -330,7 +330,7 @@ func (h *Connect) IntentionCheck(args *IntentionCheck, q *QueryOptions) (bool, *
if err != nil {
return false, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -355,7 +355,7 @@ func (c *Connect) IntentionUpsert(ixn *Intention, q *WriteOptions) (*WriteMeta,
if err != nil {
return nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
wm := &WriteMeta{}
wm.RequestTime = rtt
@@ -382,7 +382,7 @@ func (c *Connect) IntentionCreate(ixn *Intention, q *WriteOptions) (string, *Wri
if err != nil {
return "", nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
wm := &WriteMeta{}
wm.RequestTime = rtt
@@ -406,7 +406,7 @@ func (c *Connect) IntentionUpdate(ixn *Intention, q *WriteOptions) (*WriteMeta,
if err != nil {
return nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
wm := &WriteMeta{}
wm.RequestTime = rtt
diff --git a/vendor/github.com/hashicorp/consul/api/coordinate.go b/vendor/github.com/hashicorp/consul/api/coordinate.go
index 776630f67..32c7822c1 100644
--- a/vendor/github.com/hashicorp/consul/api/coordinate.go
+++ b/vendor/github.com/hashicorp/consul/api/coordinate.go
@@ -37,7 +37,7 @@ func (c *Coordinate) Datacenters() ([]*CoordinateDatacenterMap, error) {
if err != nil {
return nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
var out []*CoordinateDatacenterMap
if err := decodeBody(resp, &out); err != nil {
@@ -54,7 +54,7 @@ func (c *Coordinate) Nodes(q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, err
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -76,7 +76,7 @@ func (c *Coordinate) Update(coord *CoordinateEntry, q *WriteOptions) (*WriteMeta
if err != nil {
return nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
wm := &WriteMeta{}
wm.RequestTime = rtt
@@ -92,7 +92,7 @@ func (c *Coordinate) Node(node string, q *QueryOptions) ([]*CoordinateEntry, *Qu
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
diff --git a/vendor/github.com/hashicorp/consul/api/debug.go b/vendor/github.com/hashicorp/consul/api/debug.go
index 238046853..56dcc9bcd 100644
--- a/vendor/github.com/hashicorp/consul/api/debug.go
+++ b/vendor/github.com/hashicorp/consul/api/debug.go
@@ -27,7 +27,11 @@ func (d *Debug) Heap() ([]byte, error) {
if err != nil {
return nil, fmt.Errorf("error making request: %s", err)
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
+
+ if resp.StatusCode != 200 {
+ return nil, generateUnexpectedResponseCodeError(resp)
+ }
// We return a raw response because we're just passing through a response
// from the pprof handlers
@@ -50,7 +54,11 @@ func (d *Debug) Profile(seconds int) ([]byte, error) {
if err != nil {
return nil, fmt.Errorf("error making request: %s", err)
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
+
+ if resp.StatusCode != 200 {
+ return nil, generateUnexpectedResponseCodeError(resp)
+ }
// We return a raw response because we're just passing through a response
// from the pprof handlers
@@ -73,7 +81,11 @@ func (d *Debug) Trace(seconds int) ([]byte, error) {
if err != nil {
return nil, fmt.Errorf("error making request: %s", err)
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
+
+ if resp.StatusCode != 200 {
+ return nil, generateUnexpectedResponseCodeError(resp)
+ }
// We return a raw response because we're just passing through a response
// from the pprof handlers
@@ -93,7 +105,11 @@ func (d *Debug) Goroutine() ([]byte, error) {
if err != nil {
return nil, fmt.Errorf("error making request: %s", err)
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
+
+ if resp.StatusCode != 200 {
+ return nil, generateUnexpectedResponseCodeError(resp)
+ }
// We return a raw response because we're just passing through a response
// from the pprof handlers
diff --git a/vendor/github.com/hashicorp/consul/api/discovery_chain.go b/vendor/github.com/hashicorp/consul/api/discovery_chain.go
index f67f881c2..b78e6c3c4 100644
--- a/vendor/github.com/hashicorp/consul/api/discovery_chain.go
+++ b/vendor/github.com/hashicorp/consul/api/discovery_chain.go
@@ -43,7 +43,7 @@ func (d *DiscoveryChain) Get(name string, opts *DiscoveryChainOptions, q *QueryO
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
diff --git a/vendor/github.com/hashicorp/consul/api/event.go b/vendor/github.com/hashicorp/consul/api/event.go
index 85b5b069b..1da41375c 100644
--- a/vendor/github.com/hashicorp/consul/api/event.go
+++ b/vendor/github.com/hashicorp/consul/api/event.go
@@ -45,12 +45,13 @@ func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, er
if params.Payload != nil {
r.body = bytes.NewReader(params.Payload)
}
+ r.header.Set("Content-Type", "application/octet-stream")
rtt, resp, err := requireOK(e.c.doRequest(r))
if err != nil {
return "", nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
wm := &WriteMeta{RequestTime: rtt}
var out UserEvent
@@ -74,7 +75,7 @@ func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, er
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
diff --git a/vendor/github.com/hashicorp/consul/api/go.mod b/vendor/github.com/hashicorp/consul/api/go.mod
index 89e6e0c94..348ad8a73 100644
--- a/vendor/github.com/hashicorp/consul/api/go.mod
+++ b/vendor/github.com/hashicorp/consul/api/go.mod
@@ -5,7 +5,7 @@ go 1.12
replace github.com/hashicorp/consul/sdk => ../sdk
require (
- github.com/hashicorp/consul/sdk v0.7.0
+ github.com/hashicorp/consul/sdk v0.8.0
github.com/hashicorp/go-cleanhttp v0.5.1
github.com/hashicorp/go-hclog v0.12.0
github.com/hashicorp/go-rootcerts v1.0.2
diff --git a/vendor/github.com/hashicorp/consul/api/go.sum b/vendor/github.com/hashicorp/consul/api/go.sum
index 57ef54399..b95bd4744 100644
--- a/vendor/github.com/hashicorp/consul/api/go.sum
+++ b/vendor/github.com/hashicorp/consul/api/go.sum
@@ -7,7 +7,6 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
@@ -23,7 +22,6 @@ github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxB
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
@@ -49,18 +47,15 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10=
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.26 h1:gPxPSwALAeHJSjarOs00QjVdV9QoBvc1D2ujQUr5BzU=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
@@ -83,11 +78,11 @@ github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSg
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3 h1:KYQXGkl6vs02hK7pK4eIbw0NpNPedieTSTEiJ//bwGs=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392 h1:ACG4HJsFiNMf47Y4PeRoebLNy/2lXT9EtprMuTFWt1M=
@@ -97,22 +92,18 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5 h1:x6r4Jo0KNzOOzYd8lbcRsqjuqEASK6ob3auvWYM4/8U=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU=
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9 h1:1/DFK4b7JH8DmkqhUk48onnSfrPzImPoVxuomtbT2nk=
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -121,11 +112,9 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/hashicorp/consul/api/health.go b/vendor/github.com/hashicorp/consul/api/health.go
index 99b9ac257..0a3fd8dd1 100644
--- a/vendor/github.com/hashicorp/consul/api/health.go
+++ b/vendor/github.com/hashicorp/consul/api/health.go
@@ -58,6 +58,7 @@ type HealthCheckDefinition struct {
Header map[string][]string
Method string
Body string
+ TLSServerName string
TLSSkipVerify bool
TCP string
IntervalDuration time.Duration `json:"-"`
@@ -233,7 +234,7 @@ func (h *Health) Node(node string, q *QueryOptions) (HealthChecks, *QueryMeta, e
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -254,7 +255,7 @@ func (h *Health) Checks(service string, q *QueryOptions) (HealthChecks, *QueryMe
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -331,7 +332,7 @@ func (h *Health) service(service string, tags []string, passingOnly bool, q *Que
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -361,7 +362,7 @@ func (h *Health) State(state string, q *QueryOptions) (HealthChecks, *QueryMeta,
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
diff --git a/vendor/github.com/hashicorp/consul/api/kv.go b/vendor/github.com/hashicorp/consul/api/kv.go
index 351d287d6..1d5c11295 100644
--- a/vendor/github.com/hashicorp/consul/api/kv.go
+++ b/vendor/github.com/hashicorp/consul/api/kv.go
@@ -69,7 +69,7 @@ func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) {
if resp == nil {
return nil, qm, nil
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
var entries []*KVPair
if err := decodeBody(resp, &entries); err != nil {
@@ -90,7 +90,7 @@ func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) {
if resp == nil {
return nil, qm, nil
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
var entries []*KVPair
if err := decodeBody(resp, &entries); err != nil {
@@ -113,7 +113,7 @@ func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMe
if resp == nil {
return nil, qm, nil
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
var entries []string
if err := decodeBody(resp, &entries); err != nil {
@@ -138,10 +138,10 @@ func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions)
qm.RequestTime = rtt
if resp.StatusCode == 404 {
- resp.Body.Close()
+ closeResponseBody(resp)
return nil, qm, nil
} else if resp.StatusCode != 200 {
- resp.Body.Close()
+ closeResponseBody(resp)
return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode)
}
return resp, qm, nil
@@ -205,11 +205,12 @@ func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOpti
r.params.Set(param, val)
}
r.body = bytes.NewReader(body)
+ r.header.Set("Content-Type", "application/octet-stream")
rtt, resp, err := requireOK(k.c.doRequest(r))
if err != nil {
return false, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &WriteMeta{}
qm.RequestTime = rtt
@@ -253,7 +254,7 @@ func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOption
if err != nil {
return false, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &WriteMeta{}
qm.RequestTime = rtt
diff --git a/vendor/github.com/hashicorp/consul/api/namespace.go b/vendor/github.com/hashicorp/consul/api/namespace.go
index 49782d2a8..20f6c8d5c 100644
--- a/vendor/github.com/hashicorp/consul/api/namespace.go
+++ b/vendor/github.com/hashicorp/consul/api/namespace.go
@@ -67,7 +67,7 @@ func (n *Namespaces) Create(ns *Namespace, q *WriteOptions) (*Namespace, *WriteM
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
wm := &WriteMeta{RequestTime: rtt}
var out Namespace
@@ -90,7 +90,7 @@ func (n *Namespaces) Update(ns *Namespace, q *WriteOptions) (*Namespace, *WriteM
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
wm := &WriteMeta{RequestTime: rtt}
var out Namespace
@@ -109,7 +109,7 @@ func (n *Namespaces) Read(name string, q *QueryOptions) (*Namespace, *QueryMeta,
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
@@ -132,7 +132,7 @@ func (n *Namespaces) Delete(name string, q *WriteOptions) (*WriteMeta, error) {
if err != nil {
return nil, err
}
- resp.Body.Close()
+ closeResponseBody(resp)
wm := &WriteMeta{RequestTime: rtt}
return wm, nil
@@ -146,7 +146,7 @@ func (n *Namespaces) List(q *QueryOptions) ([]*Namespace, *QueryMeta, error) {
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
diff --git a/vendor/github.com/hashicorp/consul/api/operator_area.go b/vendor/github.com/hashicorp/consul/api/operator_area.go
index 5cf7e4973..5476f5c5b 100644
--- a/vendor/github.com/hashicorp/consul/api/operator_area.go
+++ b/vendor/github.com/hashicorp/consul/api/operator_area.go
@@ -93,7 +93,7 @@ func (op *Operator) AreaCreate(area *Area, q *WriteOptions) (string, *WriteMeta,
if err != nil {
return "", nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
wm := &WriteMeta{}
wm.RequestTime = rtt
@@ -114,7 +114,7 @@ func (op *Operator) AreaUpdate(areaID string, area *Area, q *WriteOptions) (stri
if err != nil {
return "", nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
wm := &WriteMeta{}
wm.RequestTime = rtt
@@ -154,7 +154,7 @@ func (op *Operator) AreaDelete(areaID string, q *WriteOptions) (*WriteMeta, erro
if err != nil {
return nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
wm := &WriteMeta{}
wm.RequestTime = rtt
@@ -171,7 +171,7 @@ func (op *Operator) AreaJoin(areaID string, addresses []string, q *WriteOptions)
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
wm := &WriteMeta{}
wm.RequestTime = rtt
diff --git a/vendor/github.com/hashicorp/consul/api/operator_autopilot.go b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go
index 57876ee9f..8175f5133 100644
--- a/vendor/github.com/hashicorp/consul/api/operator_autopilot.go
+++ b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go
@@ -284,7 +284,7 @@ func (op *Operator) AutopilotGetConfiguration(q *QueryOptions) (*AutopilotConfig
if err != nil {
return nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
var out AutopilotConfiguration
if err := decodeBody(resp, &out); err != nil {
@@ -303,7 +303,7 @@ func (op *Operator) AutopilotSetConfiguration(conf *AutopilotConfiguration, q *W
if err != nil {
return err
}
- resp.Body.Close()
+ closeResponseBody(resp)
return nil
}
@@ -319,7 +319,7 @@ func (op *Operator) AutopilotCASConfiguration(conf *AutopilotConfiguration, q *W
if err != nil {
return false, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
var buf bytes.Buffer
if _, err := io.Copy(&buf, resp.Body); err != nil {
@@ -334,11 +334,24 @@ func (op *Operator) AutopilotCASConfiguration(conf *AutopilotConfiguration, q *W
func (op *Operator) AutopilotServerHealth(q *QueryOptions) (*OperatorHealthReply, error) {
r := op.c.newRequest("GET", "/v1/operator/autopilot/health")
r.setQueryOptions(q)
- _, resp, err := requireOK(op.c.doRequest(r))
+
+ // we cannot just use requireOK because this endpoint might use a 429 status to indicate
+ // that unhealthiness
+ _, resp, err := op.c.doRequest(r)
if err != nil {
+ if resp != nil {
+ closeResponseBody(resp)
+ }
return nil, err
}
- defer resp.Body.Close()
+
+ // these are the only 2 status codes that would indicate that we should
+ // expect the body to contain the right format.
+ if resp.StatusCode != 200 && resp.StatusCode != 429 {
+ return nil, generateUnexpectedResponseCodeError(resp)
+ }
+
+ defer closeResponseBody(resp)
var out OperatorHealthReply
if err := decodeBody(resp, &out); err != nil {
@@ -354,7 +367,7 @@ func (op *Operator) AutopilotState(q *QueryOptions) (*AutopilotState, error) {
if err != nil {
return nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
var out AutopilotState
if err := decodeBody(resp, &out); err != nil {
diff --git a/vendor/github.com/hashicorp/consul/api/operator_keyring.go b/vendor/github.com/hashicorp/consul/api/operator_keyring.go
index 5b80f9f91..baad70eed 100644
--- a/vendor/github.com/hashicorp/consul/api/operator_keyring.go
+++ b/vendor/github.com/hashicorp/consul/api/operator_keyring.go
@@ -40,7 +40,7 @@ func (op *Operator) KeyringInstall(key string, q *WriteOptions) error {
if err != nil {
return err
}
- resp.Body.Close()
+ closeResponseBody(resp)
return nil
}
@@ -52,7 +52,7 @@ func (op *Operator) KeyringList(q *QueryOptions) ([]*KeyringResponse, error) {
if err != nil {
return nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
var out []*KeyringResponse
if err := decodeBody(resp, &out); err != nil {
@@ -72,7 +72,7 @@ func (op *Operator) KeyringRemove(key string, q *WriteOptions) error {
if err != nil {
return err
}
- resp.Body.Close()
+ closeResponseBody(resp)
return nil
}
@@ -87,6 +87,6 @@ func (op *Operator) KeyringUse(key string, q *WriteOptions) error {
if err != nil {
return err
}
- resp.Body.Close()
+ closeResponseBody(resp)
return nil
}
diff --git a/vendor/github.com/hashicorp/consul/api/operator_license.go b/vendor/github.com/hashicorp/consul/api/operator_license.go
index 51b64cef4..73e5051ba 100644
--- a/vendor/github.com/hashicorp/consul/api/operator_license.go
+++ b/vendor/github.com/hashicorp/consul/api/operator_license.go
@@ -66,7 +66,7 @@ func (op *Operator) LicenseGetSigned(q *QueryOptions) (string, error) {
if err != nil {
return "", err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
@@ -78,6 +78,9 @@ func (op *Operator) LicenseGetSigned(q *QueryOptions) (string, error) {
// LicenseReset will reset the license to the builtin one if it is still valid.
// If the builtin license is invalid, the current license stays active.
+//
+// DEPRECATED: Consul 1.10 removes the corresponding HTTP endpoint as licenses
+// are now set via agent configuration instead of through the API
func (op *Operator) LicenseReset(opts *WriteOptions) (*LicenseReply, error) {
var reply LicenseReply
r := op.c.newRequest("DELETE", "/v1/operator/license")
@@ -87,14 +90,16 @@ func (op *Operator) LicenseReset(opts *WriteOptions) (*LicenseReply, error) {
return nil, err
}
defer resp.Body.Close()
-
if err := decodeBody(resp, &reply); err != nil {
return nil, err
}
-
return &reply, nil
}
+// LicensePut will configure the Consul Enterprise license for the target datacenter
+//
+// DEPRECATED: Consul 1.10 removes the corresponding HTTP endpoint as licenses
+// are now set via agent configuration instead of through the API
func (op *Operator) LicensePut(license string, opts *WriteOptions) (*LicenseReply, error) {
var reply LicenseReply
r := op.c.newRequest("PUT", "/v1/operator/license")
diff --git a/vendor/github.com/hashicorp/consul/api/operator_raft.go b/vendor/github.com/hashicorp/consul/api/operator_raft.go
index c6d7165d4..0bfb85d00 100644
--- a/vendor/github.com/hashicorp/consul/api/operator_raft.go
+++ b/vendor/github.com/hashicorp/consul/api/operator_raft.go
@@ -44,7 +44,7 @@ func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, e
if err != nil {
return nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
var out RaftConfiguration
if err := decodeBody(resp, &out); err != nil {
@@ -67,7 +67,7 @@ func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) err
return err
}
- resp.Body.Close()
+ closeResponseBody(resp)
return nil
}
@@ -84,6 +84,6 @@ func (op *Operator) RaftRemovePeerByID(id string, q *WriteOptions) error {
return err
}
- resp.Body.Close()
+ closeResponseBody(resp)
return nil
}
diff --git a/vendor/github.com/hashicorp/consul/api/prepared_query.go b/vendor/github.com/hashicorp/consul/api/prepared_query.go
index 5ac2535c7..5b2d5a5d1 100644
--- a/vendor/github.com/hashicorp/consul/api/prepared_query.go
+++ b/vendor/github.com/hashicorp/consul/api/prepared_query.go
@@ -158,7 +158,7 @@ func (c *PreparedQuery) Create(query *PreparedQueryDefinition, q *WriteOptions)
if err != nil {
return "", nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
wm := &WriteMeta{}
wm.RequestTime = rtt
@@ -204,7 +204,7 @@ func (c *PreparedQuery) Delete(queryID string, q *WriteOptions) (*WriteMeta, err
if err != nil {
return nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
wm := &WriteMeta{}
wm.RequestTime = rtt
diff --git a/vendor/github.com/hashicorp/consul/api/session.go b/vendor/github.com/hashicorp/consul/api/session.go
index 157ad53f5..3f61acfbb 100644
--- a/vendor/github.com/hashicorp/consul/api/session.go
+++ b/vendor/github.com/hashicorp/consul/api/session.go
@@ -141,7 +141,7 @@ func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta,
if err != nil {
return nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
wm := &WriteMeta{RequestTime: rtt}
diff --git a/vendor/github.com/hashicorp/consul/api/snapshot.go b/vendor/github.com/hashicorp/consul/api/snapshot.go
index e902377dd..0c8294f37 100644
--- a/vendor/github.com/hashicorp/consul/api/snapshot.go
+++ b/vendor/github.com/hashicorp/consul/api/snapshot.go
@@ -38,6 +38,7 @@ func (s *Snapshot) Save(q *QueryOptions) (io.ReadCloser, *QueryMeta, error) {
func (s *Snapshot) Restore(q *WriteOptions, in io.Reader) error {
r := s.c.newRequest("PUT", "/v1/snapshot")
r.body = in
+ r.header.Set("Content-Type", "application/octet-stream")
r.setWriteOptions(q)
_, _, err := requireOK(s.c.doRequest(r))
if err != nil {
diff --git a/vendor/github.com/hashicorp/consul/api/status.go b/vendor/github.com/hashicorp/consul/api/status.go
index 57f379c7b..2a81b9b5f 100644
--- a/vendor/github.com/hashicorp/consul/api/status.go
+++ b/vendor/github.com/hashicorp/consul/api/status.go
@@ -22,7 +22,7 @@ func (s *Status) LeaderWithQueryOptions(q *QueryOptions) (string, error) {
if err != nil {
return "", err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
var leader string
if err := decodeBody(resp, &leader); err != nil {
@@ -47,7 +47,7 @@ func (s *Status) PeersWithQueryOptions(q *QueryOptions) ([]string, error) {
if err != nil {
return nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
var peers []string
if err := decodeBody(resp, &peers); err != nil {
diff --git a/vendor/github.com/hashicorp/consul/api/txn.go b/vendor/github.com/hashicorp/consul/api/txn.go
index ef06bcbfe..55eb805f4 100644
--- a/vendor/github.com/hashicorp/consul/api/txn.go
+++ b/vendor/github.com/hashicorp/consul/api/txn.go
@@ -221,7 +221,7 @@ func (c *Client) txn(txn TxnOps, q *QueryOptions) (bool, *TxnResponse, *QueryMet
if err != nil {
return false, nil, nil, err
}
- defer resp.Body.Close()
+ defer closeResponseBody(resp)
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
diff --git a/vendor/github.com/hashicorp/go-hclog/interceptlogger.go b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go
index 08a6677eb..7e86dc878 100644
--- a/vendor/github.com/hashicorp/go-hclog/interceptlogger.go
+++ b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go
@@ -213,34 +213,18 @@ func (i *interceptLogger) DeregisterSink(sink SinkAdapter) {
// Create a *log.Logger that will send it's data through this Logger. This
// allows packages that expect to be using the standard library to log to
// actually use this logger, which will also send to any registered sinks.
-func (i *interceptLogger) StandardLoggerIntercept(opts *StandardLoggerOptions) *log.Logger {
+func (l *interceptLogger) StandardLoggerIntercept(opts *StandardLoggerOptions) *log.Logger {
if opts == nil {
opts = &StandardLoggerOptions{}
}
- return log.New(i.StandardWriterIntercept(opts), "", 0)
+ return log.New(l.StandardWriterIntercept(opts), "", 0)
}
-func (i *interceptLogger) StandardWriterIntercept(opts *StandardLoggerOptions) io.Writer {
+func (l *interceptLogger) StandardWriterIntercept(opts *StandardLoggerOptions) io.Writer {
return &stdlogAdapter{
- log: i,
+ log: l,
inferLevels: opts.InferLevels,
forceLevel: opts.ForceLevel,
}
}
-
-func (i *interceptLogger) ResetOutput(opts *LoggerOptions) error {
- if or, ok := i.Logger.(OutputResettable); ok {
- return or.ResetOutput(opts)
- } else {
- return nil
- }
-}
-
-func (i *interceptLogger) ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error {
- if or, ok := i.Logger.(OutputResettable); ok {
- return or.ResetOutputWithFlush(opts, flushable)
- } else {
- return nil
- }
-}
diff --git a/vendor/github.com/hashicorp/go-hclog/intlogger.go b/vendor/github.com/hashicorp/go-hclog/intlogger.go
index 1e5c84339..5d76ee3fb 100644
--- a/vendor/github.com/hashicorp/go-hclog/intlogger.go
+++ b/vendor/github.com/hashicorp/go-hclog/intlogger.go
@@ -58,9 +58,9 @@ type intLogger struct {
name string
timeFormat string
- // This is an interface so that it's shared by any derived loggers, since
+ // This is a pointer so that it's shared by any derived loggers, since
// those derived loggers share the bufio.Writer as well.
- mutex Locker
+ mutex *sync.Mutex
writer *writer
level *int32
diff --git a/vendor/github.com/hashicorp/go-hclog/logger.go b/vendor/github.com/hashicorp/go-hclog/logger.go
index 66f4b7d09..147bd2d78 100644
--- a/vendor/github.com/hashicorp/go-hclog/logger.go
+++ b/vendor/github.com/hashicorp/go-hclog/logger.go
@@ -5,6 +5,7 @@ import (
"log"
"os"
"strings"
+ "sync"
)
var (
@@ -89,25 +90,6 @@ func LevelFromString(levelStr string) Level {
}
}
-func (l Level) String() string {
- switch l {
- case Trace:
- return "trace"
- case Debug:
- return "debug"
- case Info:
- return "info"
- case Warn:
- return "warn"
- case Error:
- return "error"
- case NoLevel:
- return "none"
- default:
- return "unknown"
- }
-}
-
// Logger describes the interface that must be implemeted by all loggers.
type Logger interface {
// Args are alternating key, val pairs
@@ -204,10 +186,8 @@ type LoggerOptions struct {
// Where to write the logs to. Defaults to os.Stderr if nil
Output io.Writer
- // An optional Locker in case Output is shared. This can be a sync.Mutex or
- // a NoopLocker if the caller wants control over output, e.g. for batching
- // log lines.
- Mutex Locker
+ // An optional mutex pointer in case Output is shared
+ Mutex *sync.Mutex
// Control if the output should be in JSON.
JSONFormat bool
@@ -280,26 +260,3 @@ type OutputResettable interface {
// given in opts will be used for the new output.
ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error
}
-
-// Locker is used for locking output. If not set when creating a logger, a
-// sync.Mutex will be used internally.
-type Locker interface {
- // Lock is called when the output is going to be changed or written to
- Lock()
-
- // Unlock is called when the operation that called Lock() completes
- Unlock()
-}
-
-// NoopLocker implements locker but does nothing. This is useful if the client
-// wants tight control over locking, in order to provide grouping of log
-// entries or other functionality.
-type NoopLocker struct{}
-
-// Lock does nothing
-func (n NoopLocker) Lock() {}
-
-// Unlock does nothing
-func (n NoopLocker) Unlock() {}
-
-var _ Locker = (*NoopLocker)(nil)
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/.travis.yml b/vendor/github.com/hashicorp/go-immutable-radix/.travis.yml
new file mode 100644
index 000000000..1a0bbea6c
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/.travis.yml
@@ -0,0 +1,3 @@
+language: go
+go:
+ - tip
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md b/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md
deleted file mode 100644
index a967ae456..000000000
--- a/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# UNRELEASED
-
-FEATURES
-
-* Add `SeekLowerBound` to allow for range scans. [[GH-24](https://github.com/hashicorp/go-immutable-radix/pull/24)]
-
-# 1.0.0 (August 30th, 2018)
-
-* go mod adopted
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/README.md b/vendor/github.com/hashicorp/go-immutable-radix/README.md
index aca15a642..8910fcc03 100644
--- a/vendor/github.com/hashicorp/go-immutable-radix/README.md
+++ b/vendor/github.com/hashicorp/go-immutable-radix/README.md
@@ -1,4 +1,4 @@
-go-immutable-radix [![CircleCI](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master.svg?style=svg)](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master)
+go-immutable-radix [![Build Status](https://travis-ci.org/hashicorp/go-immutable-radix.png)](https://travis-ci.org/hashicorp/go-immutable-radix)
=========
Provides the `iradix` package that implements an immutable [radix tree](http://en.wikipedia.org/wiki/Radix_tree).
@@ -39,28 +39,3 @@ if string(m) != "foo" {
}
```
-Here is an example of performing a range scan of the keys.
-
-```go
-// Create a tree
-r := iradix.New()
-r, _, _ = r.Insert([]byte("001"), 1)
-r, _, _ = r.Insert([]byte("002"), 2)
-r, _, _ = r.Insert([]byte("005"), 5)
-r, _, _ = r.Insert([]byte("010"), 10)
-r, _, _ = r.Insert([]byte("100"), 10)
-
-// Range scan over the keys that sort lexicographically between [003, 050)
-it := r.Root().Iterator()
-it.SeekLowerBound([]byte("003"))
-for key, _, ok := it.Next(); ok; key, _, ok = it.Next() {
- if key >= "050" {
- break
- }
- fmt.Println(key)
-}
-// Output:
-// 005
-// 010
-```
-
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iradix.go b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go
index 168bda76d..e5e6e57f2 100644
--- a/vendor/github.com/hashicorp/go-immutable-radix/iradix.go
+++ b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go
@@ -86,20 +86,6 @@ func (t *Tree) Txn() *Txn {
return txn
}
-// Clone makes an independent copy of the transaction. The new transaction
-// does not track any nodes and has TrackMutate turned off. The cloned transaction will contain any uncommitted writes in the original transaction but further mutations to either will be independent and result in different radix trees on Commit. A cloned transaction may be passed to another goroutine and mutated there independently however each transaction may only be mutated in a single thread.
-func (t *Txn) Clone() *Txn {
- // reset the writable node cache to avoid leaking future writes into the clone
- t.writable = nil
-
- txn := &Txn{
- root: t.root,
- snap: t.snap,
- size: t.size,
- }
- return txn
-}
-
// TrackMutate can be used to toggle if mutations are tracked. If this is enabled
// then notifications will be issued for affected internal nodes and leaves when
// the transaction is committed.
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iter.go b/vendor/github.com/hashicorp/go-immutable-radix/iter.go
index 1ecaf831c..9815e0253 100644
--- a/vendor/github.com/hashicorp/go-immutable-radix/iter.go
+++ b/vendor/github.com/hashicorp/go-immutable-radix/iter.go
@@ -1,8 +1,6 @@
package iradix
-import (
- "bytes"
-)
+import "bytes"
// Iterator is used to iterate over a set of nodes
// in pre-order
@@ -55,101 +53,6 @@ func (i *Iterator) SeekPrefix(prefix []byte) {
i.SeekPrefixWatch(prefix)
}
-func (i *Iterator) recurseMin(n *Node) *Node {
- // Traverse to the minimum child
- if n.leaf != nil {
- return n
- }
- if len(n.edges) > 0 {
- // Add all the other edges to the stack (the min node will be added as
- // we recurse)
- i.stack = append(i.stack, n.edges[1:])
- return i.recurseMin(n.edges[0].node)
- }
- // Shouldn't be possible
- return nil
-}
-
-// SeekLowerBound is used to seek the iterator to the smallest key that is
-// greater or equal to the given key. There is no watch variant as it's hard to
-// predict based on the radix structure which node(s) changes might affect the
-// result.
-func (i *Iterator) SeekLowerBound(key []byte) {
- // Wipe the stack. Unlike Prefix iteration, we need to build the stack as we
- // go because we need only a subset of edges of many nodes in the path to the
- // leaf with the lower bound.
- i.stack = []edges{}
- n := i.node
- search := key
-
- found := func(n *Node) {
- i.node = n
- i.stack = append(i.stack, edges{edge{node: n}})
- }
-
- for {
- // Compare current prefix with the search key's same-length prefix.
- var prefixCmp int
- if len(n.prefix) < len(search) {
- prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)])
- } else {
- prefixCmp = bytes.Compare(n.prefix, search)
- }
-
- if prefixCmp > 0 {
- // Prefix is larger, that means the lower bound is greater than the search
- // and from now on we need to follow the minimum path to the smallest
- // leaf under this subtree.
- n = i.recurseMin(n)
- if n != nil {
- found(n)
- }
- return
- }
-
- if prefixCmp < 0 {
- // Prefix is smaller than search prefix, that means there is no lower
- // bound
- i.node = nil
- return
- }
-
- // Prefix is equal, we are still heading for an exact match. If this is a
- // leaf we're done.
- if n.leaf != nil {
- if bytes.Compare(n.leaf.key, key) < 0 {
- i.node = nil
- return
- }
- found(n)
- return
- }
-
- // Consume the search prefix
- if len(n.prefix) > len(search) {
- search = []byte{}
- } else {
- search = search[len(n.prefix):]
- }
-
- // Otherwise, take the lower bound next edge.
- idx, lbNode := n.getLowerBoundEdge(search[0])
- if lbNode == nil {
- i.node = nil
- return
- }
-
- // Create stack edges for the all strictly higher edges in this node.
- if idx+1 < len(n.edges) {
- i.stack = append(i.stack, n.edges[idx+1:])
- }
-
- i.node = lbNode
- // Recurse
- n = lbNode
- }
-}
-
// Next returns the next node in order
func (i *Iterator) Next() ([]byte, interface{}, bool) {
// Initialize our stack if needed
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/node.go b/vendor/github.com/hashicorp/go-immutable-radix/node.go
index 3ab904edc..7a065e7a0 100644
--- a/vendor/github.com/hashicorp/go-immutable-radix/node.go
+++ b/vendor/github.com/hashicorp/go-immutable-radix/node.go
@@ -79,18 +79,6 @@ func (n *Node) getEdge(label byte) (int, *Node) {
return -1, nil
}
-func (n *Node) getLowerBoundEdge(label byte) (int, *Node) {
- num := len(n.edges)
- idx := sort.Search(num, func(i int) bool {
- return n.edges[i].label >= label
- })
- // we want lower bound behavior so return even if it's not an exact match
- if idx < num {
- return idx, n.edges[idx].node
- }
- return -1, nil
-}
-
func (n *Node) delEdge(label byte) {
num := len(n.edges)
idx := sort.Search(num, func(i int) bool {
diff --git a/vendor/github.com/hashicorp/golang-lru/.gitignore b/vendor/github.com/hashicorp/golang-lru/.gitignore
deleted file mode 100644
index 836562412..000000000
--- a/vendor/github.com/hashicorp/golang-lru/.gitignore
+++ /dev/null
@@ -1,23 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-*.test
diff --git a/vendor/github.com/hashicorp/golang-lru/2q.go b/vendor/github.com/hashicorp/golang-lru/2q.go
deleted file mode 100644
index e474cd075..000000000
--- a/vendor/github.com/hashicorp/golang-lru/2q.go
+++ /dev/null
@@ -1,223 +0,0 @@
-package lru
-
-import (
- "fmt"
- "sync"
-
- "github.com/hashicorp/golang-lru/simplelru"
-)
-
-const (
- // Default2QRecentRatio is the ratio of the 2Q cache dedicated
- // to recently added entries that have only been accessed once.
- Default2QRecentRatio = 0.25
-
- // Default2QGhostEntries is the default ratio of ghost
- // entries kept to track entries recently evicted
- Default2QGhostEntries = 0.50
-)
-
-// TwoQueueCache is a thread-safe fixed size 2Q cache.
-// 2Q is an enhancement over the standard LRU cache
-// in that it tracks both frequently and recently used
-// entries separately. This avoids a burst in access to new
-// entries from evicting frequently used entries. It adds some
-// additional tracking overhead to the standard LRU cache, and is
-// computationally about 2x the cost, and adds some metadata over
-// head. The ARCCache is similar, but does not require setting any
-// parameters.
-type TwoQueueCache struct {
- size int
- recentSize int
-
- recent simplelru.LRUCache
- frequent simplelru.LRUCache
- recentEvict simplelru.LRUCache
- lock sync.RWMutex
-}
-
-// New2Q creates a new TwoQueueCache using the default
-// values for the parameters.
-func New2Q(size int) (*TwoQueueCache, error) {
- return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries)
-}
-
-// New2QParams creates a new TwoQueueCache using the provided
-// parameter values.
-func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCache, error) {
- if size <= 0 {
- return nil, fmt.Errorf("invalid size")
- }
- if recentRatio < 0.0 || recentRatio > 1.0 {
- return nil, fmt.Errorf("invalid recent ratio")
- }
- if ghostRatio < 0.0 || ghostRatio > 1.0 {
- return nil, fmt.Errorf("invalid ghost ratio")
- }
-
- // Determine the sub-sizes
- recentSize := int(float64(size) * recentRatio)
- evictSize := int(float64(size) * ghostRatio)
-
- // Allocate the LRUs
- recent, err := simplelru.NewLRU(size, nil)
- if err != nil {
- return nil, err
- }
- frequent, err := simplelru.NewLRU(size, nil)
- if err != nil {
- return nil, err
- }
- recentEvict, err := simplelru.NewLRU(evictSize, nil)
- if err != nil {
- return nil, err
- }
-
- // Initialize the cache
- c := &TwoQueueCache{
- size: size,
- recentSize: recentSize,
- recent: recent,
- frequent: frequent,
- recentEvict: recentEvict,
- }
- return c, nil
-}
-
-// Get looks up a key's value from the cache.
-func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- // Check if this is a frequent value
- if val, ok := c.frequent.Get(key); ok {
- return val, ok
- }
-
- // If the value is contained in recent, then we
- // promote it to frequent
- if val, ok := c.recent.Peek(key); ok {
- c.recent.Remove(key)
- c.frequent.Add(key, val)
- return val, ok
- }
-
- // No hit
- return nil, false
-}
-
-// Add adds a value to the cache.
-func (c *TwoQueueCache) Add(key, value interface{}) {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- // Check if the value is frequently used already,
- // and just update the value
- if c.frequent.Contains(key) {
- c.frequent.Add(key, value)
- return
- }
-
- // Check if the value is recently used, and promote
- // the value into the frequent list
- if c.recent.Contains(key) {
- c.recent.Remove(key)
- c.frequent.Add(key, value)
- return
- }
-
- // If the value was recently evicted, add it to the
- // frequently used list
- if c.recentEvict.Contains(key) {
- c.ensureSpace(true)
- c.recentEvict.Remove(key)
- c.frequent.Add(key, value)
- return
- }
-
- // Add to the recently seen list
- c.ensureSpace(false)
- c.recent.Add(key, value)
- return
-}
-
-// ensureSpace is used to ensure we have space in the cache
-func (c *TwoQueueCache) ensureSpace(recentEvict bool) {
- // If we have space, nothing to do
- recentLen := c.recent.Len()
- freqLen := c.frequent.Len()
- if recentLen+freqLen < c.size {
- return
- }
-
- // If the recent buffer is larger than
- // the target, evict from there
- if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) {
- k, _, _ := c.recent.RemoveOldest()
- c.recentEvict.Add(k, nil)
- return
- }
-
- // Remove from the frequent list otherwise
- c.frequent.RemoveOldest()
-}
-
-// Len returns the number of items in the cache.
-func (c *TwoQueueCache) Len() int {
- c.lock.RLock()
- defer c.lock.RUnlock()
- return c.recent.Len() + c.frequent.Len()
-}
-
-// Keys returns a slice of the keys in the cache.
-// The frequently used keys are first in the returned slice.
-func (c *TwoQueueCache) Keys() []interface{} {
- c.lock.RLock()
- defer c.lock.RUnlock()
- k1 := c.frequent.Keys()
- k2 := c.recent.Keys()
- return append(k1, k2...)
-}
-
-// Remove removes the provided key from the cache.
-func (c *TwoQueueCache) Remove(key interface{}) {
- c.lock.Lock()
- defer c.lock.Unlock()
- if c.frequent.Remove(key) {
- return
- }
- if c.recent.Remove(key) {
- return
- }
- if c.recentEvict.Remove(key) {
- return
- }
-}
-
-// Purge is used to completely clear the cache.
-func (c *TwoQueueCache) Purge() {
- c.lock.Lock()
- defer c.lock.Unlock()
- c.recent.Purge()
- c.frequent.Purge()
- c.recentEvict.Purge()
-}
-
-// Contains is used to check if the cache contains a key
-// without updating recency or frequency.
-func (c *TwoQueueCache) Contains(key interface{}) bool {
- c.lock.RLock()
- defer c.lock.RUnlock()
- return c.frequent.Contains(key) || c.recent.Contains(key)
-}
-
-// Peek is used to inspect the cache value of a key
-// without updating recency or frequency.
-func (c *TwoQueueCache) Peek(key interface{}) (value interface{}, ok bool) {
- c.lock.RLock()
- defer c.lock.RUnlock()
- if val, ok := c.frequent.Peek(key); ok {
- return val, ok
- }
- return c.recent.Peek(key)
-}
diff --git a/vendor/github.com/hashicorp/golang-lru/README.md b/vendor/github.com/hashicorp/golang-lru/README.md
deleted file mode 100644
index 33e58cfaf..000000000
--- a/vendor/github.com/hashicorp/golang-lru/README.md
+++ /dev/null
@@ -1,25 +0,0 @@
-golang-lru
-==========
-
-This provides the `lru` package which implements a fixed-size
-thread safe LRU cache. It is based on the cache in Groupcache.
-
-Documentation
-=============
-
-Full docs are available on [Godoc](http://godoc.org/github.com/hashicorp/golang-lru)
-
-Example
-=======
-
-Using the LRU is very simple:
-
-```go
-l, _ := New(128)
-for i := 0; i < 256; i++ {
- l.Add(i, nil)
-}
-if l.Len() != 128 {
- panic(fmt.Sprintf("bad len: %v", l.Len()))
-}
-```
diff --git a/vendor/github.com/hashicorp/golang-lru/arc.go b/vendor/github.com/hashicorp/golang-lru/arc.go
deleted file mode 100644
index 555225a21..000000000
--- a/vendor/github.com/hashicorp/golang-lru/arc.go
+++ /dev/null
@@ -1,257 +0,0 @@
-package lru
-
-import (
- "sync"
-
- "github.com/hashicorp/golang-lru/simplelru"
-)
-
-// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC).
-// ARC is an enhancement over the standard LRU cache in that tracks both
-// frequency and recency of use. This avoids a burst in access to new
-// entries from evicting the frequently used older entries. It adds some
-// additional tracking overhead to a standard LRU cache, computationally
-// it is roughly 2x the cost, and the extra memory overhead is linear
-// with the size of the cache. ARC has been patented by IBM, but is
-// similar to the TwoQueueCache (2Q) which requires setting parameters.
-type ARCCache struct {
- size int // Size is the total capacity of the cache
- p int // P is the dynamic preference towards T1 or T2
-
- t1 simplelru.LRUCache // T1 is the LRU for recently accessed items
- b1 simplelru.LRUCache // B1 is the LRU for evictions from t1
-
- t2 simplelru.LRUCache // T2 is the LRU for frequently accessed items
- b2 simplelru.LRUCache // B2 is the LRU for evictions from t2
-
- lock sync.RWMutex
-}
-
-// NewARC creates an ARC of the given size
-func NewARC(size int) (*ARCCache, error) {
- // Create the sub LRUs
- b1, err := simplelru.NewLRU(size, nil)
- if err != nil {
- return nil, err
- }
- b2, err := simplelru.NewLRU(size, nil)
- if err != nil {
- return nil, err
- }
- t1, err := simplelru.NewLRU(size, nil)
- if err != nil {
- return nil, err
- }
- t2, err := simplelru.NewLRU(size, nil)
- if err != nil {
- return nil, err
- }
-
- // Initialize the ARC
- c := &ARCCache{
- size: size,
- p: 0,
- t1: t1,
- b1: b1,
- t2: t2,
- b2: b2,
- }
- return c, nil
-}
-
-// Get looks up a key's value from the cache.
-func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- // If the value is contained in T1 (recent), then
- // promote it to T2 (frequent)
- if val, ok := c.t1.Peek(key); ok {
- c.t1.Remove(key)
- c.t2.Add(key, val)
- return val, ok
- }
-
- // Check if the value is contained in T2 (frequent)
- if val, ok := c.t2.Get(key); ok {
- return val, ok
- }
-
- // No hit
- return nil, false
-}
-
-// Add adds a value to the cache.
-func (c *ARCCache) Add(key, value interface{}) {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- // Check if the value is contained in T1 (recent), and potentially
- // promote it to frequent T2
- if c.t1.Contains(key) {
- c.t1.Remove(key)
- c.t2.Add(key, value)
- return
- }
-
- // Check if the value is already in T2 (frequent) and update it
- if c.t2.Contains(key) {
- c.t2.Add(key, value)
- return
- }
-
- // Check if this value was recently evicted as part of the
- // recently used list
- if c.b1.Contains(key) {
- // T1 set is too small, increase P appropriately
- delta := 1
- b1Len := c.b1.Len()
- b2Len := c.b2.Len()
- if b2Len > b1Len {
- delta = b2Len / b1Len
- }
- if c.p+delta >= c.size {
- c.p = c.size
- } else {
- c.p += delta
- }
-
- // Potentially need to make room in the cache
- if c.t1.Len()+c.t2.Len() >= c.size {
- c.replace(false)
- }
-
- // Remove from B1
- c.b1.Remove(key)
-
- // Add the key to the frequently used list
- c.t2.Add(key, value)
- return
- }
-
- // Check if this value was recently evicted as part of the
- // frequently used list
- if c.b2.Contains(key) {
- // T2 set is too small, decrease P appropriately
- delta := 1
- b1Len := c.b1.Len()
- b2Len := c.b2.Len()
- if b1Len > b2Len {
- delta = b1Len / b2Len
- }
- if delta >= c.p {
- c.p = 0
- } else {
- c.p -= delta
- }
-
- // Potentially need to make room in the cache
- if c.t1.Len()+c.t2.Len() >= c.size {
- c.replace(true)
- }
-
- // Remove from B2
- c.b2.Remove(key)
-
- // Add the key to the frequently used list
- c.t2.Add(key, value)
- return
- }
-
- // Potentially need to make room in the cache
- if c.t1.Len()+c.t2.Len() >= c.size {
- c.replace(false)
- }
-
- // Keep the size of the ghost buffers trim
- if c.b1.Len() > c.size-c.p {
- c.b1.RemoveOldest()
- }
- if c.b2.Len() > c.p {
- c.b2.RemoveOldest()
- }
-
- // Add to the recently seen list
- c.t1.Add(key, value)
- return
-}
-
-// replace is used to adaptively evict from either T1 or T2
-// based on the current learned value of P
-func (c *ARCCache) replace(b2ContainsKey bool) {
- t1Len := c.t1.Len()
- if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) {
- k, _, ok := c.t1.RemoveOldest()
- if ok {
- c.b1.Add(k, nil)
- }
- } else {
- k, _, ok := c.t2.RemoveOldest()
- if ok {
- c.b2.Add(k, nil)
- }
- }
-}
-
-// Len returns the number of cached entries
-func (c *ARCCache) Len() int {
- c.lock.RLock()
- defer c.lock.RUnlock()
- return c.t1.Len() + c.t2.Len()
-}
-
-// Keys returns all the cached keys
-func (c *ARCCache) Keys() []interface{} {
- c.lock.RLock()
- defer c.lock.RUnlock()
- k1 := c.t1.Keys()
- k2 := c.t2.Keys()
- return append(k1, k2...)
-}
-
-// Remove is used to purge a key from the cache
-func (c *ARCCache) Remove(key interface{}) {
- c.lock.Lock()
- defer c.lock.Unlock()
- if c.t1.Remove(key) {
- return
- }
- if c.t2.Remove(key) {
- return
- }
- if c.b1.Remove(key) {
- return
- }
- if c.b2.Remove(key) {
- return
- }
-}
-
-// Purge is used to clear the cache
-func (c *ARCCache) Purge() {
- c.lock.Lock()
- defer c.lock.Unlock()
- c.t1.Purge()
- c.t2.Purge()
- c.b1.Purge()
- c.b2.Purge()
-}
-
-// Contains is used to check if the cache contains a key
-// without updating recency or frequency.
-func (c *ARCCache) Contains(key interface{}) bool {
- c.lock.RLock()
- defer c.lock.RUnlock()
- return c.t1.Contains(key) || c.t2.Contains(key)
-}
-
-// Peek is used to inspect the cache value of a key
-// without updating recency or frequency.
-func (c *ARCCache) Peek(key interface{}) (value interface{}, ok bool) {
- c.lock.RLock()
- defer c.lock.RUnlock()
- if val, ok := c.t1.Peek(key); ok {
- return val, ok
- }
- return c.t2.Peek(key)
-}
diff --git a/vendor/github.com/hashicorp/golang-lru/doc.go b/vendor/github.com/hashicorp/golang-lru/doc.go
deleted file mode 100644
index 2547df979..000000000
--- a/vendor/github.com/hashicorp/golang-lru/doc.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Package lru provides three different LRU caches of varying sophistication.
-//
-// Cache is a simple LRU cache. It is based on the
-// LRU implementation in groupcache:
-// https://github.com/golang/groupcache/tree/master/lru
-//
-// TwoQueueCache tracks frequently used and recently used entries separately.
-// This avoids a burst of accesses from taking out frequently used entries,
-// at the cost of about 2x computational overhead and some extra bookkeeping.
-//
-// ARCCache is an adaptive replacement cache. It tracks recent evictions as
-// well as recent usage in both the frequent and recent caches. Its
-// computational overhead is comparable to TwoQueueCache, but the memory
-// overhead is linear with the size of the cache.
-//
-// ARC has been patented by IBM, so do not use it if that is problematic for
-// your program.
-//
-// All caches in this package take locks while operating, and are therefore
-// thread-safe for consumers.
-package lru
diff --git a/vendor/github.com/hashicorp/golang-lru/go.mod b/vendor/github.com/hashicorp/golang-lru/go.mod
deleted file mode 100644
index 8ad8826b3..000000000
--- a/vendor/github.com/hashicorp/golang-lru/go.mod
+++ /dev/null
@@ -1,3 +0,0 @@
-module github.com/hashicorp/golang-lru
-
-go 1.12
diff --git a/vendor/github.com/hashicorp/golang-lru/lru.go b/vendor/github.com/hashicorp/golang-lru/lru.go
deleted file mode 100644
index 4e5e9d8fd..000000000
--- a/vendor/github.com/hashicorp/golang-lru/lru.go
+++ /dev/null
@@ -1,150 +0,0 @@
-package lru
-
-import (
- "sync"
-
- "github.com/hashicorp/golang-lru/simplelru"
-)
-
-// Cache is a thread-safe fixed size LRU cache.
-type Cache struct {
- lru simplelru.LRUCache
- lock sync.RWMutex
-}
-
-// New creates an LRU of the given size.
-func New(size int) (*Cache, error) {
- return NewWithEvict(size, nil)
-}
-
-// NewWithEvict constructs a fixed size cache with the given eviction
-// callback.
-func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) {
- lru, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted))
- if err != nil {
- return nil, err
- }
- c := &Cache{
- lru: lru,
- }
- return c, nil
-}
-
-// Purge is used to completely clear the cache.
-func (c *Cache) Purge() {
- c.lock.Lock()
- c.lru.Purge()
- c.lock.Unlock()
-}
-
-// Add adds a value to the cache. Returns true if an eviction occurred.
-func (c *Cache) Add(key, value interface{}) (evicted bool) {
- c.lock.Lock()
- evicted = c.lru.Add(key, value)
- c.lock.Unlock()
- return evicted
-}
-
-// Get looks up a key's value from the cache.
-func (c *Cache) Get(key interface{}) (value interface{}, ok bool) {
- c.lock.Lock()
- value, ok = c.lru.Get(key)
- c.lock.Unlock()
- return value, ok
-}
-
-// Contains checks if a key is in the cache, without updating the
-// recent-ness or deleting it for being stale.
-func (c *Cache) Contains(key interface{}) bool {
- c.lock.RLock()
- containKey := c.lru.Contains(key)
- c.lock.RUnlock()
- return containKey
-}
-
-// Peek returns the key value (or undefined if not found) without updating
-// the "recently used"-ness of the key.
-func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) {
- c.lock.RLock()
- value, ok = c.lru.Peek(key)
- c.lock.RUnlock()
- return value, ok
-}
-
-// ContainsOrAdd checks if a key is in the cache without updating the
-// recent-ness or deleting it for being stale, and if not, adds the value.
-// Returns whether found and whether an eviction occurred.
-func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- if c.lru.Contains(key) {
- return true, false
- }
- evicted = c.lru.Add(key, value)
- return false, evicted
-}
-
-// PeekOrAdd checks if a key is in the cache without updating the
-// recent-ness or deleting it for being stale, and if not, adds the value.
-// Returns whether found and whether an eviction occurred.
-func (c *Cache) PeekOrAdd(key, value interface{}) (previous interface{}, ok, evicted bool) {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- previous, ok = c.lru.Peek(key)
- if ok {
- return previous, true, false
- }
-
- evicted = c.lru.Add(key, value)
- return nil, false, evicted
-}
-
-// Remove removes the provided key from the cache.
-func (c *Cache) Remove(key interface{}) (present bool) {
- c.lock.Lock()
- present = c.lru.Remove(key)
- c.lock.Unlock()
- return
-}
-
-// Resize changes the cache size.
-func (c *Cache) Resize(size int) (evicted int) {
- c.lock.Lock()
- evicted = c.lru.Resize(size)
- c.lock.Unlock()
- return evicted
-}
-
-// RemoveOldest removes the oldest item from the cache.
-func (c *Cache) RemoveOldest() (key interface{}, value interface{}, ok bool) {
- c.lock.Lock()
- key, value, ok = c.lru.RemoveOldest()
- c.lock.Unlock()
- return
-}
-
-// GetOldest returns the oldest entry
-func (c *Cache) GetOldest() (key interface{}, value interface{}, ok bool) {
- c.lock.Lock()
- key, value, ok = c.lru.GetOldest()
- c.lock.Unlock()
- return
-}
-
-// Keys returns a slice of the keys in the cache, from oldest to newest.
-func (c *Cache) Keys() []interface{} {
- c.lock.RLock()
- keys := c.lru.Keys()
- c.lock.RUnlock()
- return keys
-}
-
-// Len returns the number of items in the cache.
-func (c *Cache) Len() int {
- c.lock.RLock()
- length := c.lru.Len()
- c.lock.RUnlock()
- return length
-}
diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
index a86c8539e..5673773b2 100644
--- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
+++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
@@ -73,9 +73,6 @@ func (c *LRU) Add(key, value interface{}) (evicted bool) {
func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
if ent, ok := c.items[key]; ok {
c.evictList.MoveToFront(ent)
- if ent.Value.(*entry) == nil {
- return nil, false
- }
return ent.Value.(*entry).value, true
}
return
@@ -145,19 +142,6 @@ func (c *LRU) Len() int {
return c.evictList.Len()
}
-// Resize changes the cache size.
-func (c *LRU) Resize(size int) (evicted int) {
- diff := c.Len() - size
- if diff < 0 {
- diff = 0
- }
- for i := 0; i < diff; i++ {
- c.removeOldest()
- }
- c.size = size
- return diff
-}
-
// removeOldest removes the oldest item from the cache.
func (c *LRU) removeOldest() {
ent := c.evictList.Back()
diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
index 92d70934d..74c707744 100644
--- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
+++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
@@ -10,7 +10,7 @@ type LRUCache interface {
// updates the "recently used"-ness of the key. #value, isFound
Get(key interface{}) (value interface{}, ok bool)
- // Checks if a key exists in cache without updating the recent-ness.
+ // Check if a key exsists in cache without updating the recent-ness.
Contains(key interface{}) (ok bool)
// Returns key's value without updating the "recently used"-ness of the key.
@@ -31,9 +31,6 @@ type LRUCache interface {
// Returns the number of items in the cache.
Len() int
- // Clears all cache entries.
+ // Clear all cache entries
Purge()
-
- // Resizes cache, returning number evicted
- Resize(int) int
}
diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/action.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/action.go
index 1a62f8bab..622f7bf44 100644
--- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/action.go
+++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/action.go
@@ -96,12 +96,16 @@ func (c *ActionClient) GetByID(ctx context.Context, id int) (*Action, *Response,
// ActionListOpts specifies options for listing actions.
type ActionListOpts struct {
ListOpts
+ ID []int
Status []ActionStatus
Sort []string
}
func (l ActionListOpts) values() url.Values {
vals := l.ListOpts.values()
+ for _, id := range l.ID {
+ vals.Add("id", fmt.Sprintf("%d", id))
+ }
for _, status := range l.Status {
vals.Add("status", string(status))
}
@@ -141,7 +145,7 @@ func (c *ActionClient) All(ctx context.Context) ([]*Action, error) {
opts := ActionListOpts{}
opts.PerPage = 50
- _, err := c.client.all(func(page int) (*Response, error) {
+ err := c.client.all(func(page int) (*Response, error) {
opts.Page = page
actions, resp, err := c.List(ctx, opts)
if err != nil {
@@ -157,24 +161,97 @@ func (c *ActionClient) All(ctx context.Context) ([]*Action, error) {
return allActions, nil
}
-// WatchProgress watches the action's progress until it completes with success or error.
-func (c *ActionClient) WatchProgress(ctx context.Context, action *Action) (<-chan int, <-chan error) {
- errCh := make(chan error, 1)
+// AllWithOpts returns all actions for the given options.
+func (c *ActionClient) AllWithOpts(ctx context.Context, opts ActionListOpts) ([]*Action, error) {
+ allActions := []*Action{}
+
+ err := c.client.all(func(page int) (*Response, error) {
+ opts.Page = page
+ actions, resp, err := c.List(ctx, opts)
+ if err != nil {
+ return resp, err
+ }
+ allActions = append(allActions, actions...)
+ return resp, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return allActions, nil
+}
+
+// WatchOverallProgress watches several actions' progress until they complete with success or error.
+func (c *ActionClient) WatchOverallProgress(ctx context.Context, actions []*Action) (<-chan int, <-chan error) {
+ errCh := make(chan error, len(actions))
progressCh := make(chan int)
go func() {
defer close(errCh)
defer close(progressCh)
+ successIDs := make([]int, 0, len(actions))
+ watchIDs := make(map[int]struct{}, len(actions))
+ for _, action := range actions {
+ watchIDs[action.ID] = struct{}{}
+ }
+
ticker := time.NewTicker(c.client.pollInterval)
- sendProgress := func(p int) {
+ defer ticker.Stop()
+ for {
select {
- case progressCh <- p:
- break
- default:
+ case <-ctx.Done():
+ errCh <- ctx.Err()
+ return
+ case <-ticker.C:
break
}
+
+ opts := ActionListOpts{}
+ for watchID := range watchIDs {
+ opts.ID = append(opts.ID, watchID)
+ }
+
+ as, err := c.AllWithOpts(ctx, opts)
+ if err != nil {
+ errCh <- err
+ return
+ }
+
+ for _, a := range as {
+ switch a.Status {
+ case ActionStatusRunning:
+ continue
+ case ActionStatusSuccess:
+ delete(watchIDs, a.ID)
+ successIDs := append(successIDs, a.ID)
+ sendProgress(progressCh, int(float64(len(actions)-len(successIDs))/float64(len(actions))*100))
+ case ActionStatusError:
+ delete(watchIDs, a.ID)
+ errCh <- fmt.Errorf("action %d failed: %w", a.ID, a.Error())
+ }
+ }
+
+ if len(watchIDs) == 0 {
+ return
+ }
}
+ }()
+
+ return progressCh, errCh
+}
+
+// WatchProgress watches one action's progress until it completes with success or error.
+func (c *ActionClient) WatchProgress(ctx context.Context, action *Action) (<-chan int, <-chan error) {
+ errCh := make(chan error, 1)
+ progressCh := make(chan int)
+
+ go func() {
+ defer close(errCh)
+ defer close(progressCh)
+
+ ticker := time.NewTicker(c.client.pollInterval)
+ defer ticker.Stop()
for {
select {
@@ -193,10 +270,9 @@ func (c *ActionClient) WatchProgress(ctx context.Context, action *Action) (<-cha
switch a.Status {
case ActionStatusRunning:
- sendProgress(a.Progress)
- break
+ sendProgress(progressCh, a.Progress)
case ActionStatusSuccess:
- sendProgress(100)
+ sendProgress(progressCh, 100)
errCh <- nil
return
case ActionStatusError:
@@ -208,3 +284,12 @@ func (c *ActionClient) WatchProgress(ctx context.Context, action *Action) (<-cha
return progressCh, errCh
}
+
+func sendProgress(progressCh chan int, p int) {
+ select {
+ case progressCh <- p:
+ break
+ default:
+ break
+ }
+}
diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/certificate.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/certificate.go
index 5d34f70e1..771a67e8b 100644
--- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/certificate.go
+++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/certificate.go
@@ -13,17 +13,81 @@ import (
"github.com/hetznercloud/hcloud-go/hcloud/schema"
)
+// CertificateType is the type of available certificate types.
+type CertificateType string
+
+// Available certificate types.
+const (
+ CertificateTypeUploaded CertificateType = "uploaded"
+ CertificateTypeManaged CertificateType = "managed"
+)
+
+// CertificateStatusType is defines the type for the various managed
+// certificate status.
+type CertificateStatusType string
+
+// Possible certificate status.
+const (
+ CertificateStatusTypePending CertificateStatusType = "pending"
+ CertificateStatusTypeFailed CertificateStatusType = "failed"
+
+ // only in issuance
+ CertificateStatusTypeCompleted CertificateStatusType = "completed"
+
+ // only in renewal
+ CertificateStatusTypeScheduled CertificateStatusType = "scheduled"
+ CertificateStatusTypeUnavailable CertificateStatusType = "unavailable"
+)
+
+// CertificateUsedByRefType is the type of used by references for
+// certificates.
+type CertificateUsedByRefType string
+
+// Possible users of certificates.
+const (
+ CertificateUsedByRefTypeLoadBalancer CertificateUsedByRefType = "load_balancer"
+)
+
+// CertificateUsedByRef points to a resource that uses this certificate.
+type CertificateUsedByRef struct {
+ ID int
+ Type CertificateUsedByRefType
+}
+
+// CertificateStatus indicates the status of a managed certificate.
+type CertificateStatus struct {
+ Issuance CertificateStatusType
+ Renewal CertificateStatusType
+ Error *Error
+}
+
+// IsFailed returns true if either the Issuance or the Renewal of a certificate
+// failed. In this case the FailureReason field details the nature of the
+// failure.
+func (st *CertificateStatus) IsFailed() bool {
+ return st.Issuance == CertificateStatusTypeFailed || st.Renewal == CertificateStatusTypeFailed
+}
+
// Certificate represents an certificate in the Hetzner Cloud.
type Certificate struct {
ID int
Name string
Labels map[string]string
+ Type CertificateType
Certificate string
Created time.Time
NotValidBefore time.Time
NotValidAfter time.Time
DomainNames []string
Fingerprint string
+ Status *CertificateStatus
+ UsedBy []CertificateUsedByRef
+}
+
+// CertificateCreateResult is the result of creating a certificate.
+type CertificateCreateResult struct {
+ Certificate *Certificate
+ Action *Action
}
// CertificateClient is a client for the Certificates API.
@@ -114,7 +178,7 @@ func (c *CertificateClient) All(ctx context.Context) ([]*Certificate, error) {
opts := CertificateListOpts{}
opts.PerPage = 50
- _, err := c.client.all(func(page int) (*Response, error) {
+ err := c.client.all(func(page int) (*Response, error) {
opts.Page = page
Certificate, resp, err := c.List(ctx, opts)
if err != nil {
@@ -134,7 +198,7 @@ func (c *CertificateClient) All(ctx context.Context) ([]*Certificate, error) {
func (c *CertificateClient) AllWithOpts(ctx context.Context, opts CertificateListOpts) ([]*Certificate, error) {
var allCertificates []*Certificate
- _, err := c.client.all(func(page int) (*Response, error) {
+ err := c.client.all(func(page int) (*Response, error) {
opts.Page = page
Certificates, resp, err := c.List(ctx, opts)
if err != nil {
@@ -153,9 +217,11 @@ func (c *CertificateClient) AllWithOpts(ctx context.Context, opts CertificateLis
// CertificateCreateOpts specifies options for creating a new Certificate.
type CertificateCreateOpts struct {
Name string
+ Type CertificateType
Certificate string
PrivateKey string
Labels map[string]string
+ DomainNames []string
}
// Validate checks if options are valid.
@@ -163,6 +229,24 @@ func (o CertificateCreateOpts) Validate() error {
if o.Name == "" {
return errors.New("missing name")
}
+ switch o.Type {
+ case "", CertificateTypeUploaded:
+ return o.validateUploaded()
+ case CertificateTypeManaged:
+ return o.validateManaged()
+ default:
+ return fmt.Errorf("invalid type: %s", o.Type)
+ }
+}
+
+func (o CertificateCreateOpts) validateManaged() error {
+ if len(o.DomainNames) == 0 {
+ return errors.New("no domain names")
+ }
+ return nil
+}
+
+func (o CertificateCreateOpts) validateUploaded() error {
if o.Certificate == "" {
return errors.New("missing certificate")
}
@@ -172,34 +256,71 @@ func (o CertificateCreateOpts) Validate() error {
return nil
}
-// Create creates a new certificate.
+// Create creates a new certificate uploaded certificate.
+//
+// Create returns an error for certificates of any other type. Use
+// CreateCertificate to create such certificates.
func (c *CertificateClient) Create(ctx context.Context, opts CertificateCreateOpts) (*Certificate, *Response, error) {
+ if !(opts.Type == "" || opts.Type == CertificateTypeUploaded) {
+ return nil, nil, fmt.Errorf("invalid certificate type: %s", opts.Type)
+ }
+ result, resp, err := c.CreateCertificate(ctx, opts)
+ if err != nil {
+ return nil, resp, err
+ }
+ return result.Certificate, resp, nil
+}
+
+// CreateCertificate creates a new certificate of any type.
+func (c *CertificateClient) CreateCertificate(
+ ctx context.Context, opts CertificateCreateOpts,
+) (CertificateCreateResult, *Response, error) {
+ var (
+ action *Action
+ reqBody schema.CertificateCreateRequest
+ )
+
if err := opts.Validate(); err != nil {
- return nil, nil, err
+ return CertificateCreateResult{}, nil, err
}
- reqBody := schema.CertificateCreateRequest{
- Name: opts.Name,
- Certificate: opts.Certificate,
- PrivateKey: opts.PrivateKey,
+
+ reqBody.Name = opts.Name
+
+ switch opts.Type {
+ case "", CertificateTypeUploaded:
+ reqBody.Type = string(CertificateTypeUploaded)
+ reqBody.Certificate = opts.Certificate
+ reqBody.PrivateKey = opts.PrivateKey
+ case CertificateTypeManaged:
+ reqBody.Type = string(CertificateTypeManaged)
+ reqBody.DomainNames = opts.DomainNames
+ default:
+ return CertificateCreateResult{}, nil, fmt.Errorf("invalid certificate type: %v", opts.Type)
}
+
if opts.Labels != nil {
reqBody.Labels = &opts.Labels
}
reqBodyData, err := json.Marshal(reqBody)
if err != nil {
- return nil, nil, err
+ return CertificateCreateResult{}, nil, err
}
req, err := c.client.NewRequest(ctx, "POST", "/certificates", bytes.NewReader(reqBodyData))
if err != nil {
- return nil, nil, err
+ return CertificateCreateResult{}, nil, err
}
respBody := schema.CertificateCreateResponse{}
resp, err := c.client.Do(req, &respBody)
if err != nil {
- return nil, resp, err
+ return CertificateCreateResult{}, resp, err
}
- return CertificateFromSchema(respBody.Certificate), resp, nil
+ cert := CertificateFromSchema(respBody.Certificate)
+ if respBody.Action != nil {
+ action = ActionFromSchema(*respBody.Action)
+ }
+
+ return CertificateCreateResult{Certificate: cert, Action: action}, resp, nil
}
// CertificateUpdateOpts specifies options for updating a Certificate.
@@ -244,3 +365,19 @@ func (c *CertificateClient) Delete(ctx context.Context, certificate *Certificate
}
return c.client.Do(req, nil)
}
+
+// RetryIssuance retries the issuance of a failed managed certificate.
+func (c *CertificateClient) RetryIssuance(ctx context.Context, certificate *Certificate) (*Action, *Response, error) {
+ var respBody schema.CertificateIssuanceRetryResponse
+
+ req, err := c.client.NewRequest(ctx, "POST", fmt.Sprintf("/certificates/%d/actions/retry", certificate.ID), nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ resp, err := c.client.Do(req, &respBody)
+ if err != nil {
+ return nil, nil, err
+ }
+ action := ActionFromSchema(respBody.Action)
+ return action, resp, nil
+}
diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/client.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/client.go
index b03d45f69..07db0b7b7 100644
--- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/client.go
+++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/client.go
@@ -15,7 +15,10 @@ import (
"strings"
"time"
+ "github.com/hetznercloud/hcloud-go/hcloud/internal/instrumentation"
+
"github.com/hetznercloud/hcloud-go/hcloud/schema"
+ "github.com/prometheus/client_golang/prometheus"
)
// Endpoint is the base URL of the API.
@@ -48,19 +51,21 @@ func ExponentialBackoff(b float64, d time.Duration) BackoffFunc {
// Client is a client for the Hetzner Cloud API.
type Client struct {
- endpoint string
- token string
- pollInterval time.Duration
- backoffFunc BackoffFunc
- httpClient *http.Client
- applicationName string
- applicationVersion string
- userAgent string
- debugWriter io.Writer
+ endpoint string
+ token string
+ pollInterval time.Duration
+ backoffFunc BackoffFunc
+ httpClient *http.Client
+ applicationName string
+ applicationVersion string
+ userAgent string
+ debugWriter io.Writer
+ instrumentationRegistry *prometheus.Registry
Action ActionClient
Certificate CertificateClient
Datacenter DatacenterClient
+ Firewall FirewallClient
FloatingIP FloatingIPClient
Image ImageClient
ISO ISOClient
@@ -73,6 +78,8 @@ type Client struct {
ServerType ServerTypeClient
SSHKey SSHKeyClient
Volume VolumeClient
+ PlacementGroup PlacementGroupClient
+ RDNS RDNSClient
}
// A ClientOption is used to configure a Client.
@@ -132,6 +139,13 @@ func WithHTTPClient(httpClient *http.Client) ClientOption {
}
}
+// WithInstrumentation configures a Client to collect metrics about the performed HTTP requests.
+func WithInstrumentation(registry *prometheus.Registry) ClientOption {
+ return func(client *Client) {
+ client.instrumentationRegistry = registry
+ }
+}
+
// NewClient creates a new client.
func NewClient(options ...ClientOption) *Client {
client := &Client{
@@ -146,6 +160,10 @@ func NewClient(options ...ClientOption) *Client {
}
client.buildUserAgent()
+ if client.instrumentationRegistry != nil {
+ i := instrumentation.New("api", client.instrumentationRegistry)
+ client.httpClient.Transport = i.InstrumentedRoundTripper()
+ }
client.Action = ActionClient{client: client}
client.Datacenter = DatacenterClient{client: client}
@@ -162,6 +180,9 @@ func NewClient(options ...ClientOption) *Client {
client.LoadBalancer = LoadBalancerClient{client: client}
client.LoadBalancerType = LoadBalancerTypeClient{client: client}
client.Certificate = CertificateClient{client: client}
+ client.Firewall = FirewallClient{client: client}
+ client.PlacementGroup = PlacementGroupClient{client: client}
+ client.RDNS = RDNSClient{client: client}
return client
}
@@ -204,8 +225,7 @@ func (c *Client) Do(r *http.Request, v interface{}) (*Response, error) {
}
if c.debugWriter != nil {
- // To get the response body we need to read it before the request was actually send. https://github.com/golang/go/issues/29792
- dumpReq, err := httputil.DumpRequestOut(r, true)
+ dumpReq, err := dumpRequest(r)
if err != nil {
return nil, err
}
@@ -241,12 +261,10 @@ func (c *Client) Do(r *http.Request, v interface{}) (*Response, error) {
err = errorFromResponse(resp, body)
if err == nil {
err = fmt.Errorf("hcloud: server responded with status code %d", resp.StatusCode)
- } else {
- if isRetryable(err) {
- c.backoff(retries)
- retries++
- continue
- }
+ } else if isRetryable(err) {
+ c.backoff(retries)
+ retries++
+ continue
}
return response, err
}
@@ -274,17 +292,17 @@ func (c *Client) backoff(retries int) {
time.Sleep(c.backoffFunc(retries))
}
-func (c *Client) all(f func(int) (*Response, error)) (*Response, error) {
+func (c *Client) all(f func(int) (*Response, error)) error {
var (
page = 1
)
for {
resp, err := f(page)
if err != nil {
- return nil, err
+ return err
}
if resp.Meta.Pagination == nil || resp.Meta.Pagination.NextPage == 0 {
- return resp, nil
+ return nil
}
page = resp.Meta.Pagination.NextPage
}
@@ -301,6 +319,25 @@ func (c *Client) buildUserAgent() {
}
}
+func dumpRequest(r *http.Request) ([]byte, error) {
+ // Duplicate the request, so we can redact the auth header
+ rDuplicate := r.Clone(context.Background())
+ rDuplicate.Header.Set("Authorization", "REDACTED")
+
+ // To get the request body we need to read it before the request was actually sent.
+ // See https://github.com/golang/go/issues/29792
+ dumpReq, err := httputil.DumpRequestOut(rDuplicate, true)
+ if err != nil {
+ return nil, err
+ }
+
+ // Set original request body to the duplicate created by DumpRequestOut. The request body is not duplicated
+ // by .Clone() and instead just referenced, so it would be completely read otherwise.
+ r.Body = rDuplicate.Body
+
+ return dumpReq, nil
+}
+
func errorFromResponse(resp *http.Response, body []byte) error {
if !strings.HasPrefix(resp.Header.Get("Content-Type"), "application/json") {
return nil
diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/datacenter.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/datacenter.go
index 272cd6aef..ae8b6c601 100644
--- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/datacenter.go
+++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/datacenter.go
@@ -112,7 +112,7 @@ func (c *DatacenterClient) All(ctx context.Context) ([]*Datacenter, error) {
opts := DatacenterListOpts{}
opts.PerPage = 50
- _, err := c.client.all(func(page int) (*Response, error) {
+ err := c.client.all(func(page int) (*Response, error) {
opts.Page = page
datacenters, resp, err := c.List(ctx, opts)
if err != nil {
diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/error.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/error.go
index db67652b7..d7cb7502f 100644
--- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/error.go
+++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/error.go
@@ -1,6 +1,9 @@
package hcloud
-import "fmt"
+import (
+ "fmt"
+ "net"
+)
// ErrorCode represents an error code returned from the API.
type ErrorCode string
@@ -51,6 +54,22 @@ const (
ErrorCodeNoSpaceLeftInLocation ErrorCode = "no_space_left_in_location" // There is no volume space left in the given location
ErrorCodeVolumeAlreadyAttached ErrorCode = "volume_already_attached" // Volume is already attached to a server, detach first
+ // Firewall related error codes
+ ErrorCodeFirewallAlreadyApplied ErrorCode = "firewall_already_applied" // Firewall was already applied on resource
+ ErrorCodeFirewallAlreadyRemoved ErrorCode = "firewall_already_removed" // Firewall was already removed from the resource
+ ErrorCodeIncompatibleNetworkType ErrorCode = "incompatible_network_type" // The Network type is incompatible for the given resource
+ ErrorCodeResourceInUse ErrorCode = "resource_in_use" // Firewall must not be in use to be deleted
+ ErrorCodeServerAlreadyAdded ErrorCode = "server_already_added" // Server added more than one time to resource
+
+ // Certificate related error codes
+ ErrorCodeCAARecordDoesNotAllowCA ErrorCode = "caa_record_does_not_allow_ca" // CAA record does not allow certificate authority
+ ErrorCodeCADNSValidationFailed ErrorCode = "ca_dns_validation_failed" // Certificate Authority: DNS validation failed
+ ErrorCodeCATooManyAuthorizationsFailedRecently ErrorCode = "ca_too_many_authorizations_failed_recently" // Certificate Authority: Too many authorizations failed recently
+ ErrorCodeCATooManyCertificatedIssuedForRegisteredDomain ErrorCode = "ca_too_many_certificates_issued_for_registered_domain" // Certificate Authority: Too many certificates issued for registered domain
+ ErrorCodeCATooManyDuplicateCertificates ErrorCode = "ca_too_many_duplicate_certificates" // Certificate Authority: Too many duplicate certificates
+ ErrorCodeCloudNotVerifyDomainDelegatedToZone ErrorCode = "could_not_verify_domain_delegated_to_zone" // Could not verify domain delegated to zone
+ ErrorCodeDNSZoneNotFound ErrorCode = "dns_zone_not_found" // DNS zone not found
+
// Deprecated error codes
// The actual value of this error code is limit_reached. The new error code
// rate_limit_exceeded for ratelimiting was introduced before Hetzner Cloud
@@ -87,3 +106,19 @@ func IsError(err error, code ErrorCode) bool {
apiErr, ok := err.(Error)
return ok && apiErr.Code == code
}
+
+type InvalidIPError struct {
+ IP string
+}
+
+func (e InvalidIPError) Error() string {
+ return fmt.Sprintf("could not parse ip address %s", e.IP)
+}
+
+type DNSNotFoundError struct {
+ IP net.IP
+}
+
+func (e DNSNotFoundError) Error() string {
+ return fmt.Sprintf("dns for ip %s not found", e.IP.String())
+}
diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/firewall.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/firewall.go
new file mode 100644
index 000000000..d254ca606
--- /dev/null
+++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/firewall.go
@@ -0,0 +1,384 @@
+package hcloud
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net"
+ "net/url"
+ "strconv"
+ "time"
+
+ "github.com/hetznercloud/hcloud-go/hcloud/schema"
+)
+
+// Firewall represents a Firewall in the Hetzner Cloud.
+type Firewall struct {
+ ID int
+ Name string
+ Labels map[string]string
+ Created time.Time
+ Rules []FirewallRule
+ AppliedTo []FirewallResource
+}
+
+// FirewallRule represents a Firewall's rules.
+type FirewallRule struct {
+ Direction FirewallRuleDirection
+ SourceIPs []net.IPNet
+ DestinationIPs []net.IPNet
+ Protocol FirewallRuleProtocol
+ Port *string
+ Description *string
+}
+
+// FirewallRuleDirection specifies the direction of a Firewall rule.
+type FirewallRuleDirection string
+
+const (
+ // FirewallRuleDirectionIn specifies a rule for inbound traffic.
+ FirewallRuleDirectionIn FirewallRuleDirection = "in"
+
+ // FirewallRuleDirectionOut specifies a rule for outbound traffic.
+ FirewallRuleDirectionOut FirewallRuleDirection = "out"
+)
+
+// FirewallRuleProtocol specifies the protocol of a Firewall rule.
+type FirewallRuleProtocol string
+
+const (
+ // FirewallRuleProtocolTCP specifies a TCP rule.
+ FirewallRuleProtocolTCP FirewallRuleProtocol = "tcp"
+ // FirewallRuleProtocolUDP specifies a UDP rule.
+ FirewallRuleProtocolUDP FirewallRuleProtocol = "udp"
+ // FirewallRuleProtocolICMP specifies an ICMP rule.
+ FirewallRuleProtocolICMP FirewallRuleProtocol = "icmp"
+ // FirewallRuleProtocolESP specifies an ESP rule.
+ FirewallRuleProtocolESP FirewallRuleProtocol = "esp"
+ // FirewallRuleProtocolGRE specifies a GRE rule.
+ FirewallRuleProtocolGRE FirewallRuleProtocol = "gre"
+)
+
+// FirewallResourceType specifies the resource to apply a Firewall on.
+type FirewallResourceType string
+
+const (
+ // FirewallResourceTypeServer specifies a Server.
+ FirewallResourceTypeServer FirewallResourceType = "server"
+ // FirewallResourceTypeLabelSelector specifies a LabelSelector.
+ FirewallResourceTypeLabelSelector FirewallResourceType = "label_selector"
+)
+
+// FirewallResource represents a resource to apply the new Firewall on.
+type FirewallResource struct {
+ Type FirewallResourceType
+ Server *FirewallResourceServer
+ LabelSelector *FirewallResourceLabelSelector
+}
+
+// FirewallResourceServer represents a Server to apply a Firewall on.
+type FirewallResourceServer struct {
+ ID int
+}
+
+// FirewallResourceLabelSelector represents a LabelSelector to apply a Firewall on.
+type FirewallResourceLabelSelector struct {
+ Selector string
+}
+
+// FirewallClient is a client for the Firewalls API.
+type FirewallClient struct {
+ client *Client
+}
+
+// GetByID retrieves a Firewall by its ID. If the Firewall does not exist, nil is returned.
+func (c *FirewallClient) GetByID(ctx context.Context, id int) (*Firewall, *Response, error) {
+ req, err := c.client.NewRequest(ctx, "GET", fmt.Sprintf("/firewalls/%d", id), nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var body schema.FirewallGetResponse
+ resp, err := c.client.Do(req, &body)
+ if err != nil {
+ if IsError(err, ErrorCodeNotFound) {
+ return nil, resp, nil
+ }
+ return nil, nil, err
+ }
+ return FirewallFromSchema(body.Firewall), resp, nil
+}
+
+// GetByName retrieves a Firewall by its name. If the Firewall does not exist, nil is returned.
+func (c *FirewallClient) GetByName(ctx context.Context, name string) (*Firewall, *Response, error) {
+ if name == "" {
+ return nil, nil, nil
+ }
+ firewalls, response, err := c.List(ctx, FirewallListOpts{Name: name})
+ if len(firewalls) == 0 {
+ return nil, response, err
+ }
+ return firewalls[0], response, err
+}
+
+// Get retrieves a Firewall by its ID if the input can be parsed as an integer, otherwise it
+// retrieves a Firewall by its name. If the Firewall does not exist, nil is returned.
+func (c *FirewallClient) Get(ctx context.Context, idOrName string) (*Firewall, *Response, error) {
+ if id, err := strconv.Atoi(idOrName); err == nil {
+ return c.GetByID(ctx, int(id))
+ }
+ return c.GetByName(ctx, idOrName)
+}
+
+// FirewallListOpts specifies options for listing Firewalls.
+type FirewallListOpts struct {
+ ListOpts
+ Name string
+}
+
+func (l FirewallListOpts) values() url.Values {
+ vals := l.ListOpts.values()
+ if l.Name != "" {
+ vals.Add("name", l.Name)
+ }
+ return vals
+}
+
+// List returns a list of Firewalls for a specific page.
+//
+// Please note that filters specified in opts are not taken into account
+// when their value corresponds to their zero value or when they are empty.
+func (c *FirewallClient) List(ctx context.Context, opts FirewallListOpts) ([]*Firewall, *Response, error) {
+ path := "/firewalls?" + opts.values().Encode()
+ req, err := c.client.NewRequest(ctx, "GET", path, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var body schema.FirewallListResponse
+ resp, err := c.client.Do(req, &body)
+ if err != nil {
+ return nil, nil, err
+ }
+ firewalls := make([]*Firewall, 0, len(body.Firewalls))
+ for _, s := range body.Firewalls {
+ firewalls = append(firewalls, FirewallFromSchema(s))
+ }
+ return firewalls, resp, nil
+}
+
+// All returns all Firewalls.
+func (c *FirewallClient) All(ctx context.Context) ([]*Firewall, error) {
+ allFirewalls := []*Firewall{}
+
+ opts := FirewallListOpts{}
+ opts.PerPage = 50
+
+ err := c.client.all(func(page int) (*Response, error) {
+ opts.Page = page
+ firewalls, resp, err := c.List(ctx, opts)
+ if err != nil {
+ return resp, err
+ }
+ allFirewalls = append(allFirewalls, firewalls...)
+ return resp, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return allFirewalls, nil
+}
+
+// AllWithOpts returns all Firewalls for the given options.
+func (c *FirewallClient) AllWithOpts(ctx context.Context, opts FirewallListOpts) ([]*Firewall, error) {
+ var allFirewalls []*Firewall
+
+ err := c.client.all(func(page int) (*Response, error) {
+ opts.Page = page
+ firewalls, resp, err := c.List(ctx, opts)
+ if err != nil {
+ return resp, err
+ }
+ allFirewalls = append(allFirewalls, firewalls...)
+ return resp, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return allFirewalls, nil
+}
+
+// FirewallCreateOpts specifies options for creating a new Firewall.
+type FirewallCreateOpts struct {
+ Name string
+ Labels map[string]string
+ Rules []FirewallRule
+ ApplyTo []FirewallResource
+}
+
+// Validate checks if options are valid.
+func (o FirewallCreateOpts) Validate() error {
+ if o.Name == "" {
+ return errors.New("missing name")
+ }
+ return nil
+}
+
+// FirewallCreateResult is the result of a create Firewall call.
+type FirewallCreateResult struct {
+ Firewall *Firewall
+ Actions []*Action
+}
+
+// Create creates a new Firewall.
+func (c *FirewallClient) Create(ctx context.Context, opts FirewallCreateOpts) (FirewallCreateResult, *Response, error) {
+ if err := opts.Validate(); err != nil {
+ return FirewallCreateResult{}, nil, err
+ }
+ reqBody := firewallCreateOptsToSchema(opts)
+ reqBodyData, err := json.Marshal(reqBody)
+ if err != nil {
+ return FirewallCreateResult{}, nil, err
+ }
+ req, err := c.client.NewRequest(ctx, "POST", "/firewalls", bytes.NewReader(reqBodyData))
+ if err != nil {
+ return FirewallCreateResult{}, nil, err
+ }
+
+ respBody := schema.FirewallCreateResponse{}
+ resp, err := c.client.Do(req, &respBody)
+ if err != nil {
+ return FirewallCreateResult{}, resp, err
+ }
+ result := FirewallCreateResult{
+ Firewall: FirewallFromSchema(respBody.Firewall),
+ Actions: ActionsFromSchema(respBody.Actions),
+ }
+ return result, resp, nil
+}
+
+// FirewallUpdateOpts specifies options for updating a Firewall.
+type FirewallUpdateOpts struct {
+ Name string
+ Labels map[string]string
+}
+
+// Update updates a Firewall.
+func (c *FirewallClient) Update(ctx context.Context, firewall *Firewall, opts FirewallUpdateOpts) (*Firewall, *Response, error) {
+ reqBody := schema.FirewallUpdateRequest{}
+ if opts.Name != "" {
+ reqBody.Name = &opts.Name
+ }
+ if opts.Labels != nil {
+ reqBody.Labels = &opts.Labels
+ }
+ reqBodyData, err := json.Marshal(reqBody)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ path := fmt.Sprintf("/firewalls/%d", firewall.ID)
+ req, err := c.client.NewRequest(ctx, "PUT", path, bytes.NewReader(reqBodyData))
+ if err != nil {
+ return nil, nil, err
+ }
+
+ respBody := schema.FirewallUpdateResponse{}
+ resp, err := c.client.Do(req, &respBody)
+ if err != nil {
+ return nil, resp, err
+ }
+ return FirewallFromSchema(respBody.Firewall), resp, nil
+}
+
+// Delete deletes a Firewall.
+func (c *FirewallClient) Delete(ctx context.Context, firewall *Firewall) (*Response, error) {
+ req, err := c.client.NewRequest(ctx, "DELETE", fmt.Sprintf("/firewalls/%d", firewall.ID), nil)
+ if err != nil {
+ return nil, err
+ }
+ return c.client.Do(req, nil)
+}
+
+// FirewallSetRulesOpts specifies options for setting rules of a Firewall.
+type FirewallSetRulesOpts struct {
+ Rules []FirewallRule
+}
+
+// SetRules sets the rules of a Firewall.
+func (c *FirewallClient) SetRules(ctx context.Context, firewall *Firewall, opts FirewallSetRulesOpts) ([]*Action, *Response, error) {
+ reqBody := firewallSetRulesOptsToSchema(opts)
+ reqBodyData, err := json.Marshal(reqBody)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ path := fmt.Sprintf("/firewalls/%d/actions/set_rules", firewall.ID)
+ req, err := c.client.NewRequest(ctx, "POST", path, bytes.NewReader(reqBodyData))
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var respBody schema.FirewallActionSetRulesResponse
+ resp, err := c.client.Do(req, &respBody)
+ if err != nil {
+ return nil, resp, err
+ }
+ return ActionsFromSchema(respBody.Actions), resp, nil
+}
+
+func (c *FirewallClient) ApplyResources(ctx context.Context, firewall *Firewall, resources []FirewallResource) ([]*Action, *Response, error) {
+ applyTo := make([]schema.FirewallResource, len(resources))
+ for i, r := range resources {
+ applyTo[i] = firewallResourceToSchema(r)
+ }
+
+ reqBody := schema.FirewallActionApplyToResourcesRequest{ApplyTo: applyTo}
+ reqBodyData, err := json.Marshal(reqBody)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ path := fmt.Sprintf("/firewalls/%d/actions/apply_to_resources", firewall.ID)
+ req, err := c.client.NewRequest(ctx, "POST", path, bytes.NewReader(reqBodyData))
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var respBody schema.FirewallActionApplyToResourcesResponse
+ resp, err := c.client.Do(req, &respBody)
+ if err != nil {
+ return nil, resp, err
+ }
+ return ActionsFromSchema(respBody.Actions), resp, nil
+}
+
+func (c *FirewallClient) RemoveResources(ctx context.Context, firewall *Firewall, resources []FirewallResource) ([]*Action, *Response, error) {
+ removeFrom := make([]schema.FirewallResource, len(resources))
+ for i, r := range resources {
+ removeFrom[i] = firewallResourceToSchema(r)
+ }
+
+ reqBody := schema.FirewallActionRemoveFromResourcesRequest{RemoveFrom: removeFrom}
+ reqBodyData, err := json.Marshal(reqBody)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ path := fmt.Sprintf("/firewalls/%d/actions/remove_from_resources", firewall.ID)
+ req, err := c.client.NewRequest(ctx, "POST", path, bytes.NewReader(reqBodyData))
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var respBody schema.FirewallActionRemoveFromResourcesResponse
+ resp, err := c.client.Do(req, &respBody)
+ if err != nil {
+ return nil, resp, err
+ }
+ return ActionsFromSchema(respBody.Actions), resp, nil
+}
diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/floating_ip.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/floating_ip.go
index 025351f40..c6bced5cd 100644
--- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/floating_ip.go
+++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/floating_ip.go
@@ -32,6 +32,7 @@ type FloatingIP struct {
}
// DNSPtrForIP returns the reverse DNS pointer of the IP address.
+// Deprecated: Use GetDNSPtrForIP instead
func (f *FloatingIP) DNSPtrForIP(ip net.IP) string {
return f.DNSPtr[ip.String()]
}
@@ -50,6 +51,43 @@ const (
FloatingIPTypeIPv6 FloatingIPType = "ipv6"
)
+// changeDNSPtr changes or resets the reverse DNS pointer for an IP address.
+// Pass a nil ptr to reset the reverse DNS pointer to its default value.
+func (f *FloatingIP) changeDNSPtr(ctx context.Context, client *Client, ip net.IP, ptr *string) (*Action, *Response, error) {
+ reqBody := schema.FloatingIPActionChangeDNSPtrRequest{
+ IP: ip.String(),
+ DNSPtr: ptr,
+ }
+ reqBodyData, err := json.Marshal(reqBody)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ path := fmt.Sprintf("/floating_ips/%d/actions/change_dns_ptr", f.ID)
+ req, err := client.NewRequest(ctx, "POST", path, bytes.NewReader(reqBodyData))
+ if err != nil {
+ return nil, nil, err
+ }
+
+ respBody := schema.FloatingIPActionChangeDNSPtrResponse{}
+ resp, err := client.Do(req, &respBody)
+ if err != nil {
+ return nil, resp, err
+ }
+ return ActionFromSchema(respBody.Action), resp, nil
+}
+
+// GetDNSPtrForIP searches for the DNS pointer assigned to the given IP address.
+// It returns an error if there is no DNS pointer set for the given IP address.
+func (f *FloatingIP) GetDNSPtrForIP(ip net.IP) (string, error) {
+ dns, ok := f.DNSPtr[ip.String()]
+ if !ok {
+ return "", DNSNotFoundError{ip}
+ }
+
+ return dns, nil
+}
+
// FloatingIPClient is a client for the Floating IP API.
type FloatingIPClient struct {
client *Client
@@ -141,7 +179,7 @@ func (c *FloatingIPClient) All(ctx context.Context) ([]*FloatingIP, error) {
func (c *FloatingIPClient) AllWithOpts(ctx context.Context, opts FloatingIPListOpts) ([]*FloatingIP, error) {
allFloatingIPs := []*FloatingIP{}
- _, err := c.client.all(func(page int) (*Response, error) {
+ err := c.client.all(func(page int) (*Response, error) {
opts.Page = page
floatingIPs, resp, err := c.List(ctx, opts)
if err != nil {
@@ -325,27 +363,11 @@ func (c *FloatingIPClient) Unassign(ctx context.Context, floatingIP *FloatingIP)
// ChangeDNSPtr changes or resets the reverse DNS pointer for a Floating IP address.
// Pass a nil ptr to reset the reverse DNS pointer to its default value.
func (c *FloatingIPClient) ChangeDNSPtr(ctx context.Context, floatingIP *FloatingIP, ip string, ptr *string) (*Action, *Response, error) {
- reqBody := schema.FloatingIPActionChangeDNSPtrRequest{
- IP: ip,
- DNSPtr: ptr,
+ netIP := net.ParseIP(ip)
+ if netIP == nil {
+ return nil, nil, InvalidIPError{ip}
}
- reqBodyData, err := json.Marshal(reqBody)
- if err != nil {
- return nil, nil, err
- }
-
- path := fmt.Sprintf("/floating_ips/%d/actions/change_dns_ptr", floatingIP.ID)
- req, err := c.client.NewRequest(ctx, "POST", path, bytes.NewReader(reqBodyData))
- if err != nil {
- return nil, nil, err
- }
-
- respBody := schema.FloatingIPActionChangeDNSPtrResponse{}
- resp, err := c.client.Do(req, &respBody)
- if err != nil {
- return nil, resp, err
- }
- return ActionFromSchema(respBody.Action), resp, nil
+ return floatingIP.changeDNSPtr(ctx, c.client, net.ParseIP(ip), ptr)
}
// FloatingIPChangeProtectionOpts specifies options for changing the resource protection level of a Floating IP.
diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/hcloud.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/hcloud.go
index 55dcce408..d2f0edf2e 100644
--- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/hcloud.go
+++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/hcloud.go
@@ -2,4 +2,4 @@
package hcloud
// Version is the library's version following Semantic Versioning.
-const Version = "1.23.1"
+const Version = "1.32.0"
diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/image.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/image.go
index a0aa18f10..0867aac0d 100644
--- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/image.go
+++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/image.go
@@ -32,6 +32,7 @@ type Image struct {
Protection ImageProtection
Deprecated time.Time // The zero value denotes the image is not deprecated.
Labels map[string]string
+ Deleted time.Time
}
// IsDeprecated returns whether the image is deprecated.
@@ -39,6 +40,11 @@ func (image *Image) IsDeprecated() bool {
return !image.Deprecated.IsZero()
}
+// IsDeleted returns whether the image is deleted.
+func (image *Image) IsDeleted() bool {
+ return !image.Deleted.IsZero()
+}
+
// ImageProtection represents the protection level of an image.
type ImageProtection struct {
Delete bool
@@ -54,6 +60,8 @@ const (
ImageTypeBackup ImageType = "backup"
// ImageTypeSystem represents a system image.
ImageTypeSystem ImageType = "system"
+ // ImageTypeApp represents a one click app image.
+ ImageTypeApp ImageType = "app"
)
// ImageStatus specifies the status of an image.
@@ -176,7 +184,7 @@ func (c *ImageClient) All(ctx context.Context) ([]*Image, error) {
func (c *ImageClient) AllWithOpts(ctx context.Context, opts ImageListOpts) ([]*Image, error) {
allImages := []*Image{}
- _, err := c.client.all(func(page int) (*Response, error) {
+ err := c.client.all(func(page int) (*Response, error) {
opts.Page = page
images, resp, err := c.List(ctx, opts)
if err != nil {
diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/internal/instrumentation/metrics.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/internal/instrumentation/metrics.go
new file mode 100644
index 000000000..d8e9a2e96
--- /dev/null
+++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/internal/instrumentation/metrics.go
@@ -0,0 +1,86 @@
+package instrumentation
+
+import (
+ "fmt"
+ "net/http"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+)
+
+type Instrumenter struct {
+ subsystemIdentifier string // will be used as part of the metric name (hcloud_<identifier>_requests_total)
+ instrumentationRegistry *prometheus.Registry
+}
+
+// New creates a new Instrumenter. The subsystemIdentifier will be used as part of the metric names (e.g. hcloud_<identifier>_requests_total)
+func New(subsystemIdentifier string, instrumentationRegistry *prometheus.Registry) *Instrumenter {
+ return &Instrumenter{subsystemIdentifier: subsystemIdentifier, instrumentationRegistry: instrumentationRegistry}
+}
+
+// InstrumentedRoundTripper returns an instrumented round tripper.
+func (i *Instrumenter) InstrumentedRoundTripper() http.RoundTripper {
+ inFlightRequestsGauge := prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: fmt.Sprintf("hcloud_%s_in_flight_requests", i.subsystemIdentifier),
+ Help: fmt.Sprintf("A gauge of in-flight requests to the hcloud %s.", i.subsystemIdentifier),
+ })
+
+ requestsPerEndpointCounter := prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: fmt.Sprintf("hcloud_%s_requests_total", i.subsystemIdentifier),
+ Help: fmt.Sprintf("A counter for requests to the hcloud %s per endpoint.", i.subsystemIdentifier),
+ },
+ []string{"code", "method", "api_endpoint"},
+ )
+
+ requestLatencyHistogram := prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Name: fmt.Sprintf("hcloud_%s_request_duration_seconds", i.subsystemIdentifier),
+ Help: fmt.Sprintf("A histogram of request latencies to the hcloud %s .", i.subsystemIdentifier),
+ Buckets: prometheus.DefBuckets,
+ },
+ []string{"method"},
+ )
+
+ i.instrumentationRegistry.MustRegister(requestsPerEndpointCounter, requestLatencyHistogram, inFlightRequestsGauge)
+
+ return promhttp.InstrumentRoundTripperInFlight(inFlightRequestsGauge,
+ promhttp.InstrumentRoundTripperDuration(requestLatencyHistogram,
+ i.instrumentRoundTripperEndpoint(requestsPerEndpointCounter,
+ http.DefaultTransport,
+ ),
+ ),
+ )
+}
+
+// instrumentRoundTripperEndpoint implements a hcloud specific round tripper to count requests per API endpoint
+// numeric IDs are removed from the URI Path.
+// Sample:
+// /volumes/1234/actions/attach --> /volumes/actions/attach
+func (i *Instrumenter) instrumentRoundTripperEndpoint(counter *prometheus.CounterVec, next http.RoundTripper) promhttp.RoundTripperFunc {
+ return func(r *http.Request) (*http.Response, error) {
+ resp, err := next.RoundTrip(r)
+ if err == nil {
+ statusCode := strconv.Itoa(resp.StatusCode)
+ counter.WithLabelValues(statusCode, strings.ToLower(resp.Request.Method), preparePathForLabel(resp.Request.URL.Path)).Inc()
+ }
+ return resp, err
+ }
+}
+
+func preparePathForLabel(path string) string {
+ path = strings.ToLower(path)
+
+ // replace all numbers and chars that are not a-z, / or _
+ reg := regexp.MustCompile("[^a-z/_]+")
+ path = reg.ReplaceAllString(path, "")
+
+ // replace all artifacts of number replacement (//)
+ path = strings.ReplaceAll(path, "//", "/")
+
+ // replace the /v/ that indicated the API version
+ return strings.Replace(path, "/v/", "/", 1)
+}
diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/iso.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/iso.go
index 18e161041..b3d97bade 100644
--- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/iso.go
+++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/iso.go
@@ -122,7 +122,7 @@ func (c *ISOClient) All(ctx context.Context) ([]*ISO, error) {
opts := ISOListOpts{}
opts.PerPage = 50
- _, err := c.client.all(func(page int) (*Response, error) {
+ err := c.client.all(func(page int) (*Response, error) {
opts.Page = page
isos, resp, err := c.List(ctx, opts)
if err != nil {
diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/load_balancer.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/load_balancer.go
index 6de4a36a3..2858c81c4 100644
--- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/load_balancer.go
+++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/load_balancer.go
@@ -42,12 +42,14 @@ type LoadBalancerPublicNet struct {
// LoadBalancerPublicNetIPv4 represents a Load Balancer's public IPv4 address.
type LoadBalancerPublicNetIPv4 struct {
- IP net.IP
+ IP net.IP
+ DNSPtr string
}
// LoadBalancerPublicNetIPv6 represents a Load Balancer's public IPv6 address.
type LoadBalancerPublicNetIPv6 struct {
- IP net.IP
+ IP net.IP
+ DNSPtr string
}
// LoadBalancerPrivateNet represents a Load Balancer's private network.
@@ -195,6 +197,44 @@ type LoadBalancerProtection struct {
Delete bool
}
+// changeDNSPtr changes or resets the reverse DNS pointer for an IP address.
+// Pass a nil ptr to reset the reverse DNS pointer to its default value.
+func (lb *LoadBalancer) changeDNSPtr(ctx context.Context, client *Client, ip net.IP, ptr *string) (*Action, *Response, error) {
+ reqBody := schema.LoadBalancerActionChangeDNSPtrRequest{
+ IP: ip.String(),
+ DNSPtr: ptr,
+ }
+ reqBodyData, err := json.Marshal(reqBody)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ path := fmt.Sprintf("/load_balancers/%d/actions/change_dns_ptr", lb.ID)
+ req, err := client.NewRequest(ctx, "POST", path, bytes.NewReader(reqBodyData))
+ if err != nil {
+ return nil, nil, err
+ }
+
+ respBody := schema.LoadBalancerActionChangeDNSPtrResponse{}
+ resp, err := client.Do(req, &respBody)
+ if err != nil {
+ return nil, resp, err
+ }
+ return ActionFromSchema(respBody.Action), resp, nil
+}
+
+// GetDNSPtrForIP searches for the DNS pointer assigned to the given IP address.
+// It returns an error if there is no DNS pointer set for the given IP address.
+func (lb *LoadBalancer) GetDNSPtrForIP(ip net.IP) (string, error) {
+ if net.IP.Equal(lb.PublicNet.IPv4.IP, ip) {
+ return lb.PublicNet.IPv4.DNSPtr, nil
+ } else if net.IP.Equal(lb.PublicNet.IPv6.IP, ip) {
+ return lb.PublicNet.IPv6.DNSPtr, nil
+ }
+
+ return "", DNSNotFoundError{ip}
+}
+
// LoadBalancerClient is a client for the Load Balancers API.
type LoadBalancerClient struct {
client *Client
@@ -283,7 +323,7 @@ func (c *LoadBalancerClient) All(ctx context.Context) ([]*LoadBalancer, error) {
opts := LoadBalancerListOpts{}
opts.PerPage = 50
- _, err := c.client.all(func(page int) (*Response, error) {
+ err := c.client.all(func(page int) (*Response, error) {
opts.Page = page
LoadBalancer, resp, err := c.List(ctx, opts)
if err != nil {
@@ -303,7 +343,7 @@ func (c *LoadBalancerClient) All(ctx context.Context) ([]*LoadBalancer, error) {
func (c *LoadBalancerClient) AllWithOpts(ctx context.Context, opts LoadBalancerListOpts) ([]*LoadBalancer, error) {
var allLoadBalancers []*LoadBalancer
- _, err := c.client.all(func(page int) (*Response, error) {
+ err := c.client.all(func(page int) (*Response, error) {
opts.Page = page
LoadBalancers, resp, err := c.List(ctx, opts)
if err != nil {
@@ -1024,3 +1064,13 @@ func (c *LoadBalancerClient) GetMetrics(
}
return ms, resp, nil
}
+
+// ChangeDNSPtr changes or resets the reverse DNS pointer for a Load Balancer.
+// Pass a nil ptr to reset the reverse DNS pointer to its default value.
+func (c *LoadBalancerClient) ChangeDNSPtr(ctx context.Context, lb *LoadBalancer, ip string, ptr *string) (*Action, *Response, error) {
+ netIP := net.ParseIP(ip)
+ if netIP == nil {
+ return nil, nil, InvalidIPError{ip}
+ }
+ return lb.changeDNSPtr(ctx, c.client, net.ParseIP(ip), ptr)
+}
diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/load_balancer_type.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/load_balancer_type.go
index 12202d02b..73800048f 100644
--- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/load_balancer_type.go
+++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/load_balancer_type.go
@@ -109,7 +109,7 @@ func (c *LoadBalancerTypeClient) All(ctx context.Context) ([]*LoadBalancerType,
opts := LoadBalancerTypeListOpts{}
opts.PerPage = 50
- _, err := c.client.all(func(page int) (*Response, error) {
+ err := c.client.all(func(page int) (*Response, error) {
opts.Page = page
LoadBalancerTypes, resp, err := c.List(ctx, opts)
if err != nil {
diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/location.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/location.go
index ad2fff854..05f42329b 100644
--- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/location.go
+++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/location.go
@@ -109,7 +109,7 @@ func (c *LocationClient) All(ctx context.Context) ([]*Location, error) {
opts := LocationListOpts{}
opts.PerPage = 50
- _, err := c.client.all(func(page int) (*Response, error) {
+ err := c.client.all(func(page int) (*Response, error) {
opts.Page = page
locations, resp, err := c.List(ctx, opts)
if err != nil {
diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/network.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/network.go
index 8657592e8..2f39d8586 100644
--- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/network.go
+++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/network.go
@@ -155,7 +155,7 @@ func (c *NetworkClient) All(ctx context.Context) ([]*Network, error) {
func (c *NetworkClient) AllWithOpts(ctx context.Context, opts NetworkListOpts) ([]*Network, error) {
var allNetworks []*Network
- _, err := c.client.all(func(page int) (*Response, error) {
+ err := c.client.all(func(page int) (*Response, error) {
opts.Page = page
Networks, resp, err := c.List(ctx, opts)
if err != nil {
diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/placement_group.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/placement_group.go
new file mode 100644
index 000000000..d8df1952a
--- /dev/null
+++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/placement_group.go
@@ -0,0 +1,243 @@
+package hcloud
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/url"
+ "strconv"
+ "time"
+
+ "github.com/hetznercloud/hcloud-go/hcloud/schema"
+)
+
+// PlacementGroup represents a Placement Group in the Hetzner Cloud.
+type PlacementGroup struct {
+ ID int
+ Name string
+ Labels map[string]string
+ Created time.Time
+ Servers []int
+ Type PlacementGroupType
+}
+
+// PlacementGroupType specifies the type of a Placement Group
+type PlacementGroupType string
+
+const (
+ // PlacementGroupTypeSpread spreads all servers in the group on different vhosts
+ PlacementGroupTypeSpread PlacementGroupType = "spread"
+)
+
+// PlacementGroupClient is a client for the Placement Groups API.
+type PlacementGroupClient struct {
+ client *Client
+}
+
+// GetByID retrieves a PlacementGroup by its ID. If the PlacementGroup does not exist, nil is returned.
+func (c *PlacementGroupClient) GetByID(ctx context.Context, id int) (*PlacementGroup, *Response, error) {
+ req, err := c.client.NewRequest(ctx, "GET", fmt.Sprintf("/placement_groups/%d", id), nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var body schema.PlacementGroupGetResponse
+ resp, err := c.client.Do(req, &body)
+ if err != nil {
+ if IsError(err, ErrorCodeNotFound) {
+ return nil, resp, nil
+ }
+ return nil, nil, err
+ }
+ return PlacementGroupFromSchema(body.PlacementGroup), resp, nil
+}
+
+// GetByName retrieves a PlacementGroup by its name. If the PlacementGroup does not exist, nil is returned.
+func (c *PlacementGroupClient) GetByName(ctx context.Context, name string) (*PlacementGroup, *Response, error) {
+ if name == "" {
+ return nil, nil, nil
+ }
+ placementGroups, response, err := c.List(ctx, PlacementGroupListOpts{Name: name})
+ if len(placementGroups) == 0 {
+ return nil, response, err
+ }
+ return placementGroups[0], response, err
+}
+
+// Get retrieves a PlacementGroup by its ID if the input can be parsed as an integer, otherwise it
+// retrieves a PlacementGroup by its name. If the PlacementGroup does not exist, nil is returned.
+func (c *PlacementGroupClient) Get(ctx context.Context, idOrName string) (*PlacementGroup, *Response, error) {
+ if id, err := strconv.Atoi(idOrName); err == nil {
+ return c.GetByID(ctx, id)
+ }
+ return c.GetByName(ctx, idOrName)
+}
+
+// PlacementGroupListOpts specifies options for listing PlacementGroup.
+type PlacementGroupListOpts struct {
+ ListOpts
+ Name string
+ Type PlacementGroupType
+}
+
+func (l PlacementGroupListOpts) values() url.Values {
+ vals := l.ListOpts.values()
+ if l.Name != "" {
+ vals.Add("name", l.Name)
+ }
+ if l.Type != "" {
+ vals.Add("type", string(l.Type))
+ }
+ return vals
+}
+
+// List returns a list of PlacementGroups for a specific page.
+//
+// Please note that filters specified in opts are not taken into account
+// when their value corresponds to their zero value or when they are empty.
+func (c *PlacementGroupClient) List(ctx context.Context, opts PlacementGroupListOpts) ([]*PlacementGroup, *Response, error) {
+ path := "/placement_groups?" + opts.values().Encode()
+ req, err := c.client.NewRequest(ctx, "GET", path, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var body schema.PlacementGroupListResponse
+ resp, err := c.client.Do(req, &body)
+ if err != nil {
+ return nil, nil, err
+ }
+ placementGroups := make([]*PlacementGroup, 0, len(body.PlacementGroups))
+ for _, g := range body.PlacementGroups {
+ placementGroups = append(placementGroups, PlacementGroupFromSchema(g))
+ }
+ return placementGroups, resp, nil
+}
+
+// All returns all PlacementGroups.
+func (c *PlacementGroupClient) All(ctx context.Context) ([]*PlacementGroup, error) {
+ opts := PlacementGroupListOpts{
+ ListOpts: ListOpts{
+ PerPage: 50,
+ },
+ }
+
+ return c.AllWithOpts(ctx, opts)
+}
+
+// AllWithOpts returns all PlacementGroups for the given options.
+func (c *PlacementGroupClient) AllWithOpts(ctx context.Context, opts PlacementGroupListOpts) ([]*PlacementGroup, error) {
+ var allPlacementGroups []*PlacementGroup
+
+ err := c.client.all(func(page int) (*Response, error) {
+ opts.Page = page
+ placementGroups, resp, err := c.List(ctx, opts)
+ if err != nil {
+ return resp, err
+ }
+ allPlacementGroups = append(allPlacementGroups, placementGroups...)
+ return resp, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return allPlacementGroups, nil
+}
+
+// PlacementGroupCreateOpts specifies options for creating a new PlacementGroup.
+type PlacementGroupCreateOpts struct {
+ Name string
+ Labels map[string]string
+ Type PlacementGroupType
+}
+
+// Validate checks if options are valid
+func (o PlacementGroupCreateOpts) Validate() error {
+ if o.Name == "" {
+ return errors.New("missing name")
+ }
+ return nil
+}
+
+// PlacementGroupCreateResult is the result of a create PlacementGroup call.
+type PlacementGroupCreateResult struct {
+ PlacementGroup *PlacementGroup
+ Action *Action
+}
+
+// Create creates a new PlacementGroup
+func (c *PlacementGroupClient) Create(ctx context.Context, opts PlacementGroupCreateOpts) (PlacementGroupCreateResult, *Response, error) {
+ if err := opts.Validate(); err != nil {
+ return PlacementGroupCreateResult{}, nil, err
+ }
+ reqBody := placementGroupCreateOptsToSchema(opts)
+ reqBodyData, err := json.Marshal(reqBody)
+ if err != nil {
+ return PlacementGroupCreateResult{}, nil, err
+ }
+ req, err := c.client.NewRequest(ctx, "POST", "/placement_groups", bytes.NewReader(reqBodyData))
+ if err != nil {
+ return PlacementGroupCreateResult{}, nil, err
+ }
+
+ respBody := schema.PlacementGroupCreateResponse{}
+ resp, err := c.client.Do(req, &respBody)
+ if err != nil {
+ return PlacementGroupCreateResult{}, nil, err
+ }
+ result := PlacementGroupCreateResult{
+ PlacementGroup: PlacementGroupFromSchema(respBody.PlacementGroup),
+ }
+ if respBody.Action != nil {
+ result.Action = ActionFromSchema(*respBody.Action)
+ }
+
+ return result, resp, nil
+}
+
+// PlacementGroupUpdateOpts specifies options for updating a PlacementGroup.
+type PlacementGroupUpdateOpts struct {
+ Name string
+ Labels map[string]string
+}
+
+// Update updates a PlacementGroup.
+func (c *PlacementGroupClient) Update(ctx context.Context, placementGroup *PlacementGroup, opts PlacementGroupUpdateOpts) (*PlacementGroup, *Response, error) {
+ reqBody := schema.PlacementGroupUpdateRequest{}
+ if opts.Name != "" {
+ reqBody.Name = &opts.Name
+ }
+ if opts.Labels != nil {
+ reqBody.Labels = &opts.Labels
+ }
+ reqBodyData, err := json.Marshal(reqBody)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ path := fmt.Sprintf("/placement_groups/%d", placementGroup.ID)
+ req, err := c.client.NewRequest(ctx, "PUT", path, bytes.NewReader(reqBodyData))
+ if err != nil {
+ return nil, nil, err
+ }
+
+ respBody := schema.PlacementGroupUpdateResponse{}
+ resp, err := c.client.Do(req, &respBody)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return PlacementGroupFromSchema(respBody.PlacementGroup), resp, nil
+}
+
+// Delete deletes a PlacementGroup.
+func (c *PlacementGroupClient) Delete(ctx context.Context, placementGroup *PlacementGroup) (*Response, error) {
+ req, err := c.client.NewRequest(ctx, "DELETE", fmt.Sprintf("/placement_groups/%d", placementGroup.ID), nil)
+ if err != nil {
+ return nil, err
+ }
+ return c.client.Do(req, nil)
+}
diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/pricing.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/pricing.go
index 618907a14..5d1b23b81 100644
--- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/pricing.go
+++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/pricing.go
@@ -10,10 +10,12 @@ import (
type Pricing struct {
Image ImagePricing
FloatingIP FloatingIPPricing
+ FloatingIPs []FloatingIPTypePricing
Traffic TrafficPricing
ServerBackup ServerBackupPricing
ServerTypes []ServerTypePricing
LoadBalancerTypes []LoadBalancerTypePricing
+ Volume VolumePricing
}
// Price represents a price. Net amount, gross amount, as well as VAT rate are
@@ -36,11 +38,29 @@ type FloatingIPPricing struct {
Monthly Price
}
+// FloatingIPTypePricing provides pricing information for Floating IPs per Type.
+type FloatingIPTypePricing struct {
+ Type FloatingIPType
+ Pricings []FloatingIPTypeLocationPricing
+}
+
+// FloatingIPTypeLocationPricing provides pricing information for a Floating IP type
+// at a location.
+type FloatingIPTypeLocationPricing struct {
+ Location *Location
+ Monthly Price
+}
+
// TrafficPricing provides pricing information for traffic.
type TrafficPricing struct {
PerTB Price
}
+// VolumePricing provides pricing information for a Volume.
+type VolumePricing struct {
+ PerGBMonthly Price
+}
+
// ServerBackupPricing provides pricing information for server backups.
type ServerBackupPricing struct {
Percentage string
diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/rdns.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/rdns.go
new file mode 100644
index 000000000..70d31f93e
--- /dev/null
+++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/rdns.go
@@ -0,0 +1,46 @@
+package hcloud
+
+import (
+ "context"
+ "fmt"
+ "net"
+)
+
+// RDNSSupporter defines functions to change and lookup reverse dns entries.
+// currently implemented by Server, FloatingIP and LoadBalancer
+type RDNSSupporter interface {
+ // changeDNSPtr changes or resets the reverse DNS pointer for an IP address.
+ // Pass a nil ptr to reset the reverse DNS pointer to its default value.
+ changeDNSPtr(ctx context.Context, client *Client, ip net.IP, ptr *string) (*Action, *Response, error)
+ // GetDNSPtrForIP searches for the DNS pointer assigned to the given IP address.
+ // It returns an error if there is no DNS pointer set for the given IP address.
+ GetDNSPtrForIP(ip net.IP) (string, error)
+}
+
+// RDNSClient simplifies the handling of objects which support reverse DNS entries.
+type RDNSClient struct {
+ client *Client
+}
+
+// ChangeDNSPtr changes or resets the reverse DNS pointer for an IP address.
+// Pass a nil ptr to reset the reverse DNS pointer to its default value.
+func (c *RDNSClient) ChangeDNSPtr(ctx context.Context, rdns RDNSSupporter, ip net.IP, ptr *string) (*Action, *Response, error) {
+ return rdns.changeDNSPtr(ctx, c.client, ip, ptr)
+}
+
+// SupportsRDNS checks if the object supports reverse DNS functions.
+func SupportsRDNS(i interface{}) bool {
+ _, ok := i.(RDNSSupporter)
+ return ok
+}
+
+// RDNSLookup searches for the DNS pointer assigned to the given IP address.
+// It returns an error if the object does not support reverse DNS or if there is no DNS pointer set for the given IP address.
+func RDNSLookup(i interface{}, ip net.IP) (string, error) {
+ rdns, ok := i.(RDNSSupporter)
+ if !ok {
+ return "", fmt.Errorf("%+v does not support RDNS", i)
+ }
+
+ return rdns.GetDNSPtrForIP(ip)
+}
diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/resource.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/resource.go
new file mode 100644
index 000000000..8a734dfd6
--- /dev/null
+++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/resource.go
@@ -0,0 +1,7 @@
+package hcloud
+
+// Resource defines the schema of a resource.
+type Resource struct {
+ ID int
+ Type string
+}
diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema.go
index 6a04df0d6..e7918cf47 100644
--- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema.go
+++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema.go
@@ -40,9 +40,9 @@ func ActionFromSchema(s schema.Action) *Action {
// ActionsFromSchema converts a slice of schema.Action to a slice of Action.
func ActionsFromSchema(s []schema.Action) []*Action {
- var actions []*Action
- for _, a := range s {
- actions = append(actions, ActionFromSchema(a))
+ actions := make([]*Action, len(s))
+ for i, a := range s {
+ actions[i] = ActionFromSchema(a)
}
return actions
}
@@ -172,6 +172,9 @@ func ServerFromSchema(s schema.Server) *Server {
for _, privNet := range s.PrivateNet {
server.PrivateNet = append(server.PrivateNet, ServerPrivateNetFromSchema(privNet))
}
+ if s.PlacementGroup != nil {
+ server.PlacementGroup = PlacementGroupFromSchema(*s.PlacementGroup)
+ }
return server
}
@@ -184,6 +187,13 @@ func ServerPublicNetFromSchema(s schema.ServerPublicNet) ServerPublicNet {
for _, id := range s.FloatingIPs {
publicNet.FloatingIPs = append(publicNet.FloatingIPs, &FloatingIP{ID: id})
}
+ for _, fw := range s.Firewalls {
+ publicNet.Firewalls = append(publicNet.Firewalls,
+ &ServerFirewallStatus{
+ Firewall: Firewall{ID: fw.ID},
+ Status: FirewallStatus(fw.Status)},
+ )
+ }
return publicNet
}
@@ -284,6 +294,7 @@ func ImageFromSchema(s schema.Image) *Image {
Delete: s.Protection.Delete,
},
Deprecated: s.Deprecated,
+ Deleted: s.Deleted,
}
if s.Name != nil {
i.Name = *s.Name
@@ -422,10 +433,12 @@ func LoadBalancerFromSchema(s schema.LoadBalancer) *LoadBalancer {
PublicNet: LoadBalancerPublicNet{
Enabled: s.PublicNet.Enabled,
IPv4: LoadBalancerPublicNetIPv4{
- IP: net.ParseIP(s.PublicNet.IPv4.IP),
+ IP: net.ParseIP(s.PublicNet.IPv4.IP),
+ DNSPtr: s.PublicNet.IPv4.DNSPtr,
},
IPv6: LoadBalancerPublicNetIPv6{
- IP: net.ParseIP(s.PublicNet.IPv6.IP),
+ IP: net.ParseIP(s.PublicNet.IPv6.IP),
+ DNSPtr: s.PublicNet.IPv6.DNSPtr,
},
},
Location: LocationFromSchema(s.Location),
@@ -548,6 +561,7 @@ func CertificateFromSchema(s schema.Certificate) *Certificate {
c := &Certificate{
ID: s.ID,
Name: s.Name,
+ Type: CertificateType(s.Type),
Certificate: s.Certificate,
Created: s.Created,
NotValidBefore: s.NotValidBefore,
@@ -555,12 +569,26 @@ func CertificateFromSchema(s schema.Certificate) *Certificate {
DomainNames: s.DomainNames,
Fingerprint: s.Fingerprint,
}
+ if s.Status != nil {
+ c.Status = &CertificateStatus{
+ Issuance: CertificateStatusType(s.Status.Issuance),
+ Renewal: CertificateStatusType(s.Status.Renewal),
+ }
+ if s.Status.Error != nil {
+ certErr := ErrorFromSchema(*s.Status.Error)
+ c.Status.Error = &certErr
+ }
+ }
if len(s.Labels) > 0 {
- c.Labels = make(map[string]string)
+ c.Labels = s.Labels
}
- for key, value := range s.Labels {
- c.Labels[key] = value
+ if len(s.UsedBy) > 0 {
+ c.UsedBy = make([]CertificateUsedByRef, len(s.UsedBy))
+ for i, ref := range s.UsedBy {
+ c.UsedBy[i] = CertificateUsedByRef{ID: ref.ID, Type: CertificateUsedByRefType(ref.Type)}
+ }
}
+
return c
}
@@ -583,8 +611,7 @@ func ErrorFromSchema(s schema.Error) Error {
Message: s.Message,
}
- switch d := s.Details.(type) {
- case schema.ErrorDetailsInvalidInput:
+ if d, ok := s.Details.(schema.ErrorDetailsInvalidInput); ok {
details := ErrorDetailsInvalidInput{
Fields: []ErrorDetailsInvalidInputField{},
}
@@ -629,6 +656,30 @@ func PricingFromSchema(s schema.Pricing) Pricing {
ServerBackup: ServerBackupPricing{
Percentage: s.ServerBackup.Percentage,
},
+ Volume: VolumePricing{
+ PerGBMonthly: Price{
+ Currency: s.Currency,
+ VATRate: s.VATRate,
+ Net: s.Volume.PricePerGBPerMonth.Net,
+ Gross: s.Volume.PricePerGBPerMonth.Gross,
+ },
+ },
+ }
+ for _, floatingIPType := range s.FloatingIPs {
+ var pricings []FloatingIPTypeLocationPricing
+ for _, price := range floatingIPType.Prices {
+ p := FloatingIPTypeLocationPricing{
+ Location: &Location{Name: price.Location},
+ Monthly: Price{
+ Currency: s.Currency,
+ VATRate: s.VATRate,
+ Net: price.PriceMonthly.Net,
+ Gross: price.PriceMonthly.Gross,
+ },
+ }
+ pricings = append(pricings, p)
+ }
+ p.FloatingIPs = append(p.FloatingIPs, FloatingIPTypePricing{Type: FloatingIPType(floatingIPType.Type), Pricings: pricings})
}
for _, serverType := range s.ServerTypes {
var pricings []ServerTypeLocationPricing
@@ -687,6 +738,78 @@ func PricingFromSchema(s schema.Pricing) Pricing {
return p
}
+// FirewallFromSchema converts a schema.Firewall to a Firewall.
+func FirewallFromSchema(s schema.Firewall) *Firewall {
+ f := &Firewall{
+ ID: s.ID,
+ Name: s.Name,
+ Labels: map[string]string{},
+ Created: s.Created,
+ }
+ for key, value := range s.Labels {
+ f.Labels[key] = value
+ }
+ for _, res := range s.AppliedTo {
+ r := FirewallResource{Type: FirewallResourceType(res.Type)}
+ switch r.Type {
+ case FirewallResourceTypeLabelSelector:
+ r.LabelSelector = &FirewallResourceLabelSelector{Selector: res.LabelSelector.Selector}
+ case FirewallResourceTypeServer:
+ r.Server = &FirewallResourceServer{ID: res.Server.ID}
+ }
+ f.AppliedTo = append(f.AppliedTo, r)
+ }
+ for _, rule := range s.Rules {
+ sourceIPs := []net.IPNet{}
+ for _, sourceIP := range rule.SourceIPs {
+ _, mask, err := net.ParseCIDR(sourceIP)
+ if err == nil && mask != nil {
+ sourceIPs = append(sourceIPs, *mask)
+ }
+ }
+ destinationIPs := []net.IPNet{}
+ for _, destinationIP := range rule.DestinationIPs {
+ _, mask, err := net.ParseCIDR(destinationIP)
+ if err == nil && mask != nil {
+ destinationIPs = append(destinationIPs, *mask)
+ }
+ }
+ f.Rules = append(f.Rules, FirewallRule{
+ Direction: FirewallRuleDirection(rule.Direction),
+ SourceIPs: sourceIPs,
+ DestinationIPs: destinationIPs,
+ Protocol: FirewallRuleProtocol(rule.Protocol),
+ Port: rule.Port,
+ Description: rule.Description,
+ })
+ }
+ return f
+}
+
+// PlacementGroupFromSchema converts a schema.PlacementGroup to a PlacementGroup.
+func PlacementGroupFromSchema(s schema.PlacementGroup) *PlacementGroup {
+ g := &PlacementGroup{
+ ID: s.ID,
+ Name: s.Name,
+ Labels: s.Labels,
+ Created: s.Created,
+ Servers: s.Servers,
+ Type: PlacementGroupType(s.Type),
+ }
+ return g
+}
+
+func placementGroupCreateOptsToSchema(opts PlacementGroupCreateOpts) schema.PlacementGroupCreateRequest {
+ req := schema.PlacementGroupCreateRequest{
+ Name: opts.Name,
+ Type: string(opts.Type),
+ }
+ if opts.Labels != nil {
+ req.Labels = &opts.Labels
+ }
+ return req
+}
+
func loadBalancerCreateOptsToSchema(opts LoadBalancerCreateOpts) schema.LoadBalancerCreateRequest {
req := schema.LoadBalancerCreateRequest{
Name: opts.Name,
@@ -746,8 +869,10 @@ func loadBalancerCreateOptsToSchema(opts LoadBalancerCreateOpts) schema.LoadBala
StickySessions: service.HTTP.StickySessions,
CookieName: service.HTTP.CookieName,
}
- if sec := service.HTTP.CookieLifetime.Seconds(); sec != 0 {
- schemaService.HTTP.CookieLifetime = Int(int(sec))
+ if service.HTTP.CookieLifetime != nil {
+ if sec := service.HTTP.CookieLifetime.Seconds(); sec != 0 {
+ schemaService.HTTP.CookieLifetime = Int(int(sec))
+ }
}
if service.HTTP.Certificates != nil {
certificates := []int{}
@@ -893,6 +1018,91 @@ func loadBalancerUpdateServiceOptsToSchema(opts LoadBalancerUpdateServiceOpts) s
return req
}
+func firewallCreateOptsToSchema(opts FirewallCreateOpts) schema.FirewallCreateRequest {
+ req := schema.FirewallCreateRequest{
+ Name: opts.Name,
+ }
+ if opts.Labels != nil {
+ req.Labels = &opts.Labels
+ }
+ for _, rule := range opts.Rules {
+ schemaRule := schema.FirewallRule{
+ Direction: string(rule.Direction),
+ Protocol: string(rule.Protocol),
+ Port: rule.Port,
+ Description: rule.Description,
+ }
+ switch rule.Direction {
+ case FirewallRuleDirectionOut:
+ schemaRule.DestinationIPs = make([]string, len(rule.DestinationIPs))
+ for i, destinationIP := range rule.DestinationIPs {
+ schemaRule.DestinationIPs[i] = destinationIP.String()
+ }
+ case FirewallRuleDirectionIn:
+ schemaRule.SourceIPs = make([]string, len(rule.SourceIPs))
+ for i, sourceIP := range rule.SourceIPs {
+ schemaRule.SourceIPs[i] = sourceIP.String()
+ }
+ }
+ req.Rules = append(req.Rules, schemaRule)
+ }
+ for _, res := range opts.ApplyTo {
+ schemaFirewallResource := schema.FirewallResource{
+ Type: string(res.Type),
+ }
+ switch res.Type {
+ case FirewallResourceTypeServer:
+ schemaFirewallResource.Server = &schema.FirewallResourceServer{
+ ID: res.Server.ID,
+ }
+ case FirewallResourceTypeLabelSelector:
+ schemaFirewallResource.LabelSelector = &schema.FirewallResourceLabelSelector{Selector: res.LabelSelector.Selector}
+ }
+
+ req.ApplyTo = append(req.ApplyTo, schemaFirewallResource)
+ }
+ return req
+}
+
+func firewallSetRulesOptsToSchema(opts FirewallSetRulesOpts) schema.FirewallActionSetRulesRequest {
+ req := schema.FirewallActionSetRulesRequest{Rules: []schema.FirewallRule{}}
+ for _, rule := range opts.Rules {
+ schemaRule := schema.FirewallRule{
+ Direction: string(rule.Direction),
+ Protocol: string(rule.Protocol),
+ Port: rule.Port,
+ Description: rule.Description,
+ }
+ switch rule.Direction {
+ case FirewallRuleDirectionOut:
+ schemaRule.DestinationIPs = make([]string, len(rule.DestinationIPs))
+ for i, destinationIP := range rule.DestinationIPs {
+ schemaRule.DestinationIPs[i] = destinationIP.String()
+ }
+ case FirewallRuleDirectionIn:
+ schemaRule.SourceIPs = make([]string, len(rule.SourceIPs))
+ for i, sourceIP := range rule.SourceIPs {
+ schemaRule.SourceIPs[i] = sourceIP.String()
+ }
+ }
+ req.Rules = append(req.Rules, schemaRule)
+ }
+ return req
+}
+
+func firewallResourceToSchema(resource FirewallResource) schema.FirewallResource {
+ s := schema.FirewallResource{
+ Type: string(resource.Type),
+ }
+ switch resource.Type {
+ case FirewallResourceTypeLabelSelector:
+ s.LabelSelector = &schema.FirewallResourceLabelSelector{Selector: resource.LabelSelector.Selector}
+ case FirewallResourceTypeServer:
+ s.Server = &schema.FirewallResourceServer{ID: resource.Server.ID}
+ }
+ return s
+}
+
func serverMetricsFromSchema(s *schema.ServerGetMetricsResponse) (*ServerMetrics, error) {
ms := ServerMetrics{
Start: s.Metrics.Start,
diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/certificate.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/certificate.go
index 251add4dd..a81b807a2 100644
--- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/certificate.go
+++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/certificate.go
@@ -2,17 +2,32 @@ package schema
import "time"
+// CertificateUsedByRef defines the schema of a resource using a certificate.
+type CertificateUsedByRef struct {
+ ID int `json:"id"`
+ Type string `json:"type"`
+}
+
+type CertificateStatusRef struct {
+ Issuance string `json:"issuance"`
+ Renewal string `json:"renewal"`
+ Error *Error `json:"error,omitempty"`
+}
+
// Certificate defines the schema of an certificate.
type Certificate struct {
- ID int `json:"id"`
- Name string `json:"name"`
- Labels map[string]string `json:"labels"`
- Certificate string `json:"certificate"`
- Created time.Time `json:"created"`
- NotValidBefore time.Time `json:"not_valid_before"`
- NotValidAfter time.Time `json:"not_valid_after"`
- DomainNames []string `json:"domain_names"`
- Fingerprint string `json:"fingerprint"`
+ ID int `json:"id"`
+ Name string `json:"name"`
+ Labels map[string]string `json:"labels"`
+ Type string `json:"type"`
+ Certificate string `json:"certificate"`
+ Created time.Time `json:"created"`
+ NotValidBefore time.Time `json:"not_valid_before"`
+ NotValidAfter time.Time `json:"not_valid_after"`
+ DomainNames []string `json:"domain_names"`
+ Fingerprint string `json:"fingerprint"`
+ Status *CertificateStatusRef `json:"status"`
+ UsedBy []CertificateUsedByRef `json:"used_by"`
}
// CertificateListResponse defines the schema of the response when
@@ -30,14 +45,17 @@ type CertificateGetResponse struct {
// CertificateCreateRequest defines the schema of the request to create a certificate.
type CertificateCreateRequest struct {
Name string `json:"name"`
- Certificate string `json:"certificate"`
- PrivateKey string `json:"private_key"`
+ Type string `json:"type"`
+ DomainNames []string `json:"domain_names,omitempty"`
+ Certificate string `json:"certificate,omitempty"`
+ PrivateKey string `json:"private_key,omitempty"`
Labels *map[string]string `json:"labels,omitempty"`
}
// CertificateCreateResponse defines the schema of the response when creating a certificate.
type CertificateCreateResponse struct {
Certificate Certificate `json:"certificate"`
+ Action *Action `json:"action"`
}
// CertificateUpdateRequest defines the schema of the request to update a certificate.
@@ -50,3 +68,9 @@ type CertificateUpdateRequest struct {
type CertificateUpdateResponse struct {
Certificate Certificate `json:"certificate"`
}
+
+// CertificateIssuanceRetryResponse defines the schema for the response of the
+// retry issuance endpoint.
+type CertificateIssuanceRetryResponse struct {
+ Action Action `json:"action"`
+}
diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/error.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/error.go
index 6bcb6ecad..2d5cf5ddd 100644
--- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/error.go
+++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/error.go
@@ -17,8 +17,7 @@ func (e *Error) UnmarshalJSON(data []byte) (err error) {
if err = json.Unmarshal(data, alias); err != nil {
return
}
- switch e.Code {
- case "invalid_input":
+ if e.Code == "invalid_input" {
details := ErrorDetailsInvalidInput{}
if err = json.Unmarshal(e.DetailsRaw, &details); err != nil {
return
diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/firewall.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/firewall.go
new file mode 100644
index 000000000..b085bbb13
--- /dev/null
+++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/firewall.go
@@ -0,0 +1,105 @@
+package schema
+
+import "time"
+
+// Firewall defines the schema of a Firewall.
+type Firewall struct {
+ ID int `json:"id"`
+ Name string `json:"name"`
+ Labels map[string]string `json:"labels"`
+ Created time.Time `json:"created"`
+ Rules []FirewallRule `json:"rules"`
+ AppliedTo []FirewallResource `json:"applied_to"`
+}
+
+// FirewallRule defines the schema of a Firewall rule.
+type FirewallRule struct {
+ Direction string `json:"direction"`
+ SourceIPs []string `json:"source_ips,omitempty"`
+ DestinationIPs []string `json:"destination_ips,omitempty"`
+ Protocol string `json:"protocol"`
+ Port *string `json:"port,omitempty"`
+ Description *string `json:"description,omitempty"`
+}
+
+// FirewallListResponse defines the schema of the response when listing Firewalls.
+type FirewallListResponse struct {
+ Firewalls []Firewall `json:"firewalls"`
+}
+
+// FirewallGetResponse defines the schema of the response when retrieving a single Firewall.
+type FirewallGetResponse struct {
+ Firewall Firewall `json:"firewall"`
+}
+
+// FirewallCreateRequest defines the schema of the request to create a Firewall.
+type FirewallCreateRequest struct {
+ Name string `json:"name"`
+ Labels *map[string]string `json:"labels,omitempty"`
+ Rules []FirewallRule `json:"rules,omitempty"`
+ ApplyTo []FirewallResource `json:"apply_to,omitempty"`
+}
+
+// FirewallResource defines the schema of a resource to apply the new Firewall on.
+type FirewallResource struct {
+ Type string `json:"type"`
+ Server *FirewallResourceServer `json:"server,omitempty"`
+ LabelSelector *FirewallResourceLabelSelector `json:"label_selector,omitempty"`
+}
+
+// FirewallResourceLabelSelector defines the schema of a LabelSelector to apply a Firewall on.
+type FirewallResourceLabelSelector struct {
+ Selector string `json:"selector"`
+}
+
+// FirewallResourceServer defines the schema of a Server to apply a Firewall on.
+type FirewallResourceServer struct {
+ ID int `json:"id"`
+}
+
+// FirewallCreateResponse defines the schema of the response when creating a Firewall.
+type FirewallCreateResponse struct {
+ Firewall Firewall `json:"firewall"`
+ Actions []Action `json:"actions"`
+}
+
+// FirewallUpdateRequest defines the schema of the request to update a Firewall.
+type FirewallUpdateRequest struct {
+ Name *string `json:"name,omitempty"`
+ Labels *map[string]string `json:"labels,omitempty"`
+}
+
+// FirewallUpdateResponse defines the schema of the response when updating a Firewall.
+type FirewallUpdateResponse struct {
+ Firewall Firewall `json:"firewall"`
+}
+
+// FirewallActionSetRulesRequest defines the schema of the request when setting Firewall rules.
+type FirewallActionSetRulesRequest struct {
+ Rules []FirewallRule `json:"rules"`
+}
+
+// FirewallActionSetRulesResponse defines the schema of the response when setting Firewall rules.
+type FirewallActionSetRulesResponse struct {
+ Actions []Action `json:"actions"`
+}
+
+// FirewallActionApplyToResourcesRequest defines the schema of the request when applying a Firewall on resources.
+type FirewallActionApplyToResourcesRequest struct {
+ ApplyTo []FirewallResource `json:"apply_to"`
+}
+
+// FirewallActionApplyToResourcesResponse defines the schema of the response when applying a Firewall on resources.
+type FirewallActionApplyToResourcesResponse struct {
+ Actions []Action `json:"actions"`
+}
+
+// FirewallActionRemoveFromResourcesRequest defines the schema of the request when removing a Firewall from resources.
+type FirewallActionRemoveFromResourcesRequest struct {
+ RemoveFrom []FirewallResource `json:"remove_from"`
+}
+
+// FirewallActionRemoveFromResourcesResponse defines the schema of the response when removing a Firewall from resources.
+type FirewallActionRemoveFromResourcesResponse struct {
+ Actions []Action `json:"actions"`
+}
diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/image.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/image.go
index c354d9ea9..7a3be8875 100644
--- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/image.go
+++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/image.go
@@ -19,6 +19,7 @@ type Image struct {
RapidDeploy bool `json:"rapid_deploy"`
Protection ImageProtection `json:"protection"`
Deprecated time.Time `json:"deprecated"`
+ Deleted time.Time `json:"deleted"`
Labels map[string]string `json:"labels"`
}
diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/load_balancer.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/load_balancer.go
index b9dbaa9a7..68adf5eb6 100644
--- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/load_balancer.go
+++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/load_balancer.go
@@ -27,11 +27,13 @@ type LoadBalancerPublicNet struct {
}
type LoadBalancerPublicNetIPv4 struct {
- IP string `json:"ip"`
+ IP string `json:"ip"`
+ DNSPtr string `json:"dns_ptr"`
}
type LoadBalancerPublicNetIPv6 struct {
- IP string `json:"ip"`
+ IP string `json:"ip"`
+ DNSPtr string `json:"dns_ptr"`
}
type LoadBalancerPrivateNet struct {
@@ -401,3 +403,16 @@ type LoadBalancerGetMetricsResponse struct {
type LoadBalancerTimeSeriesVals struct {
Values []interface{} `json:"values"`
}
+
+// LoadBalancerActionChangeDNSPtrRequest defines the schema for the request to
+// change a Load Balancer reverse DNS pointer.
+type LoadBalancerActionChangeDNSPtrRequest struct {
+ IP string `json:"ip"`
+ DNSPtr *string `json:"dns_ptr"`
+}
+
+// LoadBalancerActionChangeDNSPtrResponse defines the schema of the response when
+// creating a change_dns_ptr Load Balancer action.
+type LoadBalancerActionChangeDNSPtrResponse struct {
+ Action Action `json:"action"`
+}
diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/placement_group.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/placement_group.go
new file mode 100644
index 000000000..6bee4390c
--- /dev/null
+++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/placement_group.go
@@ -0,0 +1,40 @@
+package schema
+
+import "time"
+
+type PlacementGroup struct {
+ ID int `json:"id"`
+ Name string `json:"name"`
+ Labels map[string]string `json:"labels"`
+ Created time.Time `json:"created"`
+ Servers []int `json:"servers"`
+ Type string `json:"type"`
+}
+
+type PlacementGroupListResponse struct {
+ PlacementGroups []PlacementGroup `json:"placement_groups"`
+}
+
+type PlacementGroupGetResponse struct {
+ PlacementGroup PlacementGroup `json:"placement_group"`
+}
+
+type PlacementGroupCreateRequest struct {
+ Name string `json:"name"`
+ Labels *map[string]string `json:"labels,omitempty"`
+ Type string `json:"type"`
+}
+
+type PlacementGroupCreateResponse struct {
+ PlacementGroup PlacementGroup `json:"placement_group"`
+ Action *Action `json:"action"`
+}
+
+type PlacementGroupUpdateRequest struct {
+ Name *string `json:"name,omitempty"`
+ Labels *map[string]string `json:"labels,omitempty"`
+}
+
+type PlacementGroupUpdateResponse struct {
+ PlacementGroup PlacementGroup `json:"placement_group"`
+}
diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/pricing.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/pricing.go
index 074ae4354..277fa1a39 100644
--- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/pricing.go
+++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/pricing.go
@@ -6,10 +6,12 @@ type Pricing struct {
VATRate string `json:"vat_rate"`
Image PricingImage `json:"image"`
FloatingIP PricingFloatingIP `json:"floating_ip"`
+ FloatingIPs []PricingFloatingIPType `json:"floating_ips"`
Traffic PricingTraffic `json:"traffic"`
ServerBackup PricingServerBackup `json:"server_backup"`
ServerTypes []PricingServerType `json:"server_types"`
LoadBalancerTypes []PricingLoadBalancerType `json:"load_balancer_types"`
+ Volume PricingVolume `json:"volume"`
}
// Price defines the schema of a single price with net and gross amount.
@@ -28,11 +30,29 @@ type PricingFloatingIP struct {
PriceMonthly Price `json:"price_monthly"`
}
+// PricingFloatingIPType defines the schema of pricing information for a Floating IP per type.
+type PricingFloatingIPType struct {
+ Type string `json:"type"`
+ Prices []PricingFloatingIPTypePrice `json:"prices"`
+}
+
+// PricingFloatingIPTypePrice defines the schema of pricing information for a Floating IP
+// type at a location.
+type PricingFloatingIPTypePrice struct {
+ Location string `json:"location"`
+ PriceMonthly Price `json:"price_monthly"`
+}
+
// PricingTraffic defines the schema of pricing information for traffic.
type PricingTraffic struct {
PricePerTB Price `json:"price_per_tb"`
}
+// PricingVolume defines the schema of pricing information for a Volume.
+type PricingVolume struct {
+ PricePerGBPerMonth Price `json:"price_per_gb_month"`
+}
+
// PricingServerBackup defines the schema of pricing information for server backups.
type PricingServerBackup struct {
Percentage string `json:"percentage"`
diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/server.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/server.go
index 2a5ca1306..229e2889b 100644
--- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/server.go
+++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/server.go
@@ -24,6 +24,7 @@ type Server struct {
Labels map[string]string `json:"labels"`
Volumes []int `json:"volumes"`
PrimaryDiskSize int `json:"primary_disk_size"`
+ PlacementGroup *PlacementGroup `json:"placement_group"`
}
// ServerProtection defines the schema of a server's resource protection.
@@ -38,6 +39,7 @@ type ServerPublicNet struct {
IPv4 ServerPublicNetIPv4 `json:"ipv4"`
IPv6 ServerPublicNetIPv6 `json:"ipv6"`
FloatingIPs []int `json:"floating_ips"`
+ Firewalls []ServerFirewall `json:"firewalls"`
}
// ServerPublicNetIPv4 defines the schema of a server's public
@@ -63,6 +65,13 @@ type ServerPublicNetIPv6DNSPtr struct {
DNSPtr string `json:"dns_ptr"`
}
+// ServerFirewall defines the schema of a Server's Firewalls on
+// a certain network interface.
+type ServerFirewall struct {
+ ID int `json:"id"`
+ Status string `json:"status"`
+}
+
// ServerPrivateNet defines the schema of a server's private network information.
type ServerPrivateNet struct {
Network int `json:"network"`
@@ -86,18 +95,25 @@ type ServerListResponse struct {
// ServerCreateRequest defines the schema for the request to
// create a server.
type ServerCreateRequest struct {
- Name string `json:"name"`
- ServerType interface{} `json:"server_type"` // int or string
- Image interface{} `json:"image"` // int or string
- SSHKeys []int `json:"ssh_keys,omitempty"`
- Location string `json:"location,omitempty"`
- Datacenter string `json:"datacenter,omitempty"`
- UserData string `json:"user_data,omitempty"`
- StartAfterCreate *bool `json:"start_after_create,omitempty"`
- Labels *map[string]string `json:"labels,omitempty"`
- Automount *bool `json:"automount,omitempty"`
- Volumes []int `json:"volumes,omitempty"`
- Networks []int `json:"networks,omitempty"`
+ Name string `json:"name"`
+ ServerType interface{} `json:"server_type"` // int or string
+ Image interface{} `json:"image"` // int or string
+ SSHKeys []int `json:"ssh_keys,omitempty"`
+ Location string `json:"location,omitempty"`
+ Datacenter string `json:"datacenter,omitempty"`
+ UserData string `json:"user_data,omitempty"`
+ StartAfterCreate *bool `json:"start_after_create,omitempty"`
+ Labels *map[string]string `json:"labels,omitempty"`
+ Automount *bool `json:"automount,omitempty"`
+ Volumes []int `json:"volumes,omitempty"`
+ Networks []int `json:"networks,omitempty"`
+ Firewalls []ServerCreateFirewalls `json:"firewalls,omitempty"`
+ PlacementGroup int `json:"placement_group,omitempty"`
+}
+
+// ServerCreateFirewalls defines which Firewalls to apply when creating a Server.
+type ServerCreateFirewalls struct {
+ Firewall int `json:"firewall"`
}
// ServerCreateResponse defines the schema of the response when
@@ -381,3 +397,21 @@ type ServerGetMetricsResponse struct {
type ServerTimeSeriesVals struct {
Values []interface{} `json:"values"`
}
+
+// ServerActionAddToPlacementGroupRequest defines the schema for the request to
+// add a server to a placement group.
+type ServerActionAddToPlacementGroupRequest struct {
+ PlacementGroup int `json:"placement_group"`
+}
+
+// ServerActionAddToPlacementGroupResponse defines the schema of the response when
+// creating an add_to_placement_group server action.
+type ServerActionAddToPlacementGroupResponse struct {
+ Action Action `json:"action"`
+}
+
+// ServerActionRemoveFromPlacementGroupResponse defines the schema of the response when
+// creating a remove_from_placement_group server action.
+type ServerActionRemoveFromPlacementGroupResponse struct {
+ Action Action `json:"action"`
+}
diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/server.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/server.go
index 5993d83dd..4a0f2b11a 100644
--- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/server.go
+++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/server.go
@@ -37,6 +37,7 @@ type Server struct {
Labels map[string]string
Volumes []*Volume
PrimaryDiskSize int
+ PlacementGroup *PlacementGroup
}
// ServerProtection represents the protection level of a server.
@@ -76,11 +77,23 @@ const (
ServerStatusUnknown ServerStatus = "unknown"
)
+// FirewallStatus specifies a Firewall's status.
+type FirewallStatus string
+
+const (
+ // FirewallStatusPending is the status when a Firewall is pending.
+ FirewallStatusPending FirewallStatus = "pending"
+
+ // FirewallStatusApplied is the status when a Firewall is applied.
+ FirewallStatusApplied FirewallStatus = "applied"
+)
+
// ServerPublicNet represents a server's public network.
type ServerPublicNet struct {
IPv4 ServerPublicNetIPv4
IPv6 ServerPublicNetIPv6
FloatingIPs []*FloatingIP
+ Firewalls []*ServerFirewallStatus
}
// ServerPublicNetIPv4 represents a server's public IPv4 address.
@@ -90,7 +103,7 @@ type ServerPublicNetIPv4 struct {
DNSPtr string
}
-// ServerPublicNetIPv6 represents a server's public IPv6 network and address.
+// ServerPublicNetIPv6 represents a Server's public IPv6 network and address.
type ServerPublicNetIPv6 struct {
IP net.IP
Network *net.IPNet
@@ -98,7 +111,7 @@ type ServerPublicNetIPv6 struct {
DNSPtr map[string]string
}
-// ServerPrivateNet defines the schema of a server's private network information.
+// ServerPrivateNet defines the schema of a Server's private network information.
type ServerPrivateNet struct {
Network *Network
IP net.IP
@@ -111,6 +124,13 @@ func (s *ServerPublicNetIPv6) DNSPtrForIP(ip net.IP) string {
return s.DNSPtr[ip.String()]
}
+// ServerFirewallStatus represents a Firewall and its status on a Server's
+// network interface.
+type ServerFirewallStatus struct {
+ Firewall Firewall
+ Status FirewallStatus
+}
+
// ServerRescueType represents rescue types.
type ServerRescueType string
@@ -121,6 +141,44 @@ const (
ServerRescueTypeFreeBSD64 ServerRescueType = "freebsd64"
)
+// changeDNSPtr changes or resets the reverse DNS pointer for a IP address.
+// Pass a nil ptr to reset the reverse DNS pointer to its default value.
+func (s *Server) changeDNSPtr(ctx context.Context, client *Client, ip net.IP, ptr *string) (*Action, *Response, error) {
+ reqBody := schema.ServerActionChangeDNSPtrRequest{
+ IP: ip.String(),
+ DNSPtr: ptr,
+ }
+ reqBodyData, err := json.Marshal(reqBody)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ path := fmt.Sprintf("/servers/%d/actions/change_dns_ptr", s.ID)
+ req, err := client.NewRequest(ctx, "POST", path, bytes.NewReader(reqBodyData))
+ if err != nil {
+ return nil, nil, err
+ }
+
+ respBody := schema.ServerActionChangeDNSPtrResponse{}
+ resp, err := client.Do(req, &respBody)
+ if err != nil {
+ return nil, resp, err
+ }
+ return ActionFromSchema(respBody.Action), resp, nil
+}
+
+// GetDNSPtrForIP searches for the dns assigned to the given IP address.
+// It returns an error if there is no dns set for the given IP address.
+func (s *Server) GetDNSPtrForIP(ip net.IP) (string, error) {
+ if net.IP.Equal(s.PublicNet.IPv4.IP, ip) {
+ return s.PublicNet.IPv4.DNSPtr, nil
+ } else if dns, ok := s.PublicNet.IPv6.DNSPtr[ip.String()]; ok {
+ return dns, nil
+ }
+
+ return "", DNSNotFoundError{ip}
+}
+
// ServerClient is a client for the servers API.
type ServerClient struct {
client *Client
@@ -215,7 +273,7 @@ func (c *ServerClient) All(ctx context.Context) ([]*Server, error) {
func (c *ServerClient) AllWithOpts(ctx context.Context, opts ServerListOpts) ([]*Server, error) {
allServers := []*Server{}
- _, err := c.client.all(func(page int) (*Response, error) {
+ err := c.client.all(func(page int) (*Response, error) {
opts.Page = page
servers, resp, err := c.List(ctx, opts)
if err != nil {
@@ -245,6 +303,13 @@ type ServerCreateOpts struct {
Automount *bool
Volumes []*Volume
Networks []*Network
+ Firewalls []*ServerCreateFirewall
+ PlacementGroup *PlacementGroup
+}
+
+// ServerCreateFirewall defines which Firewalls to apply when creating a Server.
+type ServerCreateFirewall struct {
+ Firewall Firewall
}
// Validate checks if options are valid.
@@ -305,7 +370,11 @@ func (c *ServerClient) Create(ctx context.Context, opts ServerCreateOpts) (Serve
for _, network := range opts.Networks {
reqBody.Networks = append(reqBody.Networks, network.ID)
}
-
+ for _, firewall := range opts.Firewalls {
+ reqBody.Firewalls = append(reqBody.Firewalls, schema.ServerCreateFirewalls{
+ Firewall: firewall.Firewall.ID,
+ })
+ }
if opts.Location != nil {
if opts.Location.ID != 0 {
reqBody.Location = strconv.Itoa(opts.Location.ID)
@@ -320,6 +389,9 @@ func (c *ServerClient) Create(ctx context.Context, opts ServerCreateOpts) (Serve
reqBody.Datacenter = opts.Datacenter.Name
}
}
+ if opts.PlacementGroup != nil {
+ reqBody.PlacementGroup = opts.PlacementGroup.ID
+ }
reqBodyData, err := json.Marshal(reqBody)
if err != nil {
return ServerCreateResult{}, nil, err
@@ -774,27 +846,11 @@ func (c *ServerClient) ChangeType(ctx context.Context, server *Server, opts Serv
// ChangeDNSPtr changes or resets the reverse DNS pointer for a server IP address.
// Pass a nil ptr to reset the reverse DNS pointer to its default value.
func (c *ServerClient) ChangeDNSPtr(ctx context.Context, server *Server, ip string, ptr *string) (*Action, *Response, error) {
- reqBody := schema.ServerActionChangeDNSPtrRequest{
- IP: ip,
- DNSPtr: ptr,
- }
- reqBodyData, err := json.Marshal(reqBody)
- if err != nil {
- return nil, nil, err
+ netIP := net.ParseIP(ip)
+ if netIP == nil {
+ return nil, nil, InvalidIPError{ip}
}
-
- path := fmt.Sprintf("/servers/%d/actions/change_dns_ptr", server.ID)
- req, err := c.client.NewRequest(ctx, "POST", path, bytes.NewReader(reqBodyData))
- if err != nil {
- return nil, nil, err
- }
-
- respBody := schema.ServerActionChangeDNSPtrResponse{}
- resp, err := c.client.Do(req, &respBody)
- if err != nil {
- return nil, resp, err
- }
- return ActionFromSchema(respBody.Action), resp, nil
+ return server.changeDNSPtr(ctx, c.client, net.ParseIP(ip), ptr)
}
// ServerChangeProtectionOpts specifies options for changing the resource protection level of a server.
@@ -1001,7 +1057,7 @@ func (o *ServerGetMetricsOpts) addQueryParams(req *http.Request) error {
return nil
}
-// ServerMetrics contains the metrics requested for a server.
+// ServerMetrics contains the metrics requested for a Server.
type ServerMetrics struct {
Start time.Time
End time.Time
@@ -1015,7 +1071,7 @@ type ServerMetricsValue struct {
Value string
}
-// GetMetrics obtains metrics for server.
+// GetMetrics obtains metrics for Server.
func (c *ServerClient) GetMetrics(ctx context.Context, server *Server, opts ServerGetMetricsOpts) (*ServerMetrics, *Response, error) {
var respBody schema.ServerGetMetricsResponse
@@ -1041,3 +1097,40 @@ func (c *ServerClient) GetMetrics(ctx context.Context, server *Server, opts Serv
}
return ms, resp, nil
}
+
+func (c *ServerClient) AddToPlacementGroup(ctx context.Context, server *Server, placementGroup *PlacementGroup) (*Action, *Response, error) {
+ reqBody := schema.ServerActionAddToPlacementGroupRequest{
+ PlacementGroup: placementGroup.ID,
+ }
+ reqBodyData, err := json.Marshal(reqBody)
+ if err != nil {
+ return nil, nil, err
+ }
+ path := fmt.Sprintf("/servers/%d/actions/add_to_placement_group", server.ID)
+ req, err := c.client.NewRequest(ctx, "POST", path, bytes.NewReader(reqBodyData))
+ if err != nil {
+ return nil, nil, err
+ }
+
+ respBody := schema.ServerActionAddToPlacementGroupResponse{}
+ resp, err := c.client.Do(req, &respBody)
+ if err != nil {
+ return nil, resp, err
+ }
+ return ActionFromSchema(respBody.Action), resp, err
+}
+
+func (c *ServerClient) RemoveFromPlacementGroup(ctx context.Context, server *Server) (*Action, *Response, error) {
+ path := fmt.Sprintf("/servers/%d/actions/remove_from_placement_group", server.ID)
+ req, err := c.client.NewRequest(ctx, "POST", path, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ respBody := schema.ServerActionRemoveFromPlacementGroupResponse{}
+ resp, err := c.client.Do(req, &respBody)
+ if err != nil {
+ return nil, resp, err
+ }
+ return ActionFromSchema(respBody.Action), resp, err
+}
diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/server_type.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/server_type.go
index 043945f47..a681d0918 100644
--- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/server_type.go
+++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/server_type.go
@@ -40,7 +40,7 @@ const (
// CPUTypeShared is the type for shared CPU.
CPUTypeShared CPUType = "shared"
- //CPUTypeDedicated is the type for dedicated CPU.
+ // CPUTypeDedicated is the type for dedicated CPU.
CPUTypeDedicated CPUType = "dedicated"
)
@@ -132,7 +132,7 @@ func (c *ServerTypeClient) All(ctx context.Context) ([]*ServerType, error) {
opts := ServerTypeListOpts{}
opts.PerPage = 50
- _, err := c.client.all(func(page int) (*Response, error) {
+ err := c.client.all(func(page int) (*Response, error) {
opts.Page = page
serverTypes, resp, err := c.List(ctx, opts)
if err != nil {
diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/ssh_key.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/ssh_key.go
index f5be20b8d..52195e770 100644
--- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/ssh_key.go
+++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/ssh_key.go
@@ -126,7 +126,7 @@ func (c *SSHKeyClient) All(ctx context.Context) ([]*SSHKey, error) {
func (c *SSHKeyClient) AllWithOpts(ctx context.Context, opts SSHKeyListOpts) ([]*SSHKey, error) {
allSSHKeys := []*SSHKey{}
- _, err := c.client.all(func(page int) (*Response, error) {
+ err := c.client.all(func(page int) (*Response, error) {
opts.Page = page
sshKeys, resp, err := c.List(ctx, opts)
if err != nil {
diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/testing.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/testing.go
new file mode 100644
index 000000000..a3cfb2f0c
--- /dev/null
+++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/testing.go
@@ -0,0 +1,18 @@
+package hcloud
+
+import (
+ "testing"
+ "time"
+)
+
+const apiTimestampFormat = "2006-01-02T15:04:05-07:00"
+
+func mustParseTime(t *testing.T, layout, value string) time.Time {
+ t.Helper()
+
+ ts, err := time.Parse(layout, value)
+ if err != nil {
+ t.Fatalf("parse time: layout %v: value %v: %v", layout, value, err)
+ }
+ return ts
+}
diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/volume.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/volume.go
index fd32271cf..53559365d 100644
--- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/volume.go
+++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/volume.go
@@ -137,7 +137,7 @@ func (c *VolumeClient) All(ctx context.Context) ([]*Volume, error) {
func (c *VolumeClient) AllWithOpts(ctx context.Context, opts VolumeListOpts) ([]*Volume, error) {
allVolumes := []*Volume{}
- _, err := c.client.all(func(page int) (*Response, error) {
+ err := c.client.all(func(page int) (*Response, error) {
opts.Page = page
volumes, resp, err := c.List(ctx, opts)
if err != nil {
diff --git a/vendor/github.com/imdario/mergo/.deepsource.toml b/vendor/github.com/imdario/mergo/.deepsource.toml
new file mode 100644
index 000000000..8a0681af8
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/.deepsource.toml
@@ -0,0 +1,12 @@
+version = 1
+
+test_patterns = [
+ "*_test.go"
+]
+
+[[analyzers]]
+name = "go"
+enabled = true
+
+ [analyzers.meta]
+ import_path = "github.com/imdario/mergo"
\ No newline at end of file
diff --git a/vendor/github.com/imdario/mergo/.gitignore b/vendor/github.com/imdario/mergo/.gitignore
new file mode 100644
index 000000000..529c3412b
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/.gitignore
@@ -0,0 +1,33 @@
+#### joe made this: http://goel.io/joe
+
+#### go ####
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
+
+#### vim ####
+# Swap
+[._]*.s[a-v][a-z]
+[._]*.sw[a-p]
+[._]s[a-v][a-z]
+[._]sw[a-p]
+
+# Session
+Session.vim
+
+# Temporary
+.netrwhist
+*~
+# Auto-generated tag files
+tags
diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml
new file mode 100644
index 000000000..dad29725f
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/.travis.yml
@@ -0,0 +1,9 @@
+language: go
+install:
+ - go get -t
+ - go get golang.org/x/tools/cmd/cover
+ - go get github.com/mattn/goveralls
+script:
+ - go test -race -v ./...
+after_script:
+ - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN
diff --git a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md b/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..469b44907
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md
@@ -0,0 +1,46 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/imdario/mergo/LICENSE b/vendor/github.com/imdario/mergo/LICENSE
new file mode 100644
index 000000000..686680298
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2013 Dario Castañé. All rights reserved.
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md
new file mode 100644
index 000000000..876abb500
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/README.md
@@ -0,0 +1,247 @@
+# Mergo
+
+
+[![GoDoc][3]][4]
+[![GitHub release][5]][6]
+[![GoCard][7]][8]
+[![Build Status][1]][2]
+[![Coverage Status][9]][10]
+[![Sourcegraph][11]][12]
+[![FOSSA Status][13]][14]
+
+[![GoCenter Kudos][15]][16]
+
+[1]: https://travis-ci.org/imdario/mergo.png
+[2]: https://travis-ci.org/imdario/mergo
+[3]: https://godoc.org/github.com/imdario/mergo?status.svg
+[4]: https://godoc.org/github.com/imdario/mergo
+[5]: https://img.shields.io/github/release/imdario/mergo.svg
+[6]: https://github.com/imdario/mergo/releases
+[7]: https://goreportcard.com/badge/imdario/mergo
+[8]: https://goreportcard.com/report/github.com/imdario/mergo
+[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master
+[10]: https://coveralls.io/github/imdario/mergo?branch=master
+[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg
+[12]: https://sourcegraph.com/github.com/imdario/mergo?badge
+[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield
+[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield
+[15]: https://search.gocenter.io/api/ui/badge/github.com%2Fimdario%2Fmergo
+[16]: https://search.gocenter.io/github.com/imdario/mergo
+
+A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
+
+Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
+
+Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche.
+
+## Status
+
+It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
+
+### Important note
+
+Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules.
+
+Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code.
+
+If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
+
+### Donations
+
+If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes:
+
+
+[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo)
+[![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo)
+
+
+### Mergo in the wild
+
+- [moby/moby](https://github.com/moby/moby)
+- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
+- [vmware/dispatch](https://github.com/vmware/dispatch)
+- [Shopify/themekit](https://github.com/Shopify/themekit)
+- [imdario/zas](https://github.com/imdario/zas)
+- [matcornic/hermes](https://github.com/matcornic/hermes)
+- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go)
+- [kataras/iris](https://github.com/kataras/iris)
+- [michaelsauter/crane](https://github.com/michaelsauter/crane)
+- [go-task/task](https://github.com/go-task/task)
+- [sensu/uchiwa](https://github.com/sensu/uchiwa)
+- [ory/hydra](https://github.com/ory/hydra)
+- [sisatech/vcli](https://github.com/sisatech/vcli)
+- [dairycart/dairycart](https://github.com/dairycart/dairycart)
+- [projectcalico/felix](https://github.com/projectcalico/felix)
+- [resin-os/balena](https://github.com/resin-os/balena)
+- [go-kivik/kivik](https://github.com/go-kivik/kivik)
+- [Telefonica/govice](https://github.com/Telefonica/govice)
+- [supergiant/supergiant](supergiant/supergiant)
+- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce)
+- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy)
+- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel)
+- [EagerIO/Stout](https://github.com/EagerIO/Stout)
+- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api)
+- [russross/canvasassignments](https://github.com/russross/canvasassignments)
+- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api)
+- [casualjim/exeggutor](https://github.com/casualjim/exeggutor)
+- [divshot/gitling](https://github.com/divshot/gitling)
+- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl)
+- [andrerocker/deploy42](https://github.com/andrerocker/deploy42)
+- [elwinar/rambler](https://github.com/elwinar/rambler)
+- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman)
+- [jfbus/impressionist](https://github.com/jfbus/impressionist)
+- [Jmeyering/zealot](https://github.com/Jmeyering/zealot)
+- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host)
+- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go)
+- [thoas/picfit](https://github.com/thoas/picfit)
+- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server)
+- [jnuthong/item_search](https://github.com/jnuthong/item_search)
+- [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
+- [janoszen/containerssh](https://github.com/janoszen/containerssh)
+
+## Install
+
+ go get github.com/imdario/mergo
+
+ // use in your .go code
+ import (
+ "github.com/imdario/mergo"
+ )
+
+## Usage
+
+You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
+
+```go
+if err := mergo.Merge(&dst, src); err != nil {
+ // ...
+}
+```
+
+Also, you can merge overwriting values using the transformer `WithOverride`.
+
+```go
+if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
+ // ...
+}
+```
+
+Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field.
+
+```go
+if err := mergo.Map(&dst, srcMap); err != nil {
+ // ...
+}
+```
+
+Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values.
+
+Here is a nice example:
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/imdario/mergo"
+)
+
+type Foo struct {
+ A string
+ B int64
+}
+
+func main() {
+ src := Foo{
+ A: "one",
+ B: 2,
+ }
+ dest := Foo{
+ A: "two",
+ }
+ mergo.Merge(&dest, src)
+ fmt.Println(dest)
+ // Will print
+ // {two 2}
+}
+```
+
+Note: if test are failing due missing package, please execute:
+
+ go get gopkg.in/yaml.v2
+
+### Transformers
+
+Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`?
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/imdario/mergo"
+ "reflect"
+ "time"
+)
+
+type timeTransformer struct {
+}
+
+func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
+ if typ == reflect.TypeOf(time.Time{}) {
+ return func(dst, src reflect.Value) error {
+ if dst.CanSet() {
+ isZero := dst.MethodByName("IsZero")
+ result := isZero.Call([]reflect.Value{})
+ if result[0].Bool() {
+ dst.Set(src)
+ }
+ }
+ return nil
+ }
+ }
+ return nil
+}
+
+type Snapshot struct {
+ Time time.Time
+ // ...
+}
+
+func main() {
+ src := Snapshot{time.Now()}
+ dest := Snapshot{}
+ mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
+ fmt.Println(dest)
+ // Will print
+ // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
+}
+```
+
+
+## Contact me
+
+If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
+
+## About
+
+Written by [Dario Castañé](http://dario.im).
+
+## Top Contributors
+
+[![0](https://sourcerer.io/fame/imdario/imdario/mergo/images/0)](https://sourcerer.io/fame/imdario/imdario/mergo/links/0)
+[![1](https://sourcerer.io/fame/imdario/imdario/mergo/images/1)](https://sourcerer.io/fame/imdario/imdario/mergo/links/1)
+[![2](https://sourcerer.io/fame/imdario/imdario/mergo/images/2)](https://sourcerer.io/fame/imdario/imdario/mergo/links/2)
+[![3](https://sourcerer.io/fame/imdario/imdario/mergo/images/3)](https://sourcerer.io/fame/imdario/imdario/mergo/links/3)
+[![4](https://sourcerer.io/fame/imdario/imdario/mergo/images/4)](https://sourcerer.io/fame/imdario/imdario/mergo/links/4)
+[![5](https://sourcerer.io/fame/imdario/imdario/mergo/images/5)](https://sourcerer.io/fame/imdario/imdario/mergo/links/5)
+[![6](https://sourcerer.io/fame/imdario/imdario/mergo/images/6)](https://sourcerer.io/fame/imdario/imdario/mergo/links/6)
+[![7](https://sourcerer.io/fame/imdario/imdario/mergo/images/7)](https://sourcerer.io/fame/imdario/imdario/mergo/links/7)
+
+
+## License
+
+[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).
+
+
+[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large)
diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go
new file mode 100644
index 000000000..fcd985f99
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/doc.go
@@ -0,0 +1,143 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
+
+Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
+
+Status
+
+It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc.
+
+Important note
+
+Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 as stable but not bug-free. Also, this version adds support for go modules.
+
+Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code.
+
+If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u github.com/imdario/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
+
+Install
+
+Do your usual installation procedure:
+
+ go get github.com/imdario/mergo
+
+ // use in your .go code
+ import (
+ "github.com/imdario/mergo"
+ )
+
+Usage
+
+You can only merge same-type structs with exported fields initialized as zero value of their type and same-type maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
+
+ if err := mergo.Merge(&dst, src); err != nil {
+ // ...
+ }
+
+Also, you can merge overwriting values using the transformer WithOverride.
+
+ if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
+ // ...
+ }
+
+Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field.
+
+ if err := mergo.Map(&dst, srcMap); err != nil {
+ // ...
+ }
+
+Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values.
+
+Here is a nice example:
+
+ package main
+
+ import (
+ "fmt"
+ "github.com/imdario/mergo"
+ )
+
+ type Foo struct {
+ A string
+ B int64
+ }
+
+ func main() {
+ src := Foo{
+ A: "one",
+ B: 2,
+ }
+ dest := Foo{
+ A: "two",
+ }
+ mergo.Merge(&dest, src)
+ fmt.Println(dest)
+ // Will print
+ // {two 2}
+ }
+
+Transformers
+
+Transformers allow merging specific types differently from the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero time.Time?
+
+ package main
+
+ import (
+ "fmt"
+ "github.com/imdario/mergo"
+ "reflect"
+ "time"
+ )
+
+ type timeTransformer struct {
+ }
+
+ func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
+ if typ == reflect.TypeOf(time.Time{}) {
+ return func(dst, src reflect.Value) error {
+ if dst.CanSet() {
+ isZero := dst.MethodByName("IsZero")
+ result := isZero.Call([]reflect.Value{})
+ if result[0].Bool() {
+ dst.Set(src)
+ }
+ }
+ return nil
+ }
+ }
+ return nil
+ }
+
+ type Snapshot struct {
+ Time time.Time
+ // ...
+ }
+
+ func main() {
+ src := Snapshot{time.Now()}
+ dest := Snapshot{}
+ mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
+ fmt.Println(dest)
+ // Will print
+ // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
+ }
+
+Contact me
+
+If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario
+
+About
+
+Written by Dario Castañé: https://da.rio.hn
+
+License
+
+BSD 3-Clause license, as Go language.
+
+*/
+package mergo
diff --git a/vendor/github.com/imdario/mergo/go.mod b/vendor/github.com/imdario/mergo/go.mod
new file mode 100644
index 000000000..3d689d93e
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/go.mod
@@ -0,0 +1,5 @@
+module github.com/imdario/mergo
+
+go 1.13
+
+require gopkg.in/yaml.v2 v2.3.0
diff --git a/vendor/github.com/imdario/mergo/go.sum b/vendor/github.com/imdario/mergo/go.sum
new file mode 100644
index 000000000..168980da5
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/go.sum
@@ -0,0 +1,4 @@
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go
new file mode 100644
index 000000000..a13a7ee46
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/map.go
@@ -0,0 +1,178 @@
+// Copyright 2014 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+ "fmt"
+ "reflect"
+ "unicode"
+ "unicode/utf8"
+)
+
+func changeInitialCase(s string, mapper func(rune) rune) string {
+ if s == "" {
+ return s
+ }
+ r, n := utf8.DecodeRuneInString(s)
+ return string(mapper(r)) + s[n:]
+}
+
+func isExported(field reflect.StructField) bool {
+ r, _ := utf8.DecodeRuneInString(field.Name)
+ return r >= 'A' && r <= 'Z'
+}
+
+// Traverses recursively both values, assigning src's fields values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
+ overwrite := config.Overwrite
+ if dst.CanAddr() {
+ addr := dst.UnsafeAddr()
+ h := 17 * addr
+ seen := visited[h]
+ typ := dst.Type()
+ for p := seen; p != nil; p = p.next {
+ if p.ptr == addr && p.typ == typ {
+ return nil
+ }
+ }
+ // Remember, remember...
+ visited[h] = &visit{addr, typ, seen}
+ }
+ zeroValue := reflect.Value{}
+ switch dst.Kind() {
+ case reflect.Map:
+ dstMap := dst.Interface().(map[string]interface{})
+ for i, n := 0, src.NumField(); i < n; i++ {
+ srcType := src.Type()
+ field := srcType.Field(i)
+ if !isExported(field) {
+ continue
+ }
+ fieldName := field.Name
+ fieldName = changeInitialCase(fieldName, unicode.ToLower)
+ if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) {
+ dstMap[fieldName] = src.Field(i).Interface()
+ }
+ }
+ case reflect.Ptr:
+ if dst.IsNil() {
+ v := reflect.New(dst.Type().Elem())
+ dst.Set(v)
+ }
+ dst = dst.Elem()
+ fallthrough
+ case reflect.Struct:
+ srcMap := src.Interface().(map[string]interface{})
+ for key := range srcMap {
+ config.overwriteWithEmptyValue = true
+ srcValue := srcMap[key]
+ fieldName := changeInitialCase(key, unicode.ToUpper)
+ dstElement := dst.FieldByName(fieldName)
+ if dstElement == zeroValue {
+ // We discard it because the field doesn't exist.
+ continue
+ }
+ srcElement := reflect.ValueOf(srcValue)
+ dstKind := dstElement.Kind()
+ srcKind := srcElement.Kind()
+ if srcKind == reflect.Ptr && dstKind != reflect.Ptr {
+ srcElement = srcElement.Elem()
+ srcKind = reflect.TypeOf(srcElement.Interface()).Kind()
+ } else if dstKind == reflect.Ptr {
+ // Can this work? I guess it can't.
+ if srcKind != reflect.Ptr && srcElement.CanAddr() {
+ srcPtr := srcElement.Addr()
+ srcElement = reflect.ValueOf(srcPtr)
+ srcKind = reflect.Ptr
+ }
+ }
+
+ if !srcElement.IsValid() {
+ continue
+ }
+ if srcKind == dstKind {
+ if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+ return
+ }
+ } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface {
+ if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+ return
+ }
+ } else if srcKind == reflect.Map {
+ if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil {
+ return
+ }
+ } else {
+ return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind)
+ }
+ }
+ }
+ return
+}
+
+// Map sets fields' values in dst from src.
+// src can be a map with string keys or a struct. dst must be the opposite:
+// if src is a map, dst must be a valid pointer to struct. If src is a struct,
+// dst must be map[string]interface{}.
+// It won't merge unexported (private) fields and will do recursively
+// any exported field.
+// If dst is a map, keys will be src fields' names in lower camel case.
+// Missing key in src that doesn't match a field in dst will be skipped. This
+// doesn't apply if dst is a map.
+// This is separated method from Merge because it is cleaner and it keeps sane
+// semantics: merging equal types, mapping different (restricted) types.
+func Map(dst, src interface{}, opts ...func(*Config)) error {
+ return _map(dst, src, opts...)
+}
+
+// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by
+// non-empty src attribute values.
+// Deprecated: Use Map(…) with WithOverride
+func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
+ return _map(dst, src, append(opts, WithOverride)...)
+}
+
+func _map(dst, src interface{}, opts ...func(*Config)) error {
+ if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
+ return ErrNonPointerAgument
+ }
+ var (
+ vDst, vSrc reflect.Value
+ err error
+ )
+ config := &Config{}
+
+ for _, opt := range opts {
+ opt(config)
+ }
+
+ if vDst, vSrc, err = resolveValues(dst, src); err != nil {
+ return err
+ }
+ // To be friction-less, we redirect equal-type arguments
+ // to deepMerge. Only because arguments can be anything.
+ if vSrc.Kind() == vDst.Kind() {
+ return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
+ }
+ switch vSrc.Kind() {
+ case reflect.Struct:
+ if vDst.Kind() != reflect.Map {
+ return ErrExpectedMapAsDestination
+ }
+ case reflect.Map:
+ if vDst.Kind() != reflect.Struct {
+ return ErrExpectedStructAsDestination
+ }
+ default:
+ return ErrNotSupported
+ }
+ return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config)
+}
diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go
new file mode 100644
index 000000000..afa84a1e2
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/merge.go
@@ -0,0 +1,375 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+ "fmt"
+ "reflect"
+)
+
+func hasMergeableFields(dst reflect.Value) (exported bool) {
+ for i, n := 0, dst.NumField(); i < n; i++ {
+ field := dst.Type().Field(i)
+ if field.Anonymous && dst.Field(i).Kind() == reflect.Struct {
+ exported = exported || hasMergeableFields(dst.Field(i))
+ } else if isExportedComponent(&field) {
+ exported = exported || len(field.PkgPath) == 0
+ }
+ }
+ return
+}
+
+func isExportedComponent(field *reflect.StructField) bool {
+ pkgPath := field.PkgPath
+ if len(pkgPath) > 0 {
+ return false
+ }
+ c := field.Name[0]
+ if 'a' <= c && c <= 'z' || c == '_' {
+ return false
+ }
+ return true
+}
+
+type Config struct {
+ Overwrite bool
+ AppendSlice bool
+ TypeCheck bool
+ Transformers Transformers
+ overwriteWithEmptyValue bool
+ overwriteSliceWithEmptyValue bool
+ sliceDeepCopy bool
+ debug bool
+}
+
+type Transformers interface {
+ Transformer(reflect.Type) func(dst, src reflect.Value) error
+}
+
+// Traverses recursively both values, assigning src's fields values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
+ overwrite := config.Overwrite
+ typeCheck := config.TypeCheck
+ overwriteWithEmptySrc := config.overwriteWithEmptyValue
+ overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue
+ sliceDeepCopy := config.sliceDeepCopy
+
+ if !src.IsValid() {
+ return
+ }
+ if dst.CanAddr() {
+ addr := dst.UnsafeAddr()
+ h := 17 * addr
+ seen := visited[h]
+ typ := dst.Type()
+ for p := seen; p != nil; p = p.next {
+ if p.ptr == addr && p.typ == typ {
+ return nil
+ }
+ }
+ // Remember, remember...
+ visited[h] = &visit{addr, typ, seen}
+ }
+
+ if config.Transformers != nil && !isEmptyValue(dst) {
+ if fn := config.Transformers.Transformer(dst.Type()); fn != nil {
+ err = fn(dst, src)
+ return
+ }
+ }
+
+ switch dst.Kind() {
+ case reflect.Struct:
+ if hasMergeableFields(dst) {
+ for i, n := 0, dst.NumField(); i < n; i++ {
+ if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil {
+ return
+ }
+ }
+ } else {
+ if (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) {
+ dst.Set(src)
+ }
+ }
+ case reflect.Map:
+ if dst.IsNil() && !src.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+
+ if src.Kind() != reflect.Map {
+ if overwrite {
+ dst.Set(src)
+ }
+ return
+ }
+
+ for _, key := range src.MapKeys() {
+ srcElement := src.MapIndex(key)
+ if !srcElement.IsValid() {
+ continue
+ }
+ dstElement := dst.MapIndex(key)
+ switch srcElement.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice:
+ if srcElement.IsNil() {
+ if overwrite {
+ dst.SetMapIndex(key, srcElement)
+ }
+ continue
+ }
+ fallthrough
+ default:
+ if !srcElement.CanInterface() {
+ continue
+ }
+ switch reflect.TypeOf(srcElement.Interface()).Kind() {
+ case reflect.Struct:
+ fallthrough
+ case reflect.Ptr:
+ fallthrough
+ case reflect.Map:
+ srcMapElm := srcElement
+ dstMapElm := dstElement
+ if srcMapElm.CanInterface() {
+ srcMapElm = reflect.ValueOf(srcMapElm.Interface())
+ if dstMapElm.IsValid() {
+ dstMapElm = reflect.ValueOf(dstMapElm.Interface())
+ }
+ }
+ if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil {
+ return
+ }
+ case reflect.Slice:
+ srcSlice := reflect.ValueOf(srcElement.Interface())
+
+ var dstSlice reflect.Value
+ if !dstElement.IsValid() || dstElement.IsNil() {
+ dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len())
+ } else {
+ dstSlice = reflect.ValueOf(dstElement.Interface())
+ }
+
+ if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy {
+ if typeCheck && srcSlice.Type() != dstSlice.Type() {
+ return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
+ }
+ dstSlice = srcSlice
+ } else if config.AppendSlice {
+ if srcSlice.Type() != dstSlice.Type() {
+ return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
+ }
+ dstSlice = reflect.AppendSlice(dstSlice, srcSlice)
+ } else if sliceDeepCopy {
+ i := 0
+ for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ {
+ srcElement := srcSlice.Index(i)
+ dstElement := dstSlice.Index(i)
+
+ if srcElement.CanInterface() {
+ srcElement = reflect.ValueOf(srcElement.Interface())
+ }
+ if dstElement.CanInterface() {
+ dstElement = reflect.ValueOf(dstElement.Interface())
+ }
+
+ if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+ return
+ }
+ }
+
+ }
+ dst.SetMapIndex(key, dstSlice)
+ }
+ }
+ if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) {
+ continue
+ }
+
+ if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) {
+ if dst.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+ dst.SetMapIndex(key, srcElement)
+ }
+ }
+ case reflect.Slice:
+ if !dst.CanSet() {
+ break
+ }
+ if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy {
+ dst.Set(src)
+ } else if config.AppendSlice {
+ if src.Type() != dst.Type() {
+ return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type())
+ }
+ dst.Set(reflect.AppendSlice(dst, src))
+ } else if sliceDeepCopy {
+ for i := 0; i < src.Len() && i < dst.Len(); i++ {
+ srcElement := src.Index(i)
+ dstElement := dst.Index(i)
+ if srcElement.CanInterface() {
+ srcElement = reflect.ValueOf(srcElement.Interface())
+ }
+ if dstElement.CanInterface() {
+ dstElement = reflect.ValueOf(dstElement.Interface())
+ }
+
+ if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+ return
+ }
+ }
+ }
+ case reflect.Ptr:
+ fallthrough
+ case reflect.Interface:
+ if isReflectNil(src) {
+ if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) {
+ dst.Set(src)
+ }
+ break
+ }
+
+ if src.Kind() != reflect.Interface {
+ if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) {
+ if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
+ dst.Set(src)
+ }
+ } else if src.Kind() == reflect.Ptr {
+ if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
+ return
+ }
+ } else if dst.Elem().Type() == src.Type() {
+ if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil {
+ return
+ }
+ } else {
+ return ErrDifferentArgumentsTypes
+ }
+ break
+ }
+
+ if dst.IsNil() || overwrite {
+ if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
+ dst.Set(src)
+ }
+ break
+ }
+
+ if dst.Elem().Kind() == src.Elem().Kind() {
+ if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
+ return
+ }
+ break
+ }
+ default:
+ mustSet := (isEmptyValue(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc)
+ if mustSet {
+ if dst.CanSet() {
+ dst.Set(src)
+ } else {
+ dst = src
+ }
+ }
+ }
+
+ return
+}
+
+// Merge will fill any empty for value type attributes on the dst struct using corresponding
+// src attributes if they themselves are not empty. dst and src must be valid same-type structs
+// and dst must be a pointer to struct.
+// It won't merge unexported (private) fields and will do recursively any exported field.
+func Merge(dst, src interface{}, opts ...func(*Config)) error {
+ return merge(dst, src, opts...)
+}
+
+// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by
+// non-empty src attribute values.
+// Deprecated: use Merge(…) with WithOverride
+func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
+ return merge(dst, src, append(opts, WithOverride)...)
+}
+
+// WithTransformers adds transformers to merge, allowing to customize the merging of some types.
+func WithTransformers(transformers Transformers) func(*Config) {
+ return func(config *Config) {
+ config.Transformers = transformers
+ }
+}
+
+// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values.
+func WithOverride(config *Config) {
+ config.Overwrite = true
+}
+
+// WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values.
+func WithOverwriteWithEmptyValue(config *Config) {
+ config.Overwrite = true
+ config.overwriteWithEmptyValue = true
+}
+
+// WithOverrideEmptySlice will make merge override empty dst slice with empty src slice.
+func WithOverrideEmptySlice(config *Config) {
+ config.overwriteSliceWithEmptyValue = true
+}
+
+// WithAppendSlice will make merge append slices instead of overwriting it.
+func WithAppendSlice(config *Config) {
+ config.AppendSlice = true
+}
+
+// WithTypeCheck will make merge check types while overwriting it (must be used with WithOverride).
+func WithTypeCheck(config *Config) {
+ config.TypeCheck = true
+}
+
+// WithSliceDeepCopy will merge slice element one by one with Overwrite flag.
+func WithSliceDeepCopy(config *Config) {
+ config.sliceDeepCopy = true
+ config.Overwrite = true
+}
+
+func merge(dst, src interface{}, opts ...func(*Config)) error {
+ if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
+ return ErrNonPointerAgument
+ }
+ var (
+ vDst, vSrc reflect.Value
+ err error
+ )
+
+ config := &Config{}
+
+ for _, opt := range opts {
+ opt(config)
+ }
+
+ if vDst, vSrc, err = resolveValues(dst, src); err != nil {
+ return err
+ }
+ if vDst.Type() != vSrc.Type() {
+ return ErrDifferentArgumentsTypes
+ }
+ return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
+}
+
+// isReflectNil reports whether the provided reflect value is of a nil-able kind and is nil.
+func isReflectNil(v reflect.Value) bool {
+ k := v.Kind()
+ switch k {
+ case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr:
+ // Both interface and slice are nil if first word is 0.
+ // Both are always bigger than a word; assume flagIndir.
+ return v.IsNil()
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go
new file mode 100644
index 000000000..3cc926c7f
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/mergo.go
@@ -0,0 +1,78 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+ "errors"
+ "reflect"
+)
+
+// Errors reported by Mergo when it finds invalid arguments.
+var (
+ ErrNilArguments = errors.New("src and dst must not be nil")
+ ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type")
+ ErrNotSupported = errors.New("only structs and maps are supported")
+ ErrExpectedMapAsDestination = errors.New("dst was expected to be a map")
+ ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
+ ErrNonPointerAgument = errors.New("dst must be a pointer")
+)
+
+// During deepMerge, must keep track of checks that are
+// in progress. The comparison algorithm assumes that all
+// checks in progress are true when it reencounters them.
+// Visited are stored in a map indexed by 17 * a1 + a2;
+type visit struct {
+ ptr uintptr
+ typ reflect.Type
+ next *visit
+}
+
+// From src/pkg/encoding/json/encode.go.
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ if v.IsNil() {
+ return true
+ }
+ return isEmptyValue(v.Elem())
+ case reflect.Func:
+ return v.IsNil()
+ case reflect.Invalid:
+ return true
+ }
+ return false
+}
+
+func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
+ if dst == nil || src == nil {
+ err = ErrNilArguments
+ return
+ }
+ vDst = reflect.ValueOf(dst).Elem()
+ if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map {
+ err = ErrNotSupported
+ return
+ }
+ vSrc = reflect.ValueOf(src)
+ // We check if vSrc is a pointer to dereference it.
+ if vSrc.Kind() == reflect.Ptr {
+ vSrc = vSrc.Elem()
+ }
+ return
+}
diff --git a/vendor/github.com/jessevdk/go-flags/.travis.yml b/vendor/github.com/jessevdk/go-flags/.travis.yml
index 0f0728d2f..2fc5e5f5b 100644
--- a/vendor/github.com/jessevdk/go-flags/.travis.yml
+++ b/vendor/github.com/jessevdk/go-flags/.travis.yml
@@ -5,19 +5,14 @@ os:
- osx
go:
- - 1.x
- - 1.7.x
- - 1.8.x
- - 1.9.x
- - 1.10.x
+ - 1.16.x
install:
# go-flags
- - go get -d -v ./...
- go build -v ./...
# linting
- - go get github.com/golang/lint/golint
+ - go get -v golang.org/x/lint/golint
# code coverage
- go get golang.org/x/tools/cmd/cover
diff --git a/vendor/github.com/jessevdk/go-flags/README.md b/vendor/github.com/jessevdk/go-flags/README.md
index 3b02394ed..f22650b20 100644
--- a/vendor/github.com/jessevdk/go-flags/README.md
+++ b/vendor/github.com/jessevdk/go-flags/README.md
@@ -61,6 +61,9 @@ var opts struct {
// Example of a required flag
Name string `short:"n" long:"name" description:"A name" required:"true"`
+ // Example of a flag restricted to a pre-defined set of strings
+ Animal string `long:"animal" choice:"cat" choice:"dog"`
+
// Example of a value name
File string `short:"f" long:"file" description:"A file" value-name:"FILE"`
@@ -91,6 +94,7 @@ args := []string{
"-vv",
"--offset=5",
"-n", "Me",
+ "--animal", "dog", // anything other than "cat" or "dog" will raise an error
"-p", "3",
"-s", "hello",
"-s", "world",
@@ -115,6 +119,7 @@ if err != nil {
fmt.Printf("Verbosity: %v\n", opts.Verbose)
fmt.Printf("Offset: %d\n", opts.Offset)
fmt.Printf("Name: %s\n", opts.Name)
+fmt.Printf("Animal: %s\n", opts.Animal)
fmt.Printf("Ptr: %d\n", *opts.Ptr)
fmt.Printf("StringSlice: %v\n", opts.StringSlice)
fmt.Printf("PtrSlice: [%v %v]\n", *opts.PtrSlice[0], *opts.PtrSlice[1])
diff --git a/vendor/github.com/jessevdk/go-flags/check_crosscompile.sh b/vendor/github.com/jessevdk/go-flags/check_crosscompile.sh
index c494f6119..5edc430ed 100644
--- a/vendor/github.com/jessevdk/go-flags/check_crosscompile.sh
+++ b/vendor/github.com/jessevdk/go-flags/check_crosscompile.sh
@@ -14,3 +14,7 @@ echo '# darwin'
GOARCH=amd64 GOOS=darwin go build
echo '# freebsd'
GOARCH=amd64 GOOS=freebsd go build
+echo '# aix ppc64'
+GOARCH=ppc64 GOOS=aix go build
+echo '# solaris amd64'
+GOARCH=amd64 GOOS=solaris go build
diff --git a/vendor/github.com/jessevdk/go-flags/command.go b/vendor/github.com/jessevdk/go-flags/command.go
index 486bacba1..879465d7a 100644
--- a/vendor/github.com/jessevdk/go-flags/command.go
+++ b/vendor/github.com/jessevdk/go-flags/command.go
@@ -438,7 +438,7 @@ func (c *Command) match(name string) bool {
return false
}
-func (c *Command) hasCliOptions() bool {
+func (c *Command) hasHelpOptions() bool {
ret := false
c.eachGroup(func(g *Group) {
@@ -447,7 +447,7 @@ func (c *Command) hasCliOptions() bool {
}
for _, opt := range g.options {
- if opt.canCli() {
+ if opt.showInHelp() {
ret = true
}
}
diff --git a/vendor/github.com/jessevdk/go-flags/completion.go b/vendor/github.com/jessevdk/go-flags/completion.go
index 7a7a08b93..8ed61f1db 100644
--- a/vendor/github.com/jessevdk/go-flags/completion.go
+++ b/vendor/github.com/jessevdk/go-flags/completion.go
@@ -2,6 +2,7 @@ package flags
import (
"fmt"
+ "os"
"path/filepath"
"reflect"
"sort"
@@ -62,6 +63,11 @@ func completionsWithoutDescriptions(items []string) []Completion {
// prefix.
func (f *Filename) Complete(match string) []Completion {
ret, _ := filepath.Glob(match + "*")
+ if len(ret) == 1 {
+ if info, err := os.Stat(ret[0]); err == nil && info.IsDir() {
+ ret[0] = ret[0] + "/"
+ }
+ }
return completionsWithoutDescriptions(ret)
}
@@ -76,7 +82,7 @@ func (c *completion) skipPositional(s *parseState, n int) {
func (c *completion) completeOptionNames(s *parseState, prefix string, match string, short bool) []Completion {
if short && len(match) != 0 {
return []Completion{
- Completion{
+ {
Item: prefix + match,
},
}
@@ -124,7 +130,7 @@ func (c *completion) completeCommands(s *parseState, match string) []Completion
n := make([]Completion, 0, len(s.command.commands))
for _, cmd := range s.command.commands {
- if cmd.data != c && strings.HasPrefix(cmd.Name, match) {
+ if cmd.data != c && !cmd.Hidden && strings.HasPrefix(cmd.Name, match) {
n = append(n, Completion{
Item: cmd.Name,
Description: cmd.ShortDescription,
diff --git a/vendor/github.com/jessevdk/go-flags/convert.go b/vendor/github.com/jessevdk/go-flags/convert.go
index 984aac89c..cda29b2f0 100644
--- a/vendor/github.com/jessevdk/go-flags/convert.go
+++ b/vendor/github.com/jessevdk/go-flags/convert.go
@@ -28,6 +28,15 @@ type Unmarshaler interface {
UnmarshalFlag(value string) error
}
+// ValueValidator is the interface implemented by types that can validate a
+// flag argument themselves. The provided value is directly passed from the
+// command line.
+type ValueValidator interface {
+ // IsValidValue returns an error if the provided string value is not valid
+ // for the flag.
+ IsValidValue(value string) error
+}
+
func getBase(options multiTag, base int) (int, error) {
sbase := options.Get("base")
diff --git a/vendor/github.com/jessevdk/go-flags/error.go b/vendor/github.com/jessevdk/go-flags/error.go
index 05528d8d2..73e07cfc2 100644
--- a/vendor/github.com/jessevdk/go-flags/error.go
+++ b/vendor/github.com/jessevdk/go-flags/error.go
@@ -97,6 +97,10 @@ func (e ErrorType) String() string {
return "unrecognized error type"
}
+func (e ErrorType) Error() string {
+ return e.String()
+}
+
// Error represents a parser error. The error returned from Parse is of this
// type. The error contains both a Type and Message.
type Error struct {
diff --git a/vendor/github.com/jessevdk/go-flags/flags.go b/vendor/github.com/jessevdk/go-flags/flags.go
index 889762d10..ac2157dd6 100644
--- a/vendor/github.com/jessevdk/go-flags/flags.go
+++ b/vendor/github.com/jessevdk/go-flags/flags.go
@@ -109,7 +109,8 @@ The following is a list of tags for struct fields supported by go-flags:
value-name: the name of the argument value (to be shown in the help)
(optional)
choice: limits the values for an option to a set of values.
- This tag can be specified multiple times (optional)
+ Repeat this tag once for each allowable value.
+ e.g. `long:"animal" choice:"cat" choice:"dog"`
hidden: if non-empty, the option is not visible in the help or man page.
base: a base (radix) used to convert strings to integer values, the
@@ -125,6 +126,10 @@ The following is a list of tags for struct fields supported by go-flags:
gets prepended to every option's long name and
subgroup's namespace of this group, separated by
the parser's namespace delimiter (optional)
+ env-namespace: when specified on a group struct field, the env-namespace
+ gets prepended to every option's env key and
+ subgroup's env-namespace of this group, separated by
+ the parser's env-namespace delimiter (optional)
command: when specified on a struct field, makes the struct
field a (sub)command with the given name (optional)
subcommands-optional: when specified on a command struct field, makes
diff --git a/vendor/github.com/jessevdk/go-flags/go.mod b/vendor/github.com/jessevdk/go-flags/go.mod
new file mode 100644
index 000000000..a626c5d10
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/go.mod
@@ -0,0 +1,5 @@
+module github.com/jessevdk/go-flags
+
+go 1.15
+
+require golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4
diff --git a/vendor/github.com/jessevdk/go-flags/go.sum b/vendor/github.com/jessevdk/go-flags/go.sum
new file mode 100644
index 000000000..7503251e1
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/go.sum
@@ -0,0 +1,2 @@
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4 h1:EZ2mChiOa8udjfp6rRmswTbtZN/QzUQp4ptM4rnjHvc=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/vendor/github.com/jessevdk/go-flags/group.go b/vendor/github.com/jessevdk/go-flags/group.go
index 9e057abd9..181caabb2 100644
--- a/vendor/github.com/jessevdk/go-flags/group.go
+++ b/vendor/github.com/jessevdk/go-flags/group.go
@@ -34,6 +34,9 @@ type Group struct {
// The namespace of the group
Namespace string
+ // The environment namespace of the group
+ EnvNamespace string
+
// If true, the group is not displayed in the help or man page
Hidden bool
@@ -70,6 +73,13 @@ func (g *Group) AddGroup(shortDescription string, longDescription string, data i
return group, nil
}
+// AddOption adds a new option to this group.
+func (g *Group) AddOption(option *Option, data interface{}) {
+ option.value = reflect.ValueOf(data)
+ option.group = g
+ g.options = append(g.options, option)
+}
+
// Groups returns the list of groups embedded in this group.
func (g *Group) Groups() []*Group {
return g.groups
@@ -165,6 +175,18 @@ func (g *Group) optionByName(name string, namematch func(*Option, string) bool)
return retopt
}
+func (g *Group) showInHelp() bool {
+ if g.Hidden {
+ return false
+ }
+ for _, opt := range g.options {
+ if opt.showInHelp() {
+ return true
+ }
+ }
+ return false
+}
+
func (g *Group) eachGroup(f func(*Group)) {
f(g)
@@ -358,6 +380,7 @@ func (g *Group) scanSubGroupHandler(realval reflect.Value, sfield *reflect.Struc
}
group.Namespace = mtag.Get("namespace")
+ group.EnvNamespace = mtag.Get("env-namespace")
group.Hidden = mtag.Get("hidden") != ""
return true, nil
diff --git a/vendor/github.com/jessevdk/go-flags/help.go b/vendor/github.com/jessevdk/go-flags/help.go
index d38030500..068fce152 100644
--- a/vendor/github.com/jessevdk/go-flags/help.go
+++ b/vendor/github.com/jessevdk/go-flags/help.go
@@ -72,6 +72,9 @@ func (p *Parser) getAlignmentInfo() alignmentInfo {
var prevcmd *Command
p.eachActiveGroup(func(c *Command, grp *Group) {
+ if !grp.showInHelp() {
+ return
+ }
if c != prevcmd {
for _, arg := range c.args {
ret.updateLen(arg.Name, c != p.Command)
@@ -79,7 +82,7 @@ func (p *Parser) getAlignmentInfo() alignmentInfo {
}
for _, info := range grp.options {
- if !info.canCli() {
+ if !info.showInHelp() {
continue
}
@@ -225,12 +228,12 @@ func (p *Parser) writeHelpOption(writer *bufio.Writer, option *Option, info alig
}
var envDef string
- if option.EnvDefaultKey != "" {
+ if option.EnvKeyWithNamespace() != "" {
var envPrintable string
if runtime.GOOS == "windows" {
- envPrintable = "%" + option.EnvDefaultKey + "%"
+ envPrintable = "%" + option.EnvKeyWithNamespace() + "%"
} else {
- envPrintable = "$" + option.EnvDefaultKey
+ envPrintable = "$" + option.EnvKeyWithNamespace()
}
envDef = fmt.Sprintf(" [%s]", envPrintable)
}
@@ -305,7 +308,7 @@ func (p *Parser) WriteHelp(writer io.Writer) {
}
} else if us, ok := allcmd.data.(Usage); ok {
usage = us.Usage()
- } else if allcmd.hasCliOptions() {
+ } else if allcmd.hasHelpOptions() {
usage = fmt.Sprintf("[%s-OPTIONS]", allcmd.Name)
}
@@ -393,7 +396,7 @@ func (p *Parser) WriteHelp(writer io.Writer) {
}
for _, info := range grp.options {
- if !info.canCli() || info.Hidden {
+ if !info.showInHelp() {
continue
}
@@ -489,3 +492,23 @@ func (p *Parser) WriteHelp(writer io.Writer) {
wr.Flush()
}
+
+// WroteHelp is a helper to test the error from ParseArgs() to
+// determine if the help message was written. It is safe to
+// call without first checking that error is nil.
+func WroteHelp(err error) bool {
+ if err == nil { // No error
+ return false
+ }
+
+ flagError, ok := err.(*Error)
+ if !ok { // Not a go-flag error
+ return false
+ }
+
+ if flagError.Type != ErrHelp { // Did not print the help message
+ return false
+ }
+
+ return true
+}
diff --git a/vendor/github.com/jessevdk/go-flags/ini.go b/vendor/github.com/jessevdk/go-flags/ini.go
index e714d3d38..60b36c79c 100644
--- a/vendor/github.com/jessevdk/go-flags/ini.go
+++ b/vendor/github.com/jessevdk/go-flags/ini.go
@@ -325,19 +325,19 @@ func writeCommandIni(command *Command, namespace string, writer io.Writer, optio
})
for _, c := range command.commands {
- var nns string
+ var fqn string
if c.Hidden {
continue
}
if len(namespace) != 0 {
- nns = c.Name + "." + nns
+ fqn = namespace + "." + c.Name
} else {
- nns = c.Name
+ fqn = c.Name
}
- writeCommandIni(c, nns, writer, options)
+ writeCommandIni(c, fqn, writer, options)
}
}
@@ -499,13 +499,21 @@ func (i *IniParser) matchingGroups(name string) []*Group {
func (i *IniParser) parse(ini *ini) error {
p := i.parser
+ p.eachOption(func(cmd *Command, group *Group, option *Option) {
+ option.clearReferenceBeforeSet = true
+ })
+
var quotesLookup = make(map[*Option]bool)
for name, section := range ini.Sections {
groups := i.matchingGroups(name)
if len(groups) == 0 {
- return newErrorf(ErrUnknownGroup, "could not find option group `%s'", name)
+ if (p.Options & IgnoreUnknown) == None {
+ return newErrorf(ErrUnknownGroup, "could not find option group `%s'", name)
+ }
+
+ continue
}
for _, inival := range section {
@@ -537,9 +545,8 @@ func (i *IniParser) parse(ini *ini) error {
continue
}
- // ini value is ignored if override is set and
- // value was previously set from non default
- if i.ParseAsDefaults && !opt.isSetDefault {
+ // ini value is ignored if parsed as default but defaults are prevented
+ if i.ParseAsDefaults && opt.preventDefault {
continue
}
@@ -572,7 +579,15 @@ func (i *IniParser) parse(ini *ini) error {
}
}
- if err := opt.set(pval); err != nil {
+ var err error
+
+ if i.ParseAsDefaults {
+ err = opt.setDefault(pval)
+ } else {
+ err = opt.set(pval)
+ }
+
+ if err != nil {
return &IniError{
Message: err.Error(),
File: ini.File,
@@ -580,6 +595,9 @@ func (i *IniParser) parse(ini *ini) error {
}
}
+ // Defaults from ini files take precedence over defaults from parser
+ opt.preventDefault = true
+
// either all INI values are quoted or only values who need quoting
if _, ok := quotesLookup[opt]; !inival.Quoted || !ok {
quotesLookup[opt] = inival.Quoted
diff --git a/vendor/github.com/jessevdk/go-flags/man.go b/vendor/github.com/jessevdk/go-flags/man.go
index 0cb114e74..82572f9a7 100644
--- a/vendor/github.com/jessevdk/go-flags/man.go
+++ b/vendor/github.com/jessevdk/go-flags/man.go
@@ -3,42 +3,55 @@ package flags
import (
"fmt"
"io"
+ "os"
"runtime"
+ "strconv"
"strings"
"time"
)
+func manQuoteLines(s string) string {
+ lines := strings.Split(s, "\n")
+ parts := []string{}
+
+ for _, line := range lines {
+ parts = append(parts, manQuote(line))
+ }
+
+ return strings.Join(parts, "\n")
+}
+
func manQuote(s string) string {
return strings.Replace(s, "\\", "\\\\", -1)
}
-func formatForMan(wr io.Writer, s string) {
+func formatForMan(wr io.Writer, s string, quoter func(s string) string) {
for {
idx := strings.IndexRune(s, '`')
if idx < 0 {
- fmt.Fprintf(wr, "%s", manQuote(s))
+ fmt.Fprintf(wr, "%s", quoter(s))
break
}
- fmt.Fprintf(wr, "%s", manQuote(s[:idx]))
+ fmt.Fprintf(wr, "%s", quoter(s[:idx]))
s = s[idx+1:]
idx = strings.IndexRune(s, '\'')
if idx < 0 {
- fmt.Fprintf(wr, "%s", manQuote(s))
+ fmt.Fprintf(wr, "%s", quoter(s))
break
}
- fmt.Fprintf(wr, "\\fB%s\\fP", manQuote(s[:idx]))
+ fmt.Fprintf(wr, "\\fB%s\\fP", quoter(s[:idx]))
s = s[idx+1:]
}
}
func writeManPageOptions(wr io.Writer, grp *Group) {
grp.eachGroup(func(group *Group) {
- if group.Hidden || len(group.options) == 0 {
+ if !group.showInHelp() {
return
}
@@ -48,13 +61,13 @@ func writeManPageOptions(wr io.Writer, grp *Group) {
fmt.Fprintf(wr, ".SS %s\n", group.ShortDescription)
if group.LongDescription != "" {
- formatForMan(wr, group.LongDescription)
+ formatForMan(wr, group.LongDescription, manQuoteLines)
fmt.Fprintln(wr, "")
}
}
for _, opt := range group.options {
- if !opt.canCli() || opt.Hidden {
+ if !opt.showInHelp() {
continue
}
@@ -83,11 +96,11 @@ func writeManPageOptions(wr io.Writer, grp *Group) {
if len(opt.Default) != 0 {
fmt.Fprintf(wr, " ", manQuote(strings.Join(quoteV(opt.Default), ", ")))
- } else if len(opt.EnvDefaultKey) != 0 {
+ } else if len(opt.EnvKeyWithNamespace()) != 0 {
if runtime.GOOS == "windows" {
- fmt.Fprintf(wr, " ", manQuote(opt.EnvDefaultKey))
+ fmt.Fprintf(wr, " ", manQuote(opt.EnvKeyWithNamespace()))
} else {
- fmt.Fprintf(wr, " ", manQuote(opt.EnvDefaultKey))
+ fmt.Fprintf(wr, " ", manQuote(opt.EnvKeyWithNamespace()))
}
}
@@ -98,14 +111,14 @@ func writeManPageOptions(wr io.Writer, grp *Group) {
fmt.Fprintln(wr, "\\fP")
if len(opt.Description) != 0 {
- formatForMan(wr, opt.Description)
+ formatForMan(wr, opt.Description, manQuoteLines)
fmt.Fprintln(wr, "")
}
}
})
}
-func writeManPageSubcommands(wr io.Writer, name string, root *Command) {
+func writeManPageSubcommands(wr io.Writer, name string, usagePrefix string, root *Command) {
commands := root.sortedVisibleCommands()
for _, c := range commands {
@@ -121,11 +134,11 @@ func writeManPageSubcommands(wr io.Writer, name string, root *Command) {
nn = c.Name
}
- writeManPageCommand(wr, nn, root, c)
+ writeManPageCommand(wr, nn, usagePrefix, c)
}
}
-func writeManPageCommand(wr io.Writer, name string, root *Command, command *Command) {
+func writeManPageCommand(wr io.Writer, name string, usagePrefix string, command *Command) {
fmt.Fprintf(wr, ".SS %s\n", name)
fmt.Fprintln(wr, command.ShortDescription)
@@ -137,30 +150,27 @@ func writeManPageCommand(wr io.Writer, name string, root *Command, command *Comm
if strings.HasPrefix(command.LongDescription, cmdstart) {
fmt.Fprintf(wr, "The \\fI%s\\fP command", manQuote(command.Name))
- formatForMan(wr, command.LongDescription[len(cmdstart):])
+ formatForMan(wr, command.LongDescription[len(cmdstart):], manQuoteLines)
fmt.Fprintln(wr, "")
} else {
- formatForMan(wr, command.LongDescription)
+ formatForMan(wr, command.LongDescription, manQuoteLines)
fmt.Fprintln(wr, "")
}
}
+ var pre = usagePrefix + " " + command.Name
+
var usage string
if us, ok := command.data.(Usage); ok {
usage = us.Usage()
- } else if command.hasCliOptions() {
+ } else if command.hasHelpOptions() {
usage = fmt.Sprintf("[%s-OPTIONS]", command.Name)
}
- var pre string
- if root.hasCliOptions() {
- pre = fmt.Sprintf("%s [OPTIONS] %s", root.Name, command.Name)
- } else {
- pre = fmt.Sprintf("%s %s", root.Name, command.Name)
- }
-
+ var nextPrefix = pre
if len(usage) > 0 {
fmt.Fprintf(wr, "\n\\fBUsage\\fP: %s %s\n.TP\n", manQuote(pre), manQuote(usage))
+ nextPrefix = pre + " " + usage
}
if len(command.Aliases) > 0 {
@@ -168,17 +178,25 @@ func writeManPageCommand(wr io.Writer, name string, root *Command, command *Comm
}
writeManPageOptions(wr, command.Group)
- writeManPageSubcommands(wr, name, command)
+ writeManPageSubcommands(wr, name, nextPrefix, command)
}
// WriteManPage writes a basic man page in groff format to the specified
// writer.
func (p *Parser) WriteManPage(wr io.Writer) {
t := time.Now()
+ source_date_epoch := os.Getenv("SOURCE_DATE_EPOCH")
+ if source_date_epoch != "" {
+ sde, err := strconv.ParseInt(source_date_epoch, 10, 64)
+ if err != nil {
+ panic(fmt.Sprintf("Invalid SOURCE_DATE_EPOCH: %s", err))
+ }
+ t = time.Unix(sde, 0)
+ }
fmt.Fprintf(wr, ".TH %s 1 \"%s\"\n", manQuote(p.Name), t.Format("2 January 2006"))
fmt.Fprintln(wr, ".SH NAME")
- fmt.Fprintf(wr, "%s \\- %s\n", manQuote(p.Name), manQuote(p.ShortDescription))
+ fmt.Fprintf(wr, "%s \\- %s\n", manQuote(p.Name), manQuoteLines(p.ShortDescription))
fmt.Fprintln(wr, ".SH SYNOPSIS")
usage := p.Usage
@@ -190,7 +208,7 @@ func (p *Parser) WriteManPage(wr io.Writer) {
fmt.Fprintf(wr, "\\fB%s\\fP %s\n", manQuote(p.Name), manQuote(usage))
fmt.Fprintln(wr, ".SH DESCRIPTION")
- formatForMan(wr, p.LongDescription)
+ formatForMan(wr, p.LongDescription, manQuoteLines)
fmt.Fprintln(wr, "")
fmt.Fprintln(wr, ".SH OPTIONS")
@@ -200,6 +218,6 @@ func (p *Parser) WriteManPage(wr io.Writer) {
if len(p.visibleCommands()) > 0 {
fmt.Fprintln(wr, ".SH COMMANDS")
- writeManPageSubcommands(wr, "", p.Command)
+ writeManPageSubcommands(wr, "", p.Name+" "+usage, p.Command)
}
}
diff --git a/vendor/github.com/jessevdk/go-flags/option.go b/vendor/github.com/jessevdk/go-flags/option.go
index 5f8525019..f6d694181 100644
--- a/vendor/github.com/jessevdk/go-flags/option.go
+++ b/vendor/github.com/jessevdk/go-flags/option.go
@@ -80,10 +80,11 @@ type Option struct {
// Determines if the option will be always quoted in the INI output
iniQuote bool
- tag multiTag
- isSet bool
- isSetDefault bool
- preventDefault bool
+ tag multiTag
+ isSet bool
+ isSetDefault bool
+ preventDefault bool
+ clearReferenceBeforeSet bool
defaultLiteral string
}
@@ -139,6 +140,57 @@ func (option *Option) LongNameWithNamespace() string {
return longName
}
+// EnvKeyWithNamespace returns the option's env key with the group namespaces
+// prepended by walking up the option's group tree. Namespaces and the env key
+// itself are separated by the parser's namespace delimiter. If the env key is
+// empty an empty string is returned.
+func (option *Option) EnvKeyWithNamespace() string {
+ if len(option.EnvDefaultKey) == 0 {
+ return ""
+ }
+
+ // fetch the namespace delimiter from the parser which is always at the
+ // end of the group hierarchy
+ namespaceDelimiter := ""
+ g := option.group
+
+ for {
+ if p, ok := g.parent.(*Parser); ok {
+ namespaceDelimiter = p.EnvNamespaceDelimiter
+
+ break
+ }
+
+ switch i := g.parent.(type) {
+ case *Command:
+ g = i.Group
+ case *Group:
+ g = i
+ }
+ }
+
+ // concatenate the env key with the env namespaces of enclosing groups
+ key := option.EnvDefaultKey
+ g = option.group
+
+ for g != nil {
+ if g.EnvNamespace != "" {
+ key = g.EnvNamespace + namespaceDelimiter + key
+ }
+
+ switch i := g.parent.(type) {
+ case *Command:
+ g = i.Group
+ case *Group:
+ g = i
+ case *Parser:
+ g = nil
+ }
+ }
+
+ return key
+}
+
// String converts an option to a human friendly readable string describing the
// option.
func (option *Option) String() string {
@@ -190,12 +242,13 @@ func (option *Option) IsSetDefault() bool {
func (option *Option) set(value *string) error {
kind := option.value.Type().Kind()
- if (kind == reflect.Map || kind == reflect.Slice) && !option.isSet {
+ if (kind == reflect.Map || kind == reflect.Slice) && option.clearReferenceBeforeSet {
option.empty()
}
option.isSet = true
option.preventDefault = true
+ option.clearReferenceBeforeSet = false
if len(option.Choices) != 0 {
found := false
@@ -229,8 +282,23 @@ func (option *Option) set(value *string) error {
return convert("", option.value, option.tag)
}
-func (option *Option) canCli() bool {
- return option.ShortName != 0 || len(option.LongName) != 0
+func (option *Option) setDefault(value *string) error {
+ if option.preventDefault {
+ return nil
+ }
+
+ if err := option.set(value); err != nil {
+ return err
+ }
+
+ option.isSetDefault = true
+ option.preventDefault = false
+
+ return nil
+}
+
+func (option *Option) showInHelp() bool {
+ return !option.Hidden && (option.ShortName != 0 || len(option.LongName) != 0)
}
func (option *Option) canArgument() bool {
@@ -257,14 +325,17 @@ func (option *Option) empty() {
}
}
-func (option *Option) clearDefault() {
+func (option *Option) clearDefault() error {
+ if option.preventDefault {
+ return nil
+ }
+
usedDefault := option.Default
- if envKey := option.EnvDefaultKey; envKey != "" {
+ if envKey := option.EnvKeyWithNamespace(); envKey != "" {
if value, ok := os.LookupEnv(envKey); ok {
if option.EnvDefaultDelim != "" {
- usedDefault = strings.Split(value,
- option.EnvDefaultDelim)
+ usedDefault = strings.Split(value, option.EnvDefaultDelim)
} else {
usedDefault = []string{value}
}
@@ -277,8 +348,11 @@ func (option *Option) clearDefault() {
option.empty()
for _, d := range usedDefault {
- option.set(&d)
- option.isSetDefault = true
+ err := option.setDefault(&d)
+
+ if err != nil {
+ return err
+ }
}
} else {
tp := option.value.Type()
@@ -294,6 +368,8 @@ func (option *Option) clearDefault() {
}
}
}
+
+ return nil
}
func (option *Option) valueIsDefault() bool {
@@ -339,6 +415,30 @@ func (option *Option) isUnmarshaler() Unmarshaler {
return nil
}
+func (option *Option) isValueValidator() ValueValidator {
+ v := option.value
+
+ for {
+ if !v.CanInterface() {
+ break
+ }
+
+ i := v.Interface()
+
+ if u, ok := i.(ValueValidator); ok {
+ return u
+ }
+
+ if !v.CanAddr() {
+ break
+ }
+
+ v = v.Addr()
+ }
+
+ return nil
+}
+
func (option *Option) isBool() bool {
tp := option.value.Type()
@@ -457,3 +557,13 @@ func (option *Option) shortAndLongName() string {
return ret.String()
}
+
+func (option *Option) isValidValue(arg string) error {
+ if validator := option.isValueValidator(); validator != nil {
+ return validator.IsValidValue(arg)
+ }
+ if argumentIsOption(arg) && !(option.isSignedNumber() && len(arg) > 1 && arg[0] == '-' && arg[1] >= '0' && arg[1] <= '9') {
+ return fmt.Errorf("expected argument for flag `%s', but got option `%s'", option, arg)
+ }
+ return nil
+}
diff --git a/vendor/github.com/jessevdk/go-flags/parser.go b/vendor/github.com/jessevdk/go-flags/parser.go
index 0a7922a08..3fc3f7ba1 100644
--- a/vendor/github.com/jessevdk/go-flags/parser.go
+++ b/vendor/github.com/jessevdk/go-flags/parser.go
@@ -9,6 +9,7 @@ import (
"fmt"
"os"
"path"
+ "reflect"
"sort"
"strings"
"unicode/utf8"
@@ -29,6 +30,9 @@ type Parser struct {
// NamespaceDelimiter separates group namespaces and option long names
NamespaceDelimiter string
+ // EnvNamespaceDelimiter separates group env namespaces and env keys
+ EnvNamespaceDelimiter string
+
// UnknownOptionsHandler is a function which gets called when the parser
// encounters an unknown option. The function receives the unknown option
// name, a SplitArgument which specifies its value if set with an argument
@@ -170,9 +174,10 @@ func NewParser(data interface{}, options Options) *Parser {
// be added to this parser by using AddGroup and AddCommand.
func NewNamedParser(appname string, options Options) *Parser {
p := &Parser{
- Command: newCommand(appname, "", "", nil),
- Options: options,
- NamespaceDelimiter: ".",
+ Command: newCommand(appname, "", "", nil),
+ Options: options,
+ NamespaceDelimiter: ".",
+ EnvNamespaceDelimiter: "_",
}
p.Command.parent = p
@@ -203,8 +208,7 @@ func (p *Parser) ParseArgs(args []string) ([]string, error) {
}
p.eachOption(func(c *Command, g *Group, option *Option) {
- option.isSet = false
- option.isSetDefault = false
+ option.clearReferenceBeforeSet = true
option.updateDefaultLiteral()
})
@@ -237,6 +241,7 @@ func (p *Parser) ParseArgs(args []string) ([]string, error) {
p.fillParseState(s)
for !s.eof() {
+ var err error
arg := s.pop()
// When PassDoubleDash is set and we encounter a --, then
@@ -247,6 +252,20 @@ func (p *Parser) ParseArgs(args []string) ([]string, error) {
}
if !argumentIsOption(arg) {
+ if (p.Options&PassAfterNonOption) != None && s.lookup.commands[arg] == nil {
+ // If PassAfterNonOption is set then all remaining arguments
+ // are considered positional
+ if err = s.addArgs(s.arg); err != nil {
+ break
+ }
+
+ if err = s.addArgs(s.args...); err != nil {
+ break
+ }
+
+ break
+ }
+
// Note: this also sets s.err, so we can just check for
// nil here and use s.err later
if p.parseNonOption(s) != nil {
@@ -256,8 +275,6 @@ func (p *Parser) ParseArgs(args []string) ([]string, error) {
continue
}
- var err error
-
prefix, optname, islong := stripOptionPrefix(arg)
optname, _, argument := splitOption(prefix, optname, islong)
@@ -293,11 +310,13 @@ func (p *Parser) ParseArgs(args []string) ([]string, error) {
if s.err == nil {
p.eachOption(func(c *Command, g *Group, option *Option) {
- if option.preventDefault {
- return
+ err := option.clearDefault()
+ if err != nil {
+ if _, ok := err.(*Error); !ok {
+ err = p.marshalError(option, err)
+ }
+ s.err = err
}
-
- option.clearDefault()
})
s.checkRequired(p)
@@ -515,8 +534,8 @@ func (p *Parser) parseOption(s *parseState, name string, option *Option, canarg
} else {
arg = s.pop()
- if argumentIsOption(arg) && !(option.isSignedNumber() && len(arg) > 1 && arg[0] == '-' && arg[1] >= '0' && arg[1] <= '9') {
- return newErrorf(ErrExpectedArgument, "expected argument for flag `%s', but got option `%s'", option, arg)
+ if validationErr := option.isValidValue(arg); validationErr != nil {
+ return newErrorf(ErrExpectedArgument, validationErr.Error())
} else if p.Options&PassDoubleDash != 0 && arg == "--" {
return newErrorf(ErrExpectedArgument, "expected argument for flag `%s', but got double dash `--'", option)
}
@@ -545,16 +564,37 @@ func (p *Parser) parseOption(s *parseState, name string, option *Option, canarg
if err != nil {
if _, ok := err.(*Error); !ok {
- err = newErrorf(ErrMarshal, "invalid argument for flag `%s' (expected %s): %s",
- option,
- option.value.Type(),
- err.Error())
+ err = p.marshalError(option, err)
}
}
return err
}
+func (p *Parser) marshalError(option *Option, err error) *Error {
+ s := "invalid argument for flag `%s'"
+
+ expected := p.expectedType(option)
+
+ if expected != "" {
+ s = s + " (expected " + expected + ")"
+ }
+
+ return newErrorf(ErrMarshal, s+": %s",
+ option,
+ err.Error())
+}
+
+func (p *Parser) expectedType(option *Option) string {
+ valueType := option.value.Type()
+
+ if valueType.Kind() == reflect.Func {
+ return ""
+ }
+
+ return valueType.String()
+}
+
func (p *Parser) parseLong(s *parseState, name string, argument *string) error {
if option := s.lookup.longNames[name]; option != nil {
// Only long options that are required can consume an argument
@@ -649,23 +689,7 @@ func (p *Parser) parseNonOption(s *parseState) error {
}
}
- if (p.Options & PassAfterNonOption) != None {
- // If PassAfterNonOption is set then all remaining arguments
- // are considered positional
- if err := s.addArgs(s.arg); err != nil {
- return err
- }
-
- if err := s.addArgs(s.args...); err != nil {
- return err
- }
-
- s.args = []string{}
- } else {
- return s.addArgs(s.arg)
- }
-
- return nil
+ return s.addArgs(s.arg)
}
func (p *Parser) showBuiltinHelp() error {
@@ -688,13 +712,3 @@ func (p *Parser) printError(err error) error {
return err
}
-
-func (p *Parser) clearIsSet() {
- p.eachCommand(func(c *Command) {
- c.eachGroup(func(g *Group) {
- for _, option := range g.options {
- option.isSet = false
- }
- })
- }, true)
-}
diff --git a/vendor/github.com/jessevdk/go-flags/termsize.go b/vendor/github.com/jessevdk/go-flags/termsize.go
index 1ca6a8503..829e477ad 100644
--- a/vendor/github.com/jessevdk/go-flags/termsize.go
+++ b/vendor/github.com/jessevdk/go-flags/termsize.go
@@ -1,28 +1,15 @@
-// +build !windows,!plan9,!solaris,!appengine
+// +build !windows,!plan9,!appengine,!wasm
package flags
import (
- "syscall"
- "unsafe"
+ "golang.org/x/sys/unix"
)
-type winsize struct {
- row, col uint16
- xpixel, ypixel uint16
-}
-
func getTerminalColumns() int {
- ws := winsize{}
-
- if tIOCGWINSZ != 0 {
- syscall.Syscall(syscall.SYS_IOCTL,
- uintptr(0),
- uintptr(tIOCGWINSZ),
- uintptr(unsafe.Pointer(&ws)))
-
- return int(ws.col)
+ ws, err := unix.IoctlGetWinsize(0, unix.TIOCGWINSZ)
+ if err != nil {
+ return 80
}
-
- return 80
+ return int(ws.Col)
}
diff --git a/vendor/github.com/jessevdk/go-flags/termsize_nosysioctl.go b/vendor/github.com/jessevdk/go-flags/termsize_nosysioctl.go
index 3d5385b0b..c1ff18673 100644
--- a/vendor/github.com/jessevdk/go-flags/termsize_nosysioctl.go
+++ b/vendor/github.com/jessevdk/go-flags/termsize_nosysioctl.go
@@ -1,4 +1,4 @@
-// +build windows plan9 solaris appengine
+// +build plan9 appengine wasm
package flags
diff --git a/vendor/github.com/jessevdk/go-flags/termsize_windows.go b/vendor/github.com/jessevdk/go-flags/termsize_windows.go
new file mode 100644
index 000000000..5c0fa6ba2
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/termsize_windows.go
@@ -0,0 +1,85 @@
+// +build windows
+
+package flags
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+type (
+ SHORT int16
+ WORD uint16
+
+ SMALL_RECT struct {
+ Left SHORT
+ Top SHORT
+ Right SHORT
+ Bottom SHORT
+ }
+
+ COORD struct {
+ X SHORT
+ Y SHORT
+ }
+
+ CONSOLE_SCREEN_BUFFER_INFO struct {
+ Size COORD
+ CursorPosition COORD
+ Attributes WORD
+ Window SMALL_RECT
+ MaximumWindowSize COORD
+ }
+)
+
+var kernel32DLL = syscall.NewLazyDLL("kernel32.dll")
+var getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo")
+
+func getError(r1, r2 uintptr, lastErr error) error {
+ // If the function fails, the return value is zero.
+ if r1 == 0 {
+ if lastErr != nil {
+ return lastErr
+ }
+ return syscall.EINVAL
+ }
+ return nil
+}
+
+func getStdHandle(stdhandle int) (uintptr, error) {
+ handle, err := syscall.GetStdHandle(stdhandle)
+ if err != nil {
+ return 0, err
+ }
+ return uintptr(handle), nil
+}
+
+// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer.
+// http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx
+func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) {
+ var info CONSOLE_SCREEN_BUFFER_INFO
+ if err := getError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0)); err != nil {
+ return nil, err
+ }
+ return &info, nil
+}
+
+func getTerminalColumns() int {
+ defaultWidth := 80
+
+ stdoutHandle, err := getStdHandle(syscall.STD_OUTPUT_HANDLE)
+ if err != nil {
+ return defaultWidth
+ }
+
+ info, err := GetConsoleScreenBufferInfo(stdoutHandle)
+ if err != nil {
+ return defaultWidth
+ }
+
+ if info.MaximumWindowSize.X > 0 {
+ return int(info.MaximumWindowSize.X)
+ }
+
+ return defaultWidth
+}
diff --git a/vendor/github.com/jessevdk/go-flags/tiocgwinsz_bsdish.go b/vendor/github.com/jessevdk/go-flags/tiocgwinsz_bsdish.go
deleted file mode 100644
index fcc118601..000000000
--- a/vendor/github.com/jessevdk/go-flags/tiocgwinsz_bsdish.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// +build darwin freebsd netbsd openbsd
-
-package flags
-
-const (
- tIOCGWINSZ = 0x40087468
-)
diff --git a/vendor/github.com/jessevdk/go-flags/tiocgwinsz_linux.go b/vendor/github.com/jessevdk/go-flags/tiocgwinsz_linux.go
deleted file mode 100644
index e3975e283..000000000
--- a/vendor/github.com/jessevdk/go-flags/tiocgwinsz_linux.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// +build linux
-
-package flags
-
-const (
- tIOCGWINSZ = 0x5413
-)
diff --git a/vendor/github.com/jessevdk/go-flags/tiocgwinsz_other.go b/vendor/github.com/jessevdk/go-flags/tiocgwinsz_other.go
deleted file mode 100644
index 308215155..000000000
--- a/vendor/github.com/jessevdk/go-flags/tiocgwinsz_other.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// +build !darwin,!freebsd,!netbsd,!openbsd,!linux
-
-package flags
-
-const (
- tIOCGWINSZ = 0
-)
diff --git a/vendor/github.com/json-iterator/go/go.sum b/vendor/github.com/json-iterator/go/go.sum
index d778b5a14..be00a6df9 100644
--- a/vendor/github.com/json-iterator/go/go.sum
+++ b/vendor/github.com/json-iterator/go/go.sum
@@ -9,6 +9,7 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLD
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
diff --git a/vendor/github.com/json-iterator/go/iter_float.go b/vendor/github.com/json-iterator/go/iter_float.go
index b9754638e..8a3d8b6fb 100644
--- a/vendor/github.com/json-iterator/go/iter_float.go
+++ b/vendor/github.com/json-iterator/go/iter_float.go
@@ -288,6 +288,9 @@ non_decimal_loop:
return iter.readFloat64SlowPath()
}
value = (value << 3) + (value << 1) + uint64(ind)
+ if value > maxFloat64 {
+ return iter.readFloat64SlowPath()
+ }
}
}
return iter.readFloat64SlowPath()
diff --git a/vendor/github.com/json-iterator/go/iter_int.go b/vendor/github.com/json-iterator/go/iter_int.go
index 214232035..d786a89fe 100644
--- a/vendor/github.com/json-iterator/go/iter_int.go
+++ b/vendor/github.com/json-iterator/go/iter_int.go
@@ -9,6 +9,7 @@ var intDigits []int8
const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1
const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1
+const maxFloat64 = 1<<53 - 1
func init() {
intDigits = make([]int8, 256)
@@ -339,7 +340,7 @@ func (iter *Iterator) readUint64(c byte) (ret uint64) {
}
func (iter *Iterator) assertInteger() {
- if iter.head < len(iter.buf) && iter.buf[iter.head] == '.' {
+ if iter.head < iter.tail && iter.buf[iter.head] == '.' {
iter.ReportError("assertInteger", "can not decode float as int")
}
}
diff --git a/vendor/github.com/json-iterator/go/reflect.go b/vendor/github.com/json-iterator/go/reflect.go
index 74974ba74..39acb320a 100644
--- a/vendor/github.com/json-iterator/go/reflect.go
+++ b/vendor/github.com/json-iterator/go/reflect.go
@@ -65,7 +65,7 @@ func (iter *Iterator) ReadVal(obj interface{}) {
decoder := iter.cfg.getDecoderFromCache(cacheKey)
if decoder == nil {
typ := reflect2.TypeOf(obj)
- if typ.Kind() != reflect.Ptr {
+ if typ == nil || typ.Kind() != reflect.Ptr {
iter.ReportError("ReadVal", "can only unmarshal into pointer")
return
}
diff --git a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go
index f2619936c..eba434f2f 100644
--- a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go
+++ b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go
@@ -33,11 +33,19 @@ type jsonRawMessageCodec struct {
}
func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- *((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes())
+ if iter.ReadNil() {
+ *((*json.RawMessage)(ptr)) = nil
+ } else {
+ *((*json.RawMessage)(ptr)) = iter.SkipAndReturnBytes()
+ }
}
func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteRaw(string(*((*json.RawMessage)(ptr))))
+ if *((*json.RawMessage)(ptr)) == nil {
+ stream.WriteNil()
+ } else {
+ stream.WriteRaw(string(*((*json.RawMessage)(ptr))))
+ }
}
func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool {
@@ -48,11 +56,19 @@ type jsoniterRawMessageCodec struct {
}
func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- *((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes())
+ if iter.ReadNil() {
+ *((*RawMessage)(ptr)) = nil
+ } else {
+ *((*RawMessage)(ptr)) = iter.SkipAndReturnBytes()
+ }
}
func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteRaw(string(*((*RawMessage)(ptr))))
+ if *((*RawMessage)(ptr)) == nil {
+ stream.WriteNil()
+ } else {
+ stream.WriteRaw(string(*((*RawMessage)(ptr))))
+ }
}
func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool {
diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
index d7eb0eb5c..92ae912dc 100644
--- a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
+++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
@@ -1075,6 +1075,11 @@ type stringModeNumberDecoder struct {
}
func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if iter.WhatIsNext() == NilValue {
+ decoder.elemDecoder.Decode(ptr, iter)
+ return
+ }
+
c := iter.nextToken()
if c != '"' {
iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c}))
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE b/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE
deleted file mode 100644
index 14127cd83..000000000
--- a/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE
+++ /dev/null
@@ -1,9 +0,0 @@
-(The MIT License)
-
-Copyright (c) 2017 marvin + konsorten GmbH (open-source@konsorten.de)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md b/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md
deleted file mode 100644
index 09a4a35c9..000000000
--- a/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md
+++ /dev/null
@@ -1,42 +0,0 @@
-# Windows Terminal Sequences
-
-This library allow for enabling Windows terminal color support for Go.
-
-See [Console Virtual Terminal Sequences](https://docs.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences) for details.
-
-## Usage
-
-```go
-import (
- "syscall"
-
- sequences "github.com/konsorten/go-windows-terminal-sequences"
-)
-
-func main() {
- sequences.EnableVirtualTerminalProcessing(syscall.Stdout, true)
-}
-
-```
-
-## Authors
-
-The tool is sponsored by the [marvin + konsorten GmbH](http://www.konsorten.de).
-
-We thank all the authors who provided code to this library:
-
-* Felix Kollmann
-* Nicolas Perraut
-* @dirty49374
-
-## License
-
-(The MIT License)
-
-Copyright (c) 2018 marvin + konsorten GmbH (open-source@konsorten.de)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod b/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod
deleted file mode 100644
index 716c61312..000000000
--- a/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod
+++ /dev/null
@@ -1 +0,0 @@
-module github.com/konsorten/go-windows-terminal-sequences
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go
deleted file mode 100644
index 57f530ae8..000000000
--- a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// +build windows
-
-package sequences
-
-import (
- "syscall"
-)
-
-var (
- kernel32Dll *syscall.LazyDLL = syscall.NewLazyDLL("Kernel32.dll")
- setConsoleMode *syscall.LazyProc = kernel32Dll.NewProc("SetConsoleMode")
-)
-
-func EnableVirtualTerminalProcessing(stream syscall.Handle, enable bool) error {
- const ENABLE_VIRTUAL_TERMINAL_PROCESSING uint32 = 0x4
-
- var mode uint32
- err := syscall.GetConsoleMode(syscall.Stdout, &mode)
- if err != nil {
- return err
- }
-
- if enable {
- mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING
- } else {
- mode &^= ENABLE_VIRTUAL_TERMINAL_PROCESSING
- }
-
- ret, _, err := setConsoleMode.Call(uintptr(stream), uintptr(mode))
- if ret == 0 {
- return err
- }
-
- return nil
-}
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go
deleted file mode 100644
index df61a6f2f..000000000
--- a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build linux darwin
-
-package sequences
-
-import (
- "fmt"
-)
-
-func EnableVirtualTerminalProcessing(stream uintptr, enable bool) error {
- return fmt.Errorf("windows only package")
-}
diff --git a/vendor/github.com/linode/linodego/.gitignore b/vendor/github.com/linode/linodego/.gitignore
new file mode 100644
index 000000000..dcb4cc633
--- /dev/null
+++ b/vendor/github.com/linode/linodego/.gitignore
@@ -0,0 +1,23 @@
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
+
+
+# Common IDE paths
+.vscode/
+
+vendor/**/
+.env
+coverage.txt
+
diff --git a/vendor/github.com/linode/linodego/.golangci.yml b/vendor/github.com/linode/linodego/.golangci.yml
new file mode 100644
index 000000000..4f2b4daa6
--- /dev/null
+++ b/vendor/github.com/linode/linodego/.golangci.yml
@@ -0,0 +1,56 @@
+run:
+ tests: false
+
+linters-settings:
+ errcheck:
+ check-type-assertions: true
+ check-blank: true
+
+ govet:
+ check-shadowing: true
+
+ enable:
+ - atomicalign
+ enable-all: false
+ disable:
+ - shadow
+ disable-all: false
+ golint:
+ min-confidence: 0.8
+ gocyclo:
+ min-complexity: 30
+ gocognit:
+ min-complexity: 30
+ maligned:
+ suggest-new: true
+ dupl:
+ threshold: 100
+
+linters:
+ enable-all: true
+ disable:
+ - vetshadow
+ - gocyclo
+ - unparam
+ - nakedret
+ - lll
+ - dupl
+ - gosec
+ - gochecknoinits
+ - gochecknoglobals
+ - errcheck
+ - staticcheck
+ - stylecheck
+ - wsl
+ - interfacer
+ - gomnd
+ - nlreturn
+ - wrapcheck
+ - wastedassign
+ - goerr113
+ - exhaustivestruct
+ - durationcheck
+ - errorlint
+ - cyclop
+ - godot
+ fast: false
diff --git a/vendor/github.com/linode/linodego/API_SUPPORT.md b/vendor/github.com/linode/linodego/API_SUPPORT.md
new file mode 100644
index 000000000..5eab78c60
--- /dev/null
+++ b/vendor/github.com/linode/linodego/API_SUPPORT.md
@@ -0,0 +1,441 @@
+# API Support
+
+This document tracks LinodeGo support for the features of the [Linode API](https://developers.linode.com/changelog/api/).
+
+Endpoints are implemented as needed, by need or user-request. As new features are added (as reported in the [Linode API Changelog](https://developers.linode.com/changelog/api/)) this document should be updated to reflect any missing endpoints. New or deprecated fields should also be indicated below the affected HTTP method, for example:
+
+```markdown
+- `/fake/endpoint`
+ - [x] `GET`
+ * 4.0.29 field X is not implemented
+ - [ ] `POST`
+ * 4.0.30 added support to create Fake things
+```
+
+See `template.go` and `template_test.go` for tips on adding new endpoints.
+
+## Linodes
+
+- `/linode/instances`
+ - [x] `GET`
+ - [X] `POST`
+- `/linode/instances/$id`
+ - [x] `GET`
+ - [X] `PUT`
+ - [X] `DELETE`
+- `/linode/instances/$id/boot`
+ - [x] `POST`
+- `/linode/instances/$id/clone`
+ - [x] `POST`
+- `/linode/instances/$id/mutate`
+ - [X] `POST`
+- `/linode/instances/$id/reboot`
+ - [x] `POST`
+- `/linode/instances/$id/rebuild`
+ - [X] `POST`
+- `/linode/instances/$id/rescue`
+ - [X] `POST`
+- `/linode/instances/$id/resize`
+ - [x] `POST`
+- `/linode/instances/$id/shutdown`
+ - [x] `POST`
+- `/linode/instances/$id/volumes`
+ - [X] `GET`
+
+### Backups
+
+- `/linode/instances/$id/backups`
+ - [X] `GET`
+ - [ ] `POST`
+- `/linode/instances/$id/backups/$id/restore`
+ - [ ] `POST`
+- `/linode/instances/$id/backups/cancel`
+ - [ ] `POST`
+- `/linode/instances/$id/backups/enable`
+ - [ ] `POST`
+
+### Configs
+
+- `/linode/instances/$id/configs`
+ - [X] `GET`
+ - [X] `POST`
+- `/linode/instances/$id/configs/$id`
+ - [X] `GET`
+ - [X] `PUT`
+ - [X] `DELETE`
+
+### Disks
+
+- `/linode/instances/$id/disks`
+ - [X] `GET`
+ - [X] `POST`
+- `/linode/instances/$id/disks/$id`
+ - [X] `GET`
+ - [X] `PUT`
+ - [X] `POST`
+ - [X] `DELETE`
+- `/linode/instances/$id/disks/$id/password`
+ - [X] `POST`
+- `/linode/instances/$id/disks/$id/resize`
+ - [X] `POST`
+
+### IPs
+
+- `/linode/instances/$id/ips`
+ - [X] `GET`
+ - [X] `POST`
+- `/linode/instances/$id/ips/$ip_address`
+ - [X] `GET`
+ - [X] `PUT`
+ - [ ] `DELETE`
+- `/linode/instances/$id/ips/sharing`
+ - [ ] `POST`
+
+### Kernels
+
+- `/linode/kernels`
+ - [X] `GET`
+- `/linode/kernels/$id`
+ - [X] `GET`
+
+### StackScripts
+
+- `/linode/stackscripts`
+ - [x] `GET`
+ - [X] `POST`
+- `/linode/stackscripts/$id`
+ - [x] `GET`
+ - [X] `PUT`
+ - [X] `DELETE`
+
+### Stats
+
+- `/linode/instances/$id/stats`
+ - [X] `GET`
+- `/linode/instances/$id/stats/$year/$month`
+ - [X] `GET`
+
+### Types
+
+- `/linode/types`
+ - [X] `GET`
+- `/linode/types/$id`
+ - [X] `GET`
+
+## Domains
+
+- `/domains`
+ - [X] `GET`
+ - [X] `POST`
+- `/domains/$id`
+ - [X] `GET`
+ - [X] `PUT`
+ - [X] `DELETE`
+- `/domains/$id/clone`
+ - [ ] `POST`
+- `/domains/$id/records`
+ - [X] `GET`
+ - [X] `POST`
+- `/domains/$id/records/$id`
+ - [X] `GET`
+ - [X] `PUT`
+ - [X] `DELETE`
+
+## LKE
+
+- `/lke/clusters`
+ - [X] `POST`
+ - [X] `GET`
+ - [X] `PUT`
+ - [X] `DELETE`
+- `/lke/clusters/$id/pools`
+ - [X] `POST`
+ - [X] `GET`
+ - [X] `PUT`
+ - [X] `DELETE`
+- `/lke/clusters/$id/api-endpoint`
+ - [X] `GET`
+- `/lke/clusters/$id/kubeconfig`
+ - [X] `GET`
+- `/lke/clusters/$id/versions`
+ - [X] `GET`
+- `/lke/clusters/$id/versions/$id`
+ - [X] `GET`
+
+## Longview
+
+- `/longview/clients`
+ - [X] `GET`
+ - [ ] `POST`
+- `/longview/clients/$id`
+ - [X] `GET`
+ - [ ] `PUT`
+ - [ ] `DELETE`
+
+### Subscriptions
+
+- `/longview/subscriptions`
+ - [ ] `GET`
+- `/longview/subscriptions/$id`
+ - [ ] `GET`
+
+### NodeBalancers
+
+- `/nodebalancers`
+ - [X] `GET`
+ - [X] `POST`
+- `/nodebalancers/$id`
+ - [X] `GET`
+ - [X] `PUT`
+ - [X] `DELETE`
+- `/nodebalancers/$id/stats`
+ - [X] `GET`
+
+### NodeBalancer Configs
+
+- `/nodebalancers/$id/configs`
+ - [X] `GET`
+ - [X] `POST`
+- `/nodebalancers/$id/configs/$id`
+ - [X] `GET`
+ - [X] `DELETE`
+- `/nodebalancers/$id/configs/$id/nodes`
+ - [X] `GET`
+ - [X] `POST`
+- `/nodebalancers/$id/configs/$id/nodes/$id`
+ - [X] `GET`
+ - [X] `PUT`
+ - [X] `DELETE`
+- `/nodebalancers/$id/configs/$id/rebuild`
+ - [X] `POST`
+
+## Networking
+
+- `/networking/ip-assign`
+ - [ ] `POST`
+- `/networking/ips`
+ - [X] `GET`
+ - [ ] `POST`
+- `/networking/ips/$address`
+ - [X] `GET`
+ - [X] `PUT`
+ - [ ] `DELETE`
+
+### IPv6
+
+- `/networking/ips`
+ - [X] `GET`
+- `/networking/ips/$address`
+ - [X] `GET`
+ - [ ] `PUT`
+- `/networking/ipv6/ranges`
+ - [X] `GET`
+- `/networking/ipv6/pools`
+ - [X] `GET`
+
+## Regions
+
+- `/regions`
+ - [x] `GET`
+- `/regions/$id`
+ - [x] `GET`
+
+## Support
+
+- `/support/tickets`
+ - [X] `GET`
+ - [ ] `POST`
+- `/support/tickets/$id`
+ - [X] `GET`
+- `/support/tickets/$id/attachments`
+ - [ ] `POST`
+- `/support/tickets/$id/replies`
+ - [ ] `GET`
+ - [ ] `POST`
+
+## Tags
+
+- `/tags/`
+ - [X] `GET`
+ - [X] `POST`
+- `/tags/$id`
+ - [X] `GET`
+ - [X] `DELETE`
+
+## Account
+
+### Events
+
+- `/account/events`
+ - [X] `GET`
+- `/account/events/$id`
+ - [X] `GET`
+- `/account/events/$id/read`
+ - [X] `POST`
+- `/account/events/$id/seen`
+ - [X] `POST`
+
+### Invoices
+
+- `/account/invoices/`
+ - [X] `GET`
+- `/account/invoices/$id`
+ - [X] `GET`
+- `/account/invoices/$id/items`
+ - [X] `GET`
+
+### Notifications
+
+- `/account/notifications`
+ - [X] `GET`
+
+### OAuth Clients
+
+- `/account/oauth-clients`
+ - [X] `GET`
+ - [X] `POST`
+- `/account/oauth-clients/$id`
+ - [X] `GET`
+ - [X] `PUT`
+ - [X] `DELETE`
+- `/account/oauth-clients/$id/reset_secret`
+ - [ ] `POST`
+- `/account/oauth-clients/$id/thumbnail`
+ - [ ] `GET`
+ - [ ] `PUT`
+
+### Object Storage Keys
+
+- `/object-storage/keys`
+ - [X] `GET`
+ - [X] `POST`
+- `/object-storage/keys/$id`
+ - [X] `GET`
+ - [X] `PUT`
+ - [X] `DELETE`
+
+### Object Storage Clusters
+- `/object-storage/clusters`
+ - [X] `GET`
+- `/object-storage/clusters/$id`
+ - [X] `GET`
+
+### Object Storage Buckets
+
+- `/object-storage/buckets`
+ - [X] `GET`
+ - [X] `POST`
+- `/object-storage/buckets/$id/$id`
+ - [X] `GET`
+ - [X] `DELETE`
+
+### Payments
+
+- `/account/payments`
+ - [X] `GET`
+ - [X] `POST`
+- `/account/payments/$id`
+ - [X] `GET`
+- `/account/payments/paypal`
+ - [ ] `GET`
+- `/account/payments/paypal/execute`
+ - [ ] `POST`
+
+### Settings
+
+- `/account/settings`
+ - [X] `GET`
+ - [X] `PUT`
+
+### Users
+
+- `/account/users`
+ - [X] `GET`
+ - [X] `POST`
+- `/account/users/$username`
+ - [X] `GET`
+ - [X] `PUT`
+ - [X] `DELETE`
+- `/account/users/$username/grants`
+ - [ ] `GET`
+ - [ ] `PUT`
+- `/account/users/$username/password`
+ - [ ] `POST`
+
+## Profile
+
+### Personalized User Settings
+
+- `/profile`
+ - [X] `GET`
+ - [X] `PUT`
+
+### Granted OAuth Apps
+
+- `/profile/apps`
+ - [ ] `GET`
+- `/profile/apps/$id`
+ - [ ] `GET`
+ - [ ] `DELETE`
+
+### Grants to Linode Resources
+
+- `/profile/grants`
+ - [ ] `GET`
+
+### SSH Keys
+
+- `/profile/sshkeys`
+ - [x] `GET`
+ - [x] `POST`
+- `/profile/sshkeys/$id`
+ - [x] `GET`
+ - [x] `PUT`
+ - [x] `DELETE`
+
+### Two-Factor
+
+- `/profile/tfa-disable`
+ - [ ] `POST`
+- `/profile/tfa-enable`
+ - [ ] `POST`
+- `/profile/tfa-enable-confirm`
+ - [ ] `POST`
+
+### Personal Access API Tokens
+
+- `/profile/tokens`
+ - [X] `GET`
+ - [X] `POST`
+- `/profile/tokens/$id`
+ - [X] `GET`
+ - [X] `PUT`
+ - [X] `DELETE`
+
+## Images
+
+- `/images`
+ - [x] `GET`
+- `/images/$id`
+ - [x] `GET`
+ - [X] `POST`
+ - [X] `PUT`
+ - [X] `DELETE`
+
+## Volumes
+
+- `/volumes`
+ - [X] `GET`
+ - [X] `POST`
+- `/volumes/$id`
+ - [X] `GET`
+ - [X] `PUT`
+ - [X] `DELETE`
+- `/volumes/$id/attach`
+ - [X] `POST`
+- `/volumes/$id/clone`
+ - [X] `POST`
+- `/volumes/$id/detach`
+ - [X] `POST`
+- `/volumes/$id/resize`
+ - [X] `POST`
diff --git a/vendor/github.com/linode/linodego/CHANGELOG.md b/vendor/github.com/linode/linodego/CHANGELOG.md
new file mode 100644
index 000000000..70bf97e87
--- /dev/null
+++ b/vendor/github.com/linode/linodego/CHANGELOG.md
@@ -0,0 +1 @@
+Release notes for this project are kept here: https://github.com/linode/linodego/releases
diff --git a/vendor/github.com/linode/linodego/LICENSE b/vendor/github.com/linode/linodego/LICENSE
new file mode 100644
index 000000000..6b0e6bace
--- /dev/null
+++ b/vendor/github.com/linode/linodego/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2017 Christopher "Chief" Najewicz
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/linode/linodego/Makefile b/vendor/github.com/linode/linodego/Makefile
new file mode 100644
index 000000000..810c453a2
--- /dev/null
+++ b/vendor/github.com/linode/linodego/Makefile
@@ -0,0 +1,74 @@
+-include .env
+BIN_DIR := $(GOPATH)/bin
+
+INTEGRATION_DIR := ./test/integration
+FIXTURES_DIR := $(INTEGRATION_DIR)/fixtures
+
+GOLANGCILINT := golangci-lint
+GOLANGCILINT_IMG := golangci/golangci-lint:v1.38-alpine
+GOLANGCILINT_ARGS := run
+
+PACKAGES := $(shell go list ./... | grep -v integration)
+
+SKIP_LINT ?= 0
+
+.PHONY: build vet test refresh-fixtures clean-fixtures lint run_fixtures sanitize fixtures godoc testint testunit tidy
+
+test: build lint testunit testint
+
+citest: lint test
+
+testunit:
+ go test -v $(PACKAGES) $(ARGS)
+
+testint:
+ cd test && make test
+
+build: vet lint
+ go build ./...
+ cd k8s && go build ./...
+
+vet:
+ go vet ./...
+ cd k8s && go vet ./...
+
+lint:
+ifeq ($(SKIP_LINT), 1)
+ @echo Skipping lint stage
+else
+ docker run --rm -v $(shell pwd):/app -w /app $(GOLANGCILINT_IMG) $(GOLANGCILINT) run
+endif
+
+clean-fixtures:
+ @-rm fixtures/*.yaml
+
+refresh-fixtures: clean-fixtures fixtures
+
+run_fixtures:
+ @echo "* Running fixtures"
+ cd $(INTEGRATION_DIR) && \
+ LINODE_FIXTURE_MODE="record" \
+ LINODE_TOKEN=$(LINODE_TOKEN) \
+ LINODE_API_VERSION="v4beta" \
+ GO111MODULE="on" \
+ go test -timeout=60m -v $(ARGS)
+
+sanitize:
+ @echo "* Sanitizing fixtures"
+ @for yaml in $(FIXTURES_DIR)/*yaml; do \
+ sed -E -i.bak \
+ -e 's_stats/20[0-9]{2}/[1-9][0-2]?_stats/2018/1_g' \
+ -e 's/(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))/1234::5678/g' \
+ $$yaml; \
+ done
+ @find $(FIXTURES_DIR) -name *yaml.bak -exec rm {} \;
+
+fixtures: run_fixtures sanitize
+
+godoc:
+ @godoc -http=:6060
+
+tidy:
+ go mod tidy
+ cd k8s && go mod tidy
+ cd test && go mod tidy
diff --git a/vendor/github.com/linode/linodego/README.md b/vendor/github.com/linode/linodego/README.md
new file mode 100644
index 000000000..9a072d4b7
--- /dev/null
+++ b/vendor/github.com/linode/linodego/README.md
@@ -0,0 +1,173 @@
+# linodego
+
+[![Build Status](https://travis-ci.com/linode/linodego.svg?branch=master)](https://travis-ci.com/linode/linodego)
+[![Release](https://img.shields.io/github/v/release/linode/linodego)](https://github.com/linode/linodego/releases/latest)
+[![GoDoc](https://godoc.org/github.com/linode/linodego?status.svg)](https://godoc.org/github.com/linode/linodego)
+[![Go Report Card](https://goreportcard.com/badge/github.com/linode/linodego)](https://goreportcard.com/report/github.com/linode/linodego)
+[![codecov](https://codecov.io/gh/linode/linodego/branch/master/graph/badge.svg)](https://codecov.io/gh/linode/linodego)
+
+Go client for [Linode REST v4 API](https://developers.linode.com/api/v4)
+
+## Installation
+
+```sh
+go get -u github.com/linode/linodego
+```
+
+## API Support
+
+Check [API_SUPPORT.md](API_SUPPORT.md) for current support of the Linode `v4` API endpoints.
+
+** Note: This project will change and break until we release a v1.0.0 tagged version. Breaking changes in v0.x.x will be denoted with a minor version bump (v0.2.4 -> v0.3.0) **
+
+## Documentation
+
+See [godoc](https://godoc.org/github.com/linode/linodego) for a complete reference.
+
+The API generally follows the naming patterns prescribed in the [OpenAPIv3 document for Linode APIv4](https://developers.linode.com/api/v4).
+
+Deviations in naming have been made to avoid using "Linode" and "Instance" redundantly or inconsistently.
+
+A brief summary of the features offered in this API client are shown here.
+
+## Examples
+
+### General Usage
+
+```go
+package main
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/linode/linodego"
+ "golang.org/x/oauth2"
+
+ "log"
+ "net/http"
+ "os"
+)
+
+func main() {
+ apiKey, ok := os.LookupEnv("LINODE_TOKEN")
+ if !ok {
+ log.Fatal("Could not find LINODE_TOKEN, please assert it is set.")
+ }
+ tokenSource := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: apiKey})
+
+ oauth2Client := &http.Client{
+ Transport: &oauth2.Transport{
+ Source: tokenSource,
+ },
+ }
+
+ linodeClient := linodego.NewClient(oauth2Client)
+ linodeClient.SetDebug(true)
+
+ res, err := linodeClient.GetInstance(context.Background(), 4090913)
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Printf("%v", res)
+}
+```
+
+### Pagination
+
+#### Auto-Pagination Requests
+
+```go
+kernels, err := linodego.ListKernels(context.Background(), nil)
+// len(kernels) == 218
+```
+
+Or, use a page value of "0":
+
+```go
+opts := linodego.NewListOptions(0,"")
+kernels, err := linodego.ListKernels(context.Background(), opts)
+// len(kernels) == 218
+```
+
+#### Single Page
+
+```go
+opts := linodego.NewListOptions(2,"")
+// or opts := linodego.ListOptions{PageOptions: &linodego.PageOptions{Page: 2}, PageSize: 500}
+kernels, err := linodego.ListKernels(context.Background(), opts)
+// len(kernels) == 100
+```
+
+ListOptions are supplied as a pointer because the Pages and Results
+values are set in the supplied ListOptions.
+
+```go
+// opts.Results == 218
+```
+
+#### Filtering
+
+```go
+opts := linodego.ListOptions{Filter: "{\"mine\":true}"}
+// or opts := linodego.NewListOptions(0, "{\"mine\":true}")
+stackscripts, err := linodego.ListStackscripts(context.Background(), opts)
+```
+
+### Error Handling
+
+#### Getting Single Entities
+
+```go
+linode, err := linodego.GetInstance(context.Background(), 555) // any Linode ID that does not exist or is not yours
+// linode == nil: true
+// err.Error() == "[404] Not Found"
+// err.Code == "404"
+// err.Message == "Not Found"
+```
+
+#### Lists
+
+For lists, the list is still returned as `[]`, but `err` works the same way as on the `Get` request.
+
+```go
+linodes, err := linodego.ListInstances(context.Background(), linodego.NewListOptions(0, "{\"foo\":bar}"))
+// linodes == []
+// err.Error() == "[400] [X-Filter] Cannot filter on foo"
+```
+
+Otherwise sane requests beyond the last page do not trigger an error, just an empty result:
+
+```go
+linodes, err := linodego.ListInstances(context.Background(), linodego.NewListOptions(9999, ""))
+// linodes == []
+// err = nil
+```
+
+### Writes
+
+When performing a `POST` or `PUT` request, multiple field related errors will be returned as a single error, currently like:
+
+```go
+// err.Error() == "[400] [field1] foo problem; [field2] bar problem; [field3] baz problem"
+```
+
+## Tests
+
+Run `make testunit` to run the unit tests.
+
+Run `make testint` to run the integration tests. The integration tests use fixtures.
+
+To update the test fixtures, run `make fixtures`. This will record the API responses into the `fixtures/` directory.
+Be careful about committing any sensitive account details. An attempt has been made to sanitize IP addresses and
+dates, but no automated sanitization will be performed against `fixtures/*Account*.yaml`, for example.
+
+To prevent disrupting unaffected fixtures, target fixture generation like so: `make ARGS="-run TestListVolumes" fixtures`.
+
+## Discussion / Help
+
+Join us at [#linodego](https://gophers.slack.com/messages/CAG93EB2S) on the [gophers slack](https://gophers.slack.com)
+
+## License
+
+[MIT License](LICENSE)
diff --git a/vendor/github.com/linode/linodego/account.go b/vendor/github.com/linode/linodego/account.go
new file mode 100644
index 000000000..46439e1d9
--- /dev/null
+++ b/vendor/github.com/linode/linodego/account.go
@@ -0,0 +1,43 @@
+package linodego
+
+import "context"
+
+// Account associated with the token in use.
+type Account struct {
+ FirstName string `json:"first_name"`
+ LastName string `json:"last_name"`
+ Email string `json:"email"`
+ Company string `json:"company"`
+ Address1 string `json:"address_1"`
+ Address2 string `json:"address_2"`
+ Balance float32 `json:"balance"`
+ BalanceUninvoiced float32 `json:"balance_uninvoiced"`
+ City string `json:"city"`
+ State string `json:"state"`
+ Zip string `json:"zip"`
+ Country string `json:"country"`
+ TaxID string `json:"tax_id"`
+ Phone string `json:"phone"`
+ CreditCard *CreditCard `json:"credit_card"`
+}
+
+// CreditCard information associated with the Account.
+type CreditCard struct {
+ LastFour string `json:"last_four"`
+ Expiry string `json:"expiry"`
+}
+
+// GetAccount gets the contact and billing information related to the Account.
+func (c *Client) GetAccount(ctx context.Context) (*Account, error) {
+ e, err := c.Account.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+
+ r, err := coupleAPIErrors(c.R(ctx).SetResult(&Account{}).Get(e))
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Result().(*Account), nil
+}
diff --git a/vendor/github.com/linode/linodego/account_events.go b/vendor/github.com/linode/linodego/account_events.go
new file mode 100644
index 000000000..6433711ea
--- /dev/null
+++ b/vendor/github.com/linode/linodego/account_events.go
@@ -0,0 +1,280 @@
+package linodego
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "github.com/linode/linodego/internal/duration"
+ "github.com/linode/linodego/internal/parseabletime"
+)
+
+// Event represents an action taken on the Account.
+type Event struct {
+ // The unique ID of this Event.
+ ID int `json:"id"`
+
+ // Current status of the Event, Enum: "failed" "finished" "notification" "scheduled" "started"
+ Status EventStatus `json:"status"`
+
+ // The action that caused this Event. New actions may be added in the future.
+ Action EventAction `json:"action"`
+
+ // A percentage estimating the amount of time remaining for an Event. Returns null for notification events.
+ PercentComplete int `json:"percent_complete"`
+
+ // The rate of completion of the Event. Only some Events will return rate; for example, migration and resize Events.
+ Rate *string `json:"rate"`
+
+ // If this Event has been read.
+ Read bool `json:"read"`
+
+ // If this Event has been seen.
+ Seen bool `json:"seen"`
+
+ // The estimated time remaining until the completion of this Event. This value is only returned for in-progress events.
+ TimeRemaining *int `json:"-"`
+
+ // The username of the User who caused the Event.
+ Username string `json:"username"`
+
+ // Detailed information about the Event's entity, including ID, type, label, and URL used to access it.
+ Entity *EventEntity `json:"entity"`
+
+ // Detailed information about the Event's secondary or related entity, including ID, type, label, and URL used to access it.
+ SecondaryEntity *EventEntity `json:"secondary_entity"`
+
+ // When this Event was created.
+ Created *time.Time `json:"-"`
+}
+
+// EventAction constants start with Action and include all known Linode API Event Actions.
+type EventAction string
+
+// EventAction constants represent the actions that cause an Event. New actions may be added in the future.
+const (
+ ActionAccountUpdate EventAction = "account_update"
+ ActionAccountSettingsUpdate EventAction = "account_settings_update"
+ ActionBackupsEnable EventAction = "backups_enable"
+ ActionBackupsCancel EventAction = "backups_cancel"
+ ActionBackupsRestore EventAction = "backups_restore"
+ ActionCommunityQuestionReply EventAction = "community_question_reply"
+ ActionCommunityLike EventAction = "community_like"
+ ActionCreateCardUpdated EventAction = "credit_card_updated"
+ ActionDiskCreate EventAction = "disk_create"
+ ActionDiskDelete EventAction = "disk_delete"
+ ActionDiskUpdate EventAction = "disk_update"
+ ActionDiskDuplicate EventAction = "disk_duplicate"
+ ActionDiskImagize EventAction = "disk_imagize"
+ ActionDiskResize EventAction = "disk_resize"
+ ActionDNSRecordCreate EventAction = "dns_record_create"
+ ActionDNSRecordDelete EventAction = "dns_record_delete"
+ ActionDNSRecordUpdate EventAction = "dns_record_update"
+ ActionDNSZoneCreate EventAction = "dns_zone_create"
+ ActionDNSZoneDelete EventAction = "dns_zone_delete"
+ ActionDNSZoneUpdate EventAction = "dns_zone_update"
+ ActionFirewallCreate EventAction = "firewall_create"
+ ActionFirewallDelete EventAction = "firewall_delete"
+ ActionFirewallDisable EventAction = "firewall_disable"
+ ActionFirewallEnable EventAction = "firewall_enable"
+ ActionFirewallUpdate EventAction = "firewall_update"
+ ActionFirewallDeviceAdd EventAction = "firewall_device_add"
+ ActionFirewallDeviceRemove EventAction = "firewall_device_remove"
+ ActionHostReboot EventAction = "host_reboot"
+ ActionImageDelete EventAction = "image_delete"
+ ActionImageUpdate EventAction = "image_update"
+ ActionLassieReboot EventAction = "lassie_reboot"
+ ActionLinodeAddIP EventAction = "linode_addip"
+ ActionLinodeBoot EventAction = "linode_boot"
+ ActionLinodeClone EventAction = "linode_clone"
+ ActionLinodeCreate EventAction = "linode_create"
+ ActionLinodeDelete EventAction = "linode_delete"
+ ActionLinodeUpdate EventAction = "linode_update"
+ ActionLinodeDeleteIP EventAction = "linode_deleteip"
+ ActionLinodeMigrate EventAction = "linode_migrate"
+ ActionLinodeMutate EventAction = "linode_mutate"
+ ActionLinodeMutateCreate EventAction = "linode_mutate_create"
+ ActionLinodeReboot EventAction = "linode_reboot"
+ ActionLinodeRebuild EventAction = "linode_rebuild"
+ ActionLinodeResize EventAction = "linode_resize"
+ ActionLinodeResizeCreate EventAction = "linode_resize_create"
+ ActionLinodeShutdown EventAction = "linode_shutdown"
+ ActionLinodeSnapshot EventAction = "linode_snapshot"
+ ActionLinodeConfigCreate EventAction = "linode_config_create"
+ ActionLinodeConfigDelete EventAction = "linode_config_delete"
+ ActionLinodeConfigUpdate EventAction = "linode_config_update"
+ ActionLishBoot EventAction = "lish_boot"
+ ActionLKENodeCreate EventAction = "lke_node_create"
+ ActionLongviewClientCreate EventAction = "longviewclient_create"
+ ActionLongviewClientDelete EventAction = "longviewclient_delete"
+ ActionLongviewClientUpdate EventAction = "longviewclient_update"
+ ActionManagedDisabled EventAction = "managed_disabled"
+ ActionManagedEnabled EventAction = "managed_enabled"
+ ActionManagedServiceCreate EventAction = "managed_service_create"
+ ActionManagedServiceDelete EventAction = "managed_service_delete"
+ ActionNodebalancerCreate EventAction = "nodebalancer_create"
+ ActionNodebalancerDelete EventAction = "nodebalancer_delete"
+ ActionNodebalancerUpdate EventAction = "nodebalancer_update"
+ ActionNodebalancerConfigCreate EventAction = "nodebalancer_config_create"
+ ActionNodebalancerConfigDelete EventAction = "nodebalancer_config_delete"
+ ActionNodebalancerConfigUpdate EventAction = "nodebalancer_config_update"
+ ActionPasswordReset EventAction = "password_reset"
+ ActionPaymentSubmitted EventAction = "payment_submitted"
+ ActionStackScriptCreate EventAction = "stackscript_create"
+ ActionStackScriptDelete EventAction = "stackscript_delete"
+ ActionStackScriptUpdate EventAction = "stackscript_update"
+ ActionStackScriptPublicize EventAction = "stackscript_publicize"
+ ActionStackScriptRevise EventAction = "stackscript_revise"
+ ActionTFADisabled EventAction = "tfa_disabled"
+ ActionTFAEnabled EventAction = "tfa_enabled"
+ ActionTicketAttachmentUpload EventAction = "ticket_attachment_upload"
+ ActionTicketCreate EventAction = "ticket_create"
+ ActionTicketUpdate EventAction = "ticket_update"
+ ActionVolumeAttach EventAction = "volume_attach"
+ ActionVolumeClone EventAction = "volume_clone"
+ ActionVolumeCreate EventAction = "volume_create"
+ ActionVolumeDelte EventAction = "volume_delete"
+ ActionVolumeUpdate EventAction = "volume_update"
+ ActionVolumeDetach EventAction = "volume_detach"
+ ActionVolumeResize EventAction = "volume_resize"
+)
+
+// EntityType constants start with Entity and include Linode API Event Entity Types
+type EntityType string
+
+// EntityType constants are the entities an Event can be related to.
+const (
+ EntityLinode EntityType = "linode"
+ EntityDisk EntityType = "disk"
+ EntityDomain EntityType = "domain"
+ EntityFirewall EntityType = "firewall"
+ EntityNodebalancer EntityType = "nodebalancer"
+)
+
+// EventStatus constants start with Event and include Linode API Event Status values
+type EventStatus string
+
+// EventStatus constants reflect the current status of an Event
+const (
+ EventFailed EventStatus = "failed"
+ EventFinished EventStatus = "finished"
+ EventNotification EventStatus = "notification"
+ EventScheduled EventStatus = "scheduled"
+ EventStarted EventStatus = "started"
+)
+
+// EventEntity provides detailed information about the Event's
+// associated entity, including ID, Type, Label, and a URL that
+// can be used to access it.
+type EventEntity struct {
+ // ID may be a string or int, it depends on the EntityType
+ ID interface{} `json:"id"`
+ Label string `json:"label"`
+ Type EntityType `json:"type"`
+ URL string `json:"url"`
+}
+
+// EventsPagedResponse represents a paginated Events API response
+type EventsPagedResponse struct {
+ *PageOptions
+ Data []Event `json:"data"`
+}
+
+// endpoint gets the endpoint URL for Event
+func (EventsPagedResponse) endpoint(c *Client) string {
+ endpoint, err := c.Events.Endpoint()
+ if err != nil {
+ panic(err)
+ }
+
+ return endpoint
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (i *Event) UnmarshalJSON(b []byte) error {
+ type Mask Event
+
+ p := struct {
+ *Mask
+ Created *parseabletime.ParseableTime `json:"created"`
+ TimeRemaining json.RawMessage `json:"time_remaining"`
+ }{
+ Mask: (*Mask)(i),
+ }
+
+ if err := json.Unmarshal(b, &p); err != nil {
+ return err
+ }
+
+ i.Created = (*time.Time)(p.Created)
+ i.TimeRemaining = duration.UnmarshalTimeRemaining(p.TimeRemaining)
+
+ return nil
+}
+
+// endpointWithID gets the endpoint URL for a specific Event
+func (i Event) endpointWithID(c *Client) string {
+ endpoint, err := c.Events.Endpoint()
+ if err != nil {
+ panic(err)
+ }
+
+ endpoint = fmt.Sprintf("%s/%d", endpoint, i.ID)
+
+ return endpoint
+}
+
+// appendData appends Events when processing paginated Event responses
+func (resp *EventsPagedResponse) appendData(r *EventsPagedResponse) {
+ resp.Data = append(resp.Data, r.Data...)
+}
+
+// ListEvents gets a collection of Event objects representing actions taken
+// on the Account. The Events returned depend on the token grants and the grants
+// of the associated user.
+func (c *Client) ListEvents(ctx context.Context, opts *ListOptions) ([]Event, error) {
+ response := EventsPagedResponse{}
+ err := c.listHelper(ctx, &response, opts)
+ if err != nil {
+ return nil, err
+ }
+
+ return response.Data, nil
+}
+
+// GetEvent gets the Event with the Event ID
+func (c *Client) GetEvent(ctx context.Context, id int) (*Event, error) {
+ e, err := c.Events.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+
+ e = fmt.Sprintf("%s/%d", e, id)
+ r, err := c.R(ctx).SetResult(&Event{}).Get(e)
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Result().(*Event), nil
+}
+
+// MarkEventRead marks a single Event as read.
+func (c *Client) MarkEventRead(ctx context.Context, event *Event) error {
+ e := event.endpointWithID(c)
+ e = fmt.Sprintf("%s/read", e)
+
+ _, err := coupleAPIErrors(c.R(ctx).Post(e))
+
+ return err
+}
+
+// MarkEventsSeen marks all Events up to and including this Event by ID as seen.
+func (c *Client) MarkEventsSeen(ctx context.Context, event *Event) error {
+ e := event.endpointWithID(c)
+ e = fmt.Sprintf("%s/seen", e)
+
+ _, err := coupleAPIErrors(c.R(ctx).Post(e))
+
+ return err
+}
diff --git a/vendor/github.com/linode/linodego/account_invoices.go b/vendor/github.com/linode/linodego/account_invoices.go
new file mode 100644
index 000000000..afa370a00
--- /dev/null
+++ b/vendor/github.com/linode/linodego/account_invoices.go
@@ -0,0 +1,151 @@
+package linodego
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "github.com/linode/linodego/internal/parseabletime"
+)
+
+// Invoice structs reflect an invoice for billable activity on the account.
+type Invoice struct {
+ ID int `json:"id"`
+ Label string `json:"label"`
+ Total float32 `json:"total"`
+ Date *time.Time `json:"-"`
+}
+
+// InvoiceItem structs reflect a single billable activity associated with an Invoice
+type InvoiceItem struct {
+ Label string `json:"label"`
+ Type string `json:"type"`
+ UnitPrice int `json:"unitprice"`
+ Quantity int `json:"quantity"`
+ Amount float32 `json:"amount"`
+ From *time.Time `json:"-"`
+ To *time.Time `json:"-"`
+}
+
+// InvoicesPagedResponse represents a paginated Invoice API response
+type InvoicesPagedResponse struct {
+ *PageOptions
+ Data []Invoice `json:"data"`
+}
+
+// endpoint gets the endpoint URL for Invoice
+func (InvoicesPagedResponse) endpoint(c *Client) string {
+ endpoint, err := c.Invoices.Endpoint()
+ if err != nil {
+ panic(err)
+ }
+
+ return endpoint
+}
+
+// appendData appends Invoices when processing paginated Invoice responses
+func (resp *InvoicesPagedResponse) appendData(r *InvoicesPagedResponse) {
+ resp.Data = append(resp.Data, r.Data...)
+}
+
+// ListInvoices gets a paginated list of Invoices against the Account
+func (c *Client) ListInvoices(ctx context.Context, opts *ListOptions) ([]Invoice, error) {
+ response := InvoicesPagedResponse{}
+ err := c.listHelper(ctx, &response, opts)
+ if err != nil {
+ return nil, err
+ }
+
+ return response.Data, nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (i *Invoice) UnmarshalJSON(b []byte) error {
+ type Mask Invoice
+
+ p := struct {
+ *Mask
+ Date *parseabletime.ParseableTime `json:"date"`
+ }{
+ Mask: (*Mask)(i),
+ }
+
+ if err := json.Unmarshal(b, &p); err != nil {
+ return err
+ }
+
+ i.Date = (*time.Time)(p.Date)
+
+ return nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (i *InvoiceItem) UnmarshalJSON(b []byte) error {
+ type Mask InvoiceItem
+
+ p := struct {
+ *Mask
+ From *parseabletime.ParseableTime `json:"from"`
+ To *parseabletime.ParseableTime `json:"to"`
+ }{
+ Mask: (*Mask)(i),
+ }
+
+ if err := json.Unmarshal(b, &p); err != nil {
+ return err
+ }
+
+ i.From = (*time.Time)(p.From)
+ i.To = (*time.Time)(p.To)
+
+ return nil
+}
+
+// GetInvoice gets a single Invoice matching the provided ID
+func (c *Client) GetInvoice(ctx context.Context, id int) (*Invoice, error) {
+ e, err := c.Invoices.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+
+ e = fmt.Sprintf("%s/%d", e, id)
+ r, err := coupleAPIErrors(c.R(ctx).SetResult(&Invoice{}).Get(e))
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Result().(*Invoice), nil
+}
+
+// InvoiceItemsPagedResponse represents a paginated Invoice Item API response
+type InvoiceItemsPagedResponse struct {
+ *PageOptions
+ Data []InvoiceItem `json:"data"`
+}
+
+// endpointWithID gets the endpoint URL for InvoiceItems associated with a specific Invoice
+func (InvoiceItemsPagedResponse) endpointWithID(c *Client, id int) string {
+ endpoint, err := c.InvoiceItems.endpointWithParams(id)
+ if err != nil {
+ panic(err)
+ }
+
+ return endpoint
+}
+
+// appendData appends InvoiceItems when processing paginated Invoice Item responses
+func (resp *InvoiceItemsPagedResponse) appendData(r *InvoiceItemsPagedResponse) {
+ resp.Data = append(resp.Data, r.Data...)
+}
+
+// ListInvoiceItems gets the invoice items associated with a specific Invoice
+func (c *Client) ListInvoiceItems(ctx context.Context, id int, opts *ListOptions) ([]InvoiceItem, error) {
+ response := InvoiceItemsPagedResponse{}
+ err := c.listHelperWithID(ctx, &response, id, opts)
+ if err != nil {
+ return nil, err
+ }
+
+ return response.Data, nil
+}
diff --git a/vendor/github.com/linode/linodego/account_notifications.go b/vendor/github.com/linode/linodego/account_notifications.go
new file mode 100644
index 000000000..8c8d3c914
--- /dev/null
+++ b/vendor/github.com/linode/linodego/account_notifications.go
@@ -0,0 +1,115 @@
+package linodego
+
+import (
+ "context"
+ "encoding/json"
+ "time"
+
+ "github.com/linode/linodego/internal/parseabletime"
+)
+
+// Notification represents a notification on an Account
+type Notification struct {
+ Label string `json:"label"`
+ Body *string `json:"body"`
+ Message string `json:"message"`
+ Type NotificationType `json:"type"`
+ Severity NotificationSeverity `json:"severity"`
+ Entity *NotificationEntity `json:"entity"`
+ Until *time.Time `json:"-"`
+ When *time.Time `json:"-"`
+}
+
+// NotificationEntity adds detailed information about the Notification.
+// This could refer to the ticket that triggered the notification, for example.
+type NotificationEntity struct {
+ ID int `json:"id"`
+ Label string `json:"label"`
+ Type string `json:"type"`
+ URL string `json:"url"`
+}
+
+// NotificationSeverity constants start with Notification and include all known Linode API Notification Severities.
+type NotificationSeverity string
+
+// NotificationSeverity constants represent the severity of a Notification. New severities may be added in the future.
+const (
+ NotificationMinor NotificationSeverity = "minor"
+ NotificationMajor NotificationSeverity = "major"
+ NotificationCritical NotificationSeverity = "critical"
+)
+
+// NotificationType constants start with Notification and include all known Linode API Notification Types.
+type NotificationType string
+
+// NotificationType constants represent the types of Notifications. New types may be added in the future.
+const (
+ NotificationMigrationScheduled NotificationType = "migration_scheduled"
+ NotificationMigrationImminent NotificationType = "migration_imminent"
+ NotificationMigrationPending NotificationType = "migration_pending"
+ NotificationRebootScheduled NotificationType = "reboot_scheduled"
+ NotificationOutage NotificationType = "outage"
+ NotificationPaymentDue NotificationType = "payment_due"
+ NotificationTicketImportant NotificationType = "ticket_important"
+ NotificationTicketAbuse NotificationType = "ticket_abuse"
+ NotificationNotice NotificationType = "notice"
+ NotificationMaintenance NotificationType = "maintenance"
+)
+
+// NotificationsPagedResponse represents a paginated Notifications API response
+type NotificationsPagedResponse struct {
+ *PageOptions
+ Data []Notification `json:"data"`
+}
+
+// endpoint gets the endpoint URL for Notification
+func (NotificationsPagedResponse) endpoint(c *Client) string {
+ endpoint, err := c.Notifications.Endpoint()
+ if err != nil {
+ panic(err)
+ }
+
+ return endpoint
+}
+
+// appendData appends Notifications when processing paginated Notification responses
+func (resp *NotificationsPagedResponse) appendData(r *NotificationsPagedResponse) {
+ resp.Data = append(resp.Data, r.Data...)
+}
+
+// ListNotifications gets a collection of Notification objects representing important,
+// often time-sensitive items related to the Account. An account cannot interact directly with
+// Notifications, and a Notification will disappear when the circumstances causing it
+// have been resolved. For example, if the account has an important Ticket open, a response
+// to the Ticket will dismiss the Notification.
+func (c *Client) ListNotifications(ctx context.Context, opts *ListOptions) ([]Notification, error) {
+ response := NotificationsPagedResponse{}
+ err := c.listHelper(ctx, &response, opts)
+ if err != nil {
+ return nil, err
+ }
+
+ return response.Data, nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (i *Notification) UnmarshalJSON(b []byte) error {
+ type Mask Notification
+
+ p := struct {
+ *Mask
+ Until *parseabletime.ParseableTime `json:"until"`
+ When *parseabletime.ParseableTime `json:"when"`
+ }{
+ Mask: (*Mask)(i),
+ }
+
+ if err := json.Unmarshal(b, &p); err != nil {
+ return err
+ }
+
+ i.Until = (*time.Time)(p.Until)
+ i.When = (*time.Time)(p.When)
+
+ return nil
+}
diff --git a/vendor/github.com/linode/linodego/account_oauth_client.go b/vendor/github.com/linode/linodego/account_oauth_client.go
new file mode 100644
index 000000000..03f0e58ab
--- /dev/null
+++ b/vendor/github.com/linode/linodego/account_oauth_client.go
@@ -0,0 +1,201 @@
+package linodego
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+)
+
+// OAuthClientStatus constants start with OAuthClient and include Linode API Instance Status values
+type OAuthClientStatus string
+
+// OAuthClientStatus constants reflect the current status of an OAuth Client
+const (
+ OAuthClientActive OAuthClientStatus = "active"
+ OAuthClientDisabled OAuthClientStatus = "disabled"
+ OAuthClientSuspended OAuthClientStatus = "suspended"
+)
+
+// OAuthClient represents a OAuthClient object
+type OAuthClient struct {
+ // The unique ID of this OAuth Client.
+ ID string `json:"id"`
+
+ // The location a successful log in from https://login.linode.com should be redirected to for this client. The receiver of this redirect should be ready to accept an OAuth exchange code and finish the OAuth exchange.
+ RedirectURI string `json:"redirect_uri"`
+
+ // The name of this application. This will be presented to users when they are asked to grant it access to their Account.
+ Label string `json:"label"`
+
+ // Current status of the OAuth Client, Enum: "active" "disabled" "suspended"
+ Status OAuthClientStatus `json:"status"`
+
+	// The OAuth Client secret, used in the OAuth exchange. This is redacted except when an OAuth Client is created or its secret is reset. This is a secret, and should not be shared or disclosed publicly.
+ Secret string `json:"secret"`
+
+ // If this OAuth Client is public or private.
+ Public bool `json:"public"`
+
+ // The URL where this client's thumbnail may be viewed, or nil if this client does not have a thumbnail set.
+ ThumbnailURL *string `json:"thumbnail_url"`
+}
+
+// OAuthClientCreateOptions fields are those accepted by CreateOAuthClient
+type OAuthClientCreateOptions struct {
+ // The location a successful log in from https://login.linode.com should be redirected to for this client. The receiver of this redirect should be ready to accept an OAuth exchange code and finish the OAuth exchange.
+ RedirectURI string `json:"redirect_uri"`
+
+ // The name of this application. This will be presented to users when they are asked to grant it access to their Account.
+ Label string `json:"label"`
+
+ // If this OAuth Client is public or private.
+ Public bool `json:"public"`
+}
+
+// OAuthClientUpdateOptions fields are those accepted by UpdateOAuthClient
+type OAuthClientUpdateOptions struct {
+ // The location a successful log in from https://login.linode.com should be redirected to for this client. The receiver of this redirect should be ready to accept an OAuth exchange code and finish the OAuth exchange.
+ RedirectURI string `json:"redirect_uri"`
+
+ // The name of this application. This will be presented to users when they are asked to grant it access to their Account.
+ Label string `json:"label"`
+
+ // If this OAuth Client is public or private.
+ Public bool `json:"public"`
+}
+
+// GetCreateOptions converts a OAuthClient to OAuthClientCreateOptions for use in CreateOAuthClient
+func (i OAuthClient) GetCreateOptions() (o OAuthClientCreateOptions) {
+ o.RedirectURI = i.RedirectURI
+ o.Label = i.Label
+ o.Public = i.Public
+
+ return
+}
+
+// GetUpdateOptions converts a OAuthClient to OAuthClientUpdateOptions for use in UpdateOAuthClient
+func (i OAuthClient) GetUpdateOptions() (o OAuthClientUpdateOptions) {
+ o.RedirectURI = i.RedirectURI
+ o.Label = i.Label
+ o.Public = i.Public
+
+ return
+}
+
+// OAuthClientsPagedResponse represents a paginated OAuthClient API response
+type OAuthClientsPagedResponse struct {
+ *PageOptions
+ Data []OAuthClient `json:"data"`
+}
+
+// endpoint gets the endpoint URL for OAuthClient
+func (OAuthClientsPagedResponse) endpoint(c *Client) string {
+ endpoint, err := c.OAuthClients.Endpoint()
+ if err != nil {
+ panic(err)
+ }
+
+ return endpoint
+}
+
+// appendData appends OAuthClients when processing paginated OAuthClient responses
+func (resp *OAuthClientsPagedResponse) appendData(r *OAuthClientsPagedResponse) {
+ resp.Data = append(resp.Data, r.Data...)
+}
+
+// ListOAuthClients lists OAuthClients
+func (c *Client) ListOAuthClients(ctx context.Context, opts *ListOptions) ([]OAuthClient, error) {
+ response := OAuthClientsPagedResponse{}
+ err := c.listHelper(ctx, &response, opts)
+ if err != nil {
+ return nil, err
+ }
+
+ return response.Data, nil
+}
+
+// GetOAuthClient gets the OAuthClient with the provided ID
+func (c *Client) GetOAuthClient(ctx context.Context, id string) (*OAuthClient, error) {
+ e, err := c.OAuthClients.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+
+ e = fmt.Sprintf("%s/%s", e, id)
+ r, err := coupleAPIErrors(c.R(ctx).SetResult(&OAuthClient{}).Get(e))
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Result().(*OAuthClient), nil
+}
+
+// CreateOAuthClient creates an OAuthClient
+func (c *Client) CreateOAuthClient(ctx context.Context, createOpts OAuthClientCreateOptions) (*OAuthClient, error) {
+ var body string
+
+ e, err := c.OAuthClients.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+
+ req := c.R(ctx).SetResult(&OAuthClient{})
+
+ if bodyData, err := json.Marshal(createOpts); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Post(e))
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Result().(*OAuthClient), nil
+}
+
+// UpdateOAuthClient updates the OAuthClient with the specified id
+func (c *Client) UpdateOAuthClient(ctx context.Context, id string, updateOpts OAuthClientUpdateOptions) (*OAuthClient, error) {
+ var body string
+
+ e, err := c.OAuthClients.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+
+ e = fmt.Sprintf("%s/%s", e, id)
+
+ req := c.R(ctx).SetResult(&OAuthClient{})
+
+ if bodyData, err := json.Marshal(updateOpts); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Put(e))
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Result().(*OAuthClient), nil
+}
+
+// DeleteOAuthClient deletes the OAuthClient with the specified id
+func (c *Client) DeleteOAuthClient(ctx context.Context, id string) error {
+ e, err := c.OAuthClients.Endpoint()
+ if err != nil {
+ return err
+ }
+
+ e = fmt.Sprintf("%s/%s", e, id)
+
+ _, err = coupleAPIErrors(c.R(ctx).Delete(e))
+
+ return err
+}
diff --git a/vendor/github.com/linode/linodego/account_payments.go b/vendor/github.com/linode/linodego/account_payments.go
new file mode 100644
index 000000000..e3085178c
--- /dev/null
+++ b/vendor/github.com/linode/linodego/account_payments.go
@@ -0,0 +1,132 @@
+package linodego
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "github.com/linode/linodego/internal/parseabletime"
+)
+
+// Payment represents a Payment object
+type Payment struct {
+ // The unique ID of the Payment
+ ID int `json:"id"`
+
+ // The amount, in US dollars, of the Payment.
+ USD json.Number `json:"usd,Number"`
+
+ // When the Payment was made.
+ Date *time.Time `json:"-"`
+}
+
+// PaymentCreateOptions fields are those accepted by CreatePayment
+type PaymentCreateOptions struct {
+ // CVV (Card Verification Value) of the credit card to be used for the Payment
+ CVV string `json:"cvv,omitempty"`
+
+ // The amount, in US dollars, of the Payment
+ USD json.Number `json:"usd,Number"`
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (i *Payment) UnmarshalJSON(b []byte) error {
+ type Mask Payment
+
+ p := struct {
+ *Mask
+ Date *parseabletime.ParseableTime `json:"date"`
+ }{
+ Mask: (*Mask)(i),
+ }
+
+ if err := json.Unmarshal(b, &p); err != nil {
+ return err
+ }
+
+ i.Date = (*time.Time)(p.Date)
+
+ return nil
+}
+
+// GetCreateOptions converts a Payment to PaymentCreateOptions for use in CreatePayment
+func (i Payment) GetCreateOptions() (o PaymentCreateOptions) {
+ o.USD = i.USD
+ return
+}
+
+// PaymentsPagedResponse represents a paginated Payment API response
+type PaymentsPagedResponse struct {
+ *PageOptions
+ Data []Payment `json:"data"`
+}
+
+// endpoint gets the endpoint URL for Payment
+func (PaymentsPagedResponse) endpoint(c *Client) string {
+ endpoint, err := c.Payments.Endpoint()
+ if err != nil {
+ panic(err)
+ }
+
+ return endpoint
+}
+
+// appendData appends Payments when processing paginated Payment responses
+func (resp *PaymentsPagedResponse) appendData(r *PaymentsPagedResponse) {
+ resp.Data = append(resp.Data, r.Data...)
+}
+
+// ListPayments lists Payments
+func (c *Client) ListPayments(ctx context.Context, opts *ListOptions) ([]Payment, error) {
+ response := PaymentsPagedResponse{}
+ err := c.listHelper(ctx, &response, opts)
+ if err != nil {
+ return nil, err
+ }
+
+ return response.Data, nil
+}
+
+// GetPayment gets the payment with the provided ID
+func (c *Client) GetPayment(ctx context.Context, id int) (*Payment, error) {
+ e, err := c.Payments.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+
+ e = fmt.Sprintf("%s/%d", e, id)
+ r, err := coupleAPIErrors(c.R(ctx).SetResult(&Payment{}).Get(e))
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Result().(*Payment), nil
+}
+
+// CreatePayment creates a Payment
+func (c *Client) CreatePayment(ctx context.Context, createOpts PaymentCreateOptions) (*Payment, error) {
+ var body string
+
+ e, err := c.Payments.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+
+ req := c.R(ctx).SetResult(&Payment{})
+
+ if bodyData, err := json.Marshal(createOpts); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Post(e))
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Result().(*Payment), nil
+}
diff --git a/vendor/github.com/linode/linodego/account_settings.go b/vendor/github.com/linode/linodego/account_settings.go
new file mode 100644
index 000000000..e2c826b51
--- /dev/null
+++ b/vendor/github.com/linode/linodego/account_settings.go
@@ -0,0 +1,75 @@
+package linodego
+
+import (
+ "context"
+ "encoding/json"
+)
+
+// AccountSettings are the account wide flags or plans that affect new resources
+type AccountSettings struct {
+ // The default backups enrollment status for all new Linodes for all users on the account. When enabled, backups are mandatory per instance.
+ BackupsEnabled bool `json:"backups_enabled"`
+
+	// Whether or not Linode Managed service is enabled for the account.
+ Managed bool `json:"managed"`
+
+	// Whether or not the Network Helper is enabled for all new Linode Instance Configs on the account.
+ NetworkHelper bool `json:"network_helper"`
+
+	// A plan name like "longview-3"..."longview-100", or a nil value to cancel any existing subscription plan.
+ LongviewSubscription *string `json:"longview_subscription"`
+}
+
+// AccountSettingsUpdateOptions are the updateable account wide flags or plans that affect new resources.
+type AccountSettingsUpdateOptions struct {
+ // The default backups enrollment status for all new Linodes for all users on the account. When enabled, backups are mandatory per instance.
+ BackupsEnabled *bool `json:"backups_enabled,omitempty"`
+
+	// A plan name like "longview-3"..."longview-100", or a nil value to cancel any existing subscription plan.
+ LongviewSubscription *string `json:"longview_subscription,omitempty"`
+
+ // The default network helper setting for all new Linodes and Linode Configs for all users on the account.
+ NetworkHelper *bool `json:"network_helper,omitempty"`
+}
+
+// GetAccountSettings gets the account wide flags or plans that affect new resources
+func (c *Client) GetAccountSettings(ctx context.Context) (*AccountSettings, error) {
+ e, err := c.AccountSettings.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+
+ r, err := coupleAPIErrors(c.R(ctx).SetResult(&AccountSettings{}).Get(e))
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Result().(*AccountSettings), nil
+}
+
+// UpdateAccountSettings updates the settings associated with the account
+func (c *Client) UpdateAccountSettings(ctx context.Context, settings AccountSettingsUpdateOptions) (*AccountSettings, error) {
+ var body string
+
+ e, err := c.AccountSettings.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+
+ req := c.R(ctx).SetResult(&AccountSettings{})
+
+ if bodyData, err := json.Marshal(settings); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Put(e))
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Result().(*AccountSettings), nil
+}
diff --git a/vendor/github.com/linode/linodego/account_user_grants.go b/vendor/github.com/linode/linodego/account_user_grants.go
new file mode 100644
index 000000000..5ddeed41b
--- /dev/null
+++ b/vendor/github.com/linode/linodego/account_user_grants.go
@@ -0,0 +1,102 @@
+package linodego
+
+import (
+ "context"
+ "encoding/json"
+)
+
+// GrantPermissionLevel is the access level carried by a grant.
+type GrantPermissionLevel string
+
+// GrantPermissionLevel constants are the access levels a grant may carry.
+const (
+	AccessLevelReadOnly  GrantPermissionLevel = "read_only"
+	AccessLevelReadWrite GrantPermissionLevel = "read_write"
+)
+
+// GlobalUserGrants describes a user's account-wide grants.
+type GlobalUserGrants struct {
+	// AccountAccess is nil when the user has no account-level access.
+	AccountAccess        *GrantPermissionLevel `json:"account_access"`
+	AddDomains           bool                  `json:"add_domains"`
+	AddFirewalls         bool                  `json:"add_firewalls"`
+	AddImages            bool                  `json:"add_images"`
+	AddLinodes           bool                  `json:"add_linodes"`
+	AddLongview          bool                  `json:"add_longview"`
+	AddNodeBalancers     bool                  `json:"add_nodebalancers"`
+	AddStackScripts      bool                  `json:"add_stackscripts"`
+	AddVolumes           bool                  `json:"add_volumes"`
+	CancelAccount        bool                  `json:"cancel_account"`
+	LongviewSubscription bool                  `json:"longview_subscription"`
+}
+
+// EntityUserGrant is the grant to apply to a single entity in UpdateUserGrants.
+type EntityUserGrant struct {
+	ID          int                   `json:"id"`
+	Permissions *GrantPermissionLevel `json:"permissions"`
+}
+
+// GrantedEntity represents an entity a user has been granted access to.
+type GrantedEntity struct {
+	ID          int                  `json:"id"`
+	Label       string               `json:"label"`
+	Permissions GrantPermissionLevel `json:"permissions"`
+}
+
+// UserGrants is the full set of a user's grants, per entity type plus global grants.
+type UserGrants struct {
+	Domain       []GrantedEntity `json:"domain"`
+	Firewall     []GrantedEntity `json:"firewall"`
+	Image        []GrantedEntity `json:"image"`
+	Linode       []GrantedEntity `json:"linode"`
+	Longview     []GrantedEntity `json:"longview"`
+	NodeBalancer []GrantedEntity `json:"nodebalancer"`
+	StackScript  []GrantedEntity `json:"stackscript"`
+	Volume       []GrantedEntity `json:"volume"`
+
+	Global GlobalUserGrants `json:"global"`
+}
+
+// UserGrantsUpdateOptions fields are those accepted by UpdateUserGrants.
+type UserGrantsUpdateOptions struct {
+	Domain       []EntityUserGrant `json:"domain,omitempty"`
+	Firewall     []EntityUserGrant `json:"firewall,omitempty"`
+	Image        []EntityUserGrant `json:"image,omitempty"`
+	Linode       []EntityUserGrant `json:"linode,omitempty"`
+	Longview     []EntityUserGrant `json:"longview,omitempty"`
+	NodeBalancer []EntityUserGrant `json:"nodebalancer,omitempty"`
+	StackScript  []EntityUserGrant `json:"stackscript,omitempty"`
+	Volume       []EntityUserGrant `json:"volume,omitempty"`
+
+	Global GlobalUserGrants `json:"global"`
+}
+
+func (c *Client) GetUserGrants(ctx context.Context, username string) (*UserGrants, error) {
+ e, err := c.UserGrants.endpointWithParams(username)
+ if err != nil {
+ return nil, err
+ }
+
+ r, err := coupleAPIErrors(c.R(ctx).SetResult(&UserGrants{}).Get(e))
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Result().(*UserGrants), nil
+}
+
+func (c *Client) UpdateUserGrants(ctx context.Context, username string, updateOpts UserGrantsUpdateOptions) (*UserGrants, error) {
+ var body string
+
+ e, err := c.UserGrants.endpointWithParams(username)
+ if err != nil {
+ return nil, err
+ }
+
+ req := c.R(ctx).SetResult(&UserGrants{})
+
+ if bodyData, err := json.Marshal(updateOpts); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.SetBody(body).Put(e))
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Result().(*UserGrants), nil
+}
diff --git a/vendor/github.com/linode/linodego/account_users.go b/vendor/github.com/linode/linodego/account_users.go
new file mode 100644
index 000000000..09ac46fd2
--- /dev/null
+++ b/vendor/github.com/linode/linodego/account_users.go
@@ -0,0 +1,165 @@
+package linodego
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+)
+
+// User represents a User object
+type User struct {
+	Username   string   `json:"username"`
+	Email      string   `json:"email"`
+	Restricted bool     `json:"restricted"`
+	TFAEnabled bool     `json:"tfa_enabled"`
+	SSHKeys    []string `json:"ssh_keys"`
+}
+
+// UserCreateOptions fields are those accepted by CreateUser
+type UserCreateOptions struct {
+	Username   string `json:"username"`
+	Email      string `json:"email"`
+	Restricted bool   `json:"restricted"`
+}
+
+// UserUpdateOptions fields are those accepted by UpdateUser
+type UserUpdateOptions struct {
+	Username string `json:"username,omitempty"`
+	// Restricted is a pointer so an unset value is omitted rather than serialized as false.
+	Restricted *bool `json:"restricted,omitempty"`
+}
+
+// GetCreateOptions converts a User to UserCreateOptions for use in CreateUser
+func (i User) GetCreateOptions() (o UserCreateOptions) {
+	o = UserCreateOptions{
+		Username:   i.Username,
+		Email:      i.Email,
+		Restricted: i.Restricted,
+	}
+
+	return
+}
+
+// GetUpdateOptions converts a User to UserUpdateOptions for use in UpdateUser
+func (i User) GetUpdateOptions() (o UserUpdateOptions) {
+	// Copy the bool so the options do not alias the receiver's field.
+	restricted := i.Restricted
+	o.Username = i.Username
+	o.Restricted = &restricted
+
+	return
+}
+
+// UsersPagedResponse represents a paginated User API response
+type UsersPagedResponse struct {
+	*PageOptions
+	Data []User `json:"data"`
+}
+
+// endpoint gets the endpoint URL for User
+func (UsersPagedResponse) endpoint(c *Client) string {
+	e, err := c.Users.Endpoint()
+	if err != nil {
+		// A broken endpoint definition is a programming error in resource setup.
+		panic(err)
+	}
+
+	return e
+}
+
+// appendData appends Users when processing paginated User responses
+func (resp *UsersPagedResponse) appendData(r *UsersPagedResponse) {
+	for _, user := range r.Data {
+		resp.Data = append(resp.Data, user)
+	}
+}
+
+// ListUsers lists Users on the account
+func (c *Client) ListUsers(ctx context.Context, opts *ListOptions) ([]User, error) {
+ response := UsersPagedResponse{}
+ err := c.listHelper(ctx, &response, opts)
+ if err != nil {
+ return nil, err
+ }
+
+ return response.Data, nil
+}
+
+// GetUser gets the user with the provided ID
+func (c *Client) GetUser(ctx context.Context, id string) (*User, error) {
+ e, err := c.Users.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+
+ e = fmt.Sprintf("%s/%s", e, id)
+ r, err := coupleAPIErrors(c.R(ctx).SetResult(&User{}).Get(e))
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Result().(*User), nil
+}
+
+// CreateUser creates a User. The email address must be confirmed before the
+// User account can be accessed.
+func (c *Client) CreateUser(ctx context.Context, createOpts UserCreateOptions) (*User, error) {
+ var body string
+
+ e, err := c.Users.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+
+ req := c.R(ctx).SetResult(&User{})
+
+ if bodyData, err := json.Marshal(createOpts); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Post(e))
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Result().(*User), nil
+}
+
+// UpdateUser updates the User with the specified id
+func (c *Client) UpdateUser(ctx context.Context, id string, updateOpts UserUpdateOptions) (*User, error) {
+ var body string
+
+ e, err := c.Users.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+
+ e = fmt.Sprintf("%s/%s", e, id)
+
+ req := c.R(ctx).SetResult(&User{})
+
+ if bodyData, err := json.Marshal(updateOpts); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Put(e))
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Result().(*User), nil
+}
+
+// DeleteUser deletes the User with the specified id
+func (c *Client) DeleteUser(ctx context.Context, id string) error {
+ e, err := c.Users.Endpoint()
+ if err != nil {
+ return err
+ }
+
+ e = fmt.Sprintf("%s/%s", e, id)
+
+ _, err = coupleAPIErrors(c.R(ctx).Delete(e))
+
+ return err
+}
diff --git a/vendor/github.com/linode/linodego/client.go b/vendor/github.com/linode/linodego/client.go
new file mode 100644
index 000000000..a95315427
--- /dev/null
+++ b/vendor/github.com/linode/linodego/client.go
@@ -0,0 +1,425 @@
+package linodego
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "os"
+ "strconv"
+ "time"
+
+ "github.com/go-resty/resty/v2"
+)
+
+const (
+	// APIHost Linode API hostname
+	APIHost = "api.linode.com"
+	// APIHostVar environment var to check for alternate API URL
+	APIHostVar = "LINODE_URL"
+	// APIHostCert environment var containing path to CA cert to validate against
+	APIHostCert = "LINODE_CA"
+	// APIVersion Linode API version
+	APIVersion = "v4"
+	// APIVersionVar environment var to check for alternate API Version
+	APIVersionVar = "LINODE_API_VERSION"
+	// APIProto connect to API with http(s)
+	APIProto = "https"
+	// APIEnvVar environment var to check for API token
+	APIEnvVar = "LINODE_TOKEN"
+	// APISecondsPerPoll how frequently to poll for new Events or Status in WaitFor functions
+	APISecondsPerPoll = 3
+	// APIRetryMaxWaitTime is the maximum wait time between retries of a failed request.
+	APIRetryMaxWaitTime = time.Duration(30) * time.Second
+)
+
+// envDebug is read from the LINODE_DEBUG environment variable in init.
+var envDebug = false
+
+// Client is a wrapper around the Resty client
+type Client struct {
+	resty     *resty.Client
+	userAgent string
+	// resources maps resource names to their definitions; populated by addResources.
+	resources map[string]*Resource
+	debug     bool
+	// retryConditionals are the predicates consulted when deciding to retry a request.
+	retryConditionals []RetryConditional
+
+	// millisecondsPerPoll is the delay between polls; set via SetPollDelay.
+	millisecondsPerPoll time.Duration
+
+	// Per-resource convenience handles, assigned from resources in addResources.
+	Account                  *Resource
+	AccountSettings          *Resource
+	DomainRecords            *Resource
+	Domains                  *Resource
+	Events                   *Resource
+	Firewalls                *Resource
+	FirewallDevices          *Resource
+	FirewallRules            *Resource
+	IPAddresses              *Resource
+	IPv6Pools                *Resource
+	IPv6Ranges               *Resource
+	Images                   *Resource
+	InstanceConfigs          *Resource
+	InstanceDisks            *Resource
+	InstanceIPs              *Resource
+	InstanceSnapshots        *Resource
+	InstanceStats            *Resource
+	InstanceVolumes          *Resource
+	Instances                *Resource
+	InvoiceItems             *Resource
+	Invoices                 *Resource
+	Kernels                  *Resource
+	LKEClusters              *Resource
+	LKEClusterAPIEndpoints   *Resource
+	LKEClusterPools          *Resource
+	LKEVersions              *Resource
+	Longview                 *Resource
+	LongviewClients          *Resource
+	LongviewSubscriptions    *Resource
+	Managed                  *Resource
+	NodeBalancerConfigs      *Resource
+	NodeBalancerNodes        *Resource
+	NodeBalancerStats        *Resource
+	NodeBalancers            *Resource
+	Notifications            *Resource
+	OAuthClients             *Resource
+	ObjectStorageBuckets     *Resource
+	ObjectStorageBucketCerts *Resource
+	ObjectStorageClusters    *Resource
+	ObjectStorageKeys        *Resource
+	Payments                 *Resource
+	Profile                  *Resource
+	Regions                  *Resource
+	SSHKeys                  *Resource
+	StackScripts             *Resource
+	Tags                     *Resource
+	Tickets                  *Resource
+	Token                    *Resource
+	Tokens                   *Resource
+	Types                    *Resource
+	UserGrants               *Resource
+	Users                    *Resource
+	VLANs                    *Resource
+	Volumes                  *Resource
+}
+
+// init reads LINODE_DEBUG once at package load to decide whether Resty
+// debugging output is enabled for new clients.
+func init() {
+	// Whether or not we will enable Resty debugging output
+	if apiDebug, ok := os.LookupEnv("LINODE_DEBUG"); ok {
+		if parsed, err := strconv.ParseBool(apiDebug); err == nil {
+			envDebug = parsed
+			log.Println("[INFO] LINODE_DEBUG being set to", envDebug)
+		} else {
+			// strconv.ParseBool accepts 1/0/t/f/true/false (any case), so the
+			// old "should be an integer" wording was misleading.
+			log.Println("[WARN] LINODE_DEBUG should be a boolean value such as 0 or 1")
+		}
+	}
+}
+
+// SetUserAgent sets a custom user-agent for HTTP requests
+func (c *Client) SetUserAgent(ua string) *Client {
+	c.userAgent = ua
+	c.resty.SetHeader("User-Agent", c.userAgent)
+	return c
+}
+
+// R wraps resty's R method
+func (c *Client) R(ctx context.Context) *resty.Request {
+	req := c.resty.R()
+	req.ExpectContentType("application/json")
+	req.SetHeader("Content-Type", "application/json")
+	req.SetContext(ctx)
+	req.SetError(APIError{})
+	return req
+}
+
+// SetDebug sets the debug on resty's client
+func (c *Client) SetDebug(debug bool) *Client {
+	c.debug = debug
+	c.resty.SetDebug(debug)
+	return c
+}
+
+// SetBaseURL sets the base URL of the Linode v4 API (https://api.linode.com/v4)
+func (c *Client) SetBaseURL(url string) *Client {
+	c.resty.SetHostURL(url)
+	return c
+}
+
+// SetAPIVersion sets the version of the API to interface with
+func (c *Client) SetAPIVersion(apiVersion string) *Client {
+	return c.SetBaseURL(fmt.Sprintf("%s://%s/%s", APIProto, APIHost, apiVersion))
+}
+
+// SetRootCertificate adds a root certificate to the underlying TLS client config
+func (c *Client) SetRootCertificate(path string) *Client {
+	c.resty.SetRootCertificate(path)
+	return c
+}
+
+// SetToken sets the API token for all requests from this client
+// Only necessary if you haven't already provided an http client to NewClient() configured with the token.
+func (c *Client) SetToken(token string) *Client {
+	c.resty.SetHeader("Authorization", "Bearer "+token)
+	return c
+}
+
+// SetRetries adds retry conditions for "Linode Busy." errors and 429s.
+func (c *Client) SetRetries() *Client {
+	c.addRetryConditional(linodeBusyRetryCondition)
+	c.addRetryConditional(tooManyRequestsRetryCondition)
+	c.addRetryConditional(serviceUnavailableRetryCondition)
+	c.addRetryConditional(requestTimeoutRetryCondition)
+	c.SetRetryMaxWaitTime(APIRetryMaxWaitTime)
+	configureRetries(c)
+	return c
+}
+
+// addRetryConditional registers an additional retry predicate with the client.
+func (c *Client) addRetryConditional(retryConditional RetryConditional) *Client {
+	c.retryConditionals = append(c.retryConditionals, retryConditional)
+	return c
+}
+
+// SetRetryMaxWaitTime sets the maximum delay before retrying a request.
+func (c *Client) SetRetryMaxWaitTime(max time.Duration) *Client {
+	c.resty.SetRetryMaxWaitTime(max)
+	return c
+}
+
+// SetRetryWaitTime sets the default (minimum) delay before retrying a request.
+func (c *Client) SetRetryWaitTime(min time.Duration) *Client {
+	c.resty.SetRetryWaitTime(min)
+	return c
+}
+
+// SetRetryAfter sets the callback function to be invoked with a failed request
+// to determine when it should be retried.
+func (c *Client) SetRetryAfter(callback RetryAfter) *Client {
+	c.resty.SetRetryAfter(resty.RetryAfterFunc(callback))
+	return c
+}
+
+// SetRetryCount sets the maximum retry attempts before aborting.
+func (c *Client) SetRetryCount(count int) *Client {
+	c.resty.SetRetryCount(count)
+	return c
+}
+
+// SetPollDelay sets the number of milliseconds to wait between events or status polls.
+// Affects all WaitFor* functions and retries.
+func (c *Client) SetPollDelay(delay time.Duration) *Client {
+	c.millisecondsPerPoll = delay
+	return c
+}
+
+// Resource looks up a resource by name
+func (c Client) Resource(resourceName string) *Resource {
+	r, ok := c.resources[resourceName]
+	if !ok {
+		// Unknown resource names are a programming error, not a runtime condition.
+		log.Fatalf("Could not find resource named '%s', exiting.", resourceName)
+	}
+	return r
+}
+
+// NewClient factory to create new Client struct
+func NewClient(hc *http.Client) (client Client) {
+	if hc == nil {
+		client.resty = resty.New()
+	} else {
+		client.resty = resty.NewWithClient(hc)
+	}
+
+	client.SetUserAgent(DefaultUserAgent)
+
+	// Base URL precedence: LINODE_URL, then LINODE_API_VERSION, then the default.
+	if baseURL, ok := os.LookupEnv(APIHostVar); ok {
+		client.SetBaseURL(baseURL)
+	} else if apiVersion, ok := os.LookupEnv(APIVersionVar); ok {
+		client.SetAPIVersion(apiVersion)
+	} else {
+		client.SetAPIVersion(APIVersion)
+	}
+
+	if certPath, ok := os.LookupEnv(APIHostCert); ok {
+		// Read the cert purely so a bad path fails fast (and for debug output).
+		cert, err := ioutil.ReadFile(certPath)
+		if err != nil {
+			log.Fatalf("[ERROR] Error when reading cert at %s: %s\n", certPath, err.Error())
+		}
+
+		client.SetRootCertificate(certPath)
+
+		if envDebug {
+			log.Printf("[DEBUG] Set API root certificate to %s with contents %s\n", certPath, cert)
+		}
+	}
+
+	client.
+		SetRetryWaitTime((1000 * APISecondsPerPoll) * time.Millisecond).
+		SetPollDelay(1000 * APISecondsPerPoll).
+		SetRetries().
+		SetDebug(envDebug)
+
+	addResources(&client)
+
+	return
+}
+
+// addResources registers every API Resource on the client and wires up the
+// client's per-resource convenience fields.
+// nolint
+func addResources(client *Client) {
+	resources := map[string]*Resource{
+		accountName:                  NewResource(client, accountName, accountEndpoint, false, Account{}, nil),                                          // really?
+		accountSettingsName:          NewResource(client, accountSettingsName, accountSettingsEndpoint, false, AccountSettings{}, nil),                   // really?
+		domainRecordsName:            NewResource(client, domainRecordsName, domainRecordsEndpoint, true, DomainRecord{}, DomainRecordsPagedResponse{}),
+		domainsName:                  NewResource(client, domainsName, domainsEndpoint, false, Domain{}, DomainsPagedResponse{}),
+		eventsName:                   NewResource(client, eventsName, eventsEndpoint, false, Event{}, EventsPagedResponse{}),
+		firewallsName:                NewResource(client, firewallsName, firewallsEndpoint, false, Firewall{}, FirewallsPagedResponse{}),
+		firewallDevicesName:          NewResource(client, firewallDevicesName, firewallDevicesEndpoint, true, FirewallDevice{}, FirewallDevicesPagedResponse{}),
+		firewallRulesName:            NewResource(client, firewallRulesName, firewallRulesEndpoint, true, FirewallRule{}, nil),
+		imagesName:                   NewResource(client, imagesName, imagesEndpoint, false, Image{}, ImagesPagedResponse{}),
+		instanceConfigsName:          NewResource(client, instanceConfigsName, instanceConfigsEndpoint, true, InstanceConfig{}, InstanceConfigsPagedResponse{}),
+		instanceDisksName:            NewResource(client, instanceDisksName, instanceDisksEndpoint, true, InstanceDisk{}, InstanceDisksPagedResponse{}),
+		instanceIPsName:              NewResource(client, instanceIPsName, instanceIPsEndpoint, true, InstanceIP{}, nil), // really?
+		instanceSnapshotsName:        NewResource(client, instanceSnapshotsName, instanceSnapshotsEndpoint, true, InstanceSnapshot{}, nil),
+		instanceStatsName:            NewResource(client, instanceStatsName, instanceStatsEndpoint, true, InstanceStats{}, nil),
+		instanceVolumesName:          NewResource(client, instanceVolumesName, instanceVolumesEndpoint, true, nil, InstanceVolumesPagedResponse{}), // really?
+		instancesName:                NewResource(client, instancesName, instancesEndpoint, false, Instance{}, InstancesPagedResponse{}),
+		invoiceItemsName:             NewResource(client, invoiceItemsName, invoiceItemsEndpoint, true, InvoiceItem{}, InvoiceItemsPagedResponse{}),
+		invoicesName:                 NewResource(client, invoicesName, invoicesEndpoint, false, Invoice{}, InvoicesPagedResponse{}),
+		ipaddressesName:              NewResource(client, ipaddressesName, ipaddressesEndpoint, false, nil, IPAddressesPagedResponse{}), // really?
+		ipv6poolsName:                NewResource(client, ipv6poolsName, ipv6poolsEndpoint, false, nil, IPv6PoolsPagedResponse{}),       // really?
+		ipv6rangesName:               NewResource(client, ipv6rangesName, ipv6rangesEndpoint, false, IPv6Range{}, IPv6RangesPagedResponse{}),
+		kernelsName:                  NewResource(client, kernelsName, kernelsEndpoint, false, LinodeKernel{}, LinodeKernelsPagedResponse{}),
+		lkeClusterAPIEndpointsName:   NewResource(client, lkeClusterAPIEndpointsName, lkeClusterAPIEndpointsEndpoint, true, LKEClusterAPIEndpoint{}, LKEClusterAPIEndpointsPagedResponse{}),
+		lkeClustersName:              NewResource(client, lkeClustersName, lkeClustersEndpoint, false, LKECluster{}, LKEClustersPagedResponse{}),
+		lkeClusterPoolsName:          NewResource(client, lkeClusterPoolsName, lkeClusterPoolsEndpoint, true, LKEClusterPool{}, LKEClusterPoolsPagedResponse{}),
+		lkeVersionsName:              NewResource(client, lkeVersionsName, lkeVersionsEndpoint, false, LKEVersion{}, LKEVersionsPagedResponse{}),
+		longviewName:                 NewResource(client, longviewName, longviewEndpoint, false, nil, nil), // really?
+		longviewclientsName:          NewResource(client, longviewclientsName, longviewclientsEndpoint, false, LongviewClient{}, LongviewClientsPagedResponse{}),
+		longviewsubscriptionsName:    NewResource(client, longviewsubscriptionsName, longviewsubscriptionsEndpoint, false, LongviewSubscription{}, LongviewSubscriptionsPagedResponse{}),
+		managedName:                  NewResource(client, managedName, managedEndpoint, false, nil, nil), // really?
+		nodebalancerconfigsName:      NewResource(client, nodebalancerconfigsName, nodebalancerconfigsEndpoint, true, NodeBalancerConfig{}, NodeBalancerConfigsPagedResponse{}),
+		nodebalancernodesName:        NewResource(client, nodebalancernodesName, nodebalancernodesEndpoint, true, NodeBalancerNode{}, NodeBalancerNodesPagedResponse{}),
+		nodebalancerStatsName:        NewResource(client, nodebalancerStatsName, nodebalancerStatsEndpoint, true, NodeBalancerStats{}, nil),
+		// NOTE(review): uses NodeBalancerConfigsPagedResponse — confirm whether NodeBalancersPagedResponse was intended.
+		nodebalancersName:            NewResource(client, nodebalancersName, nodebalancersEndpoint, false, NodeBalancer{}, NodeBalancerConfigsPagedResponse{}),
+		notificationsName:            NewResource(client, notificationsName, notificationsEndpoint, false, Notification{}, NotificationsPagedResponse{}),
+		oauthClientsName:             NewResource(client, oauthClientsName, oauthClientsEndpoint, false, OAuthClient{}, OAuthClientsPagedResponse{}),
+		objectStorageBucketsName:     NewResource(client, objectStorageBucketsName, objectStorageBucketsEndpoint, false, ObjectStorageBucket{}, ObjectStorageBucketsPagedResponse{}),
+		objectStorageBucketCertsName: NewResource(client, objectStorageBucketCertsName, objectStorageBucketCertsEndpoint, true, ObjectStorageBucketCert{}, nil),
+		objectStorageClustersName:    NewResource(client, objectStorageClustersName, objectStorageClustersEndpoint, false, ObjectStorageCluster{}, ObjectStorageClustersPagedResponse{}),
+		objectStorageKeysName:        NewResource(client, objectStorageKeysName, objectStorageKeysEndpoint, false, ObjectStorageKey{}, ObjectStorageKeysPagedResponse{}),
+		paymentsName:                 NewResource(client, paymentsName, paymentsEndpoint, false, Payment{}, PaymentsPagedResponse{}),
+		profileName:                  NewResource(client, profileName, profileEndpoint, false, nil, nil), // really?
+		regionsName:                  NewResource(client, regionsName, regionsEndpoint, false, Region{}, RegionsPagedResponse{}),
+		sshkeysName:                  NewResource(client, sshkeysName, sshkeysEndpoint, false, SSHKey{}, SSHKeysPagedResponse{}),
+		stackscriptsName:             NewResource(client, stackscriptsName, stackscriptsEndpoint, false, Stackscript{}, StackscriptsPagedResponse{}),
+		tagsName:                     NewResource(client, tagsName, tagsEndpoint, false, Tag{}, TagsPagedResponse{}),
+		ticketsName:                  NewResource(client, ticketsName, ticketsEndpoint, false, Ticket{}, TicketsPagedResponse{}),
+		tokensName:                   NewResource(client, tokensName, tokensEndpoint, false, Token{}, TokensPagedResponse{}),
+		typesName:                    NewResource(client, typesName, typesEndpoint, false, LinodeType{}, LinodeTypesPagedResponse{}),
+		// BUG FIX: was registered with typesName as the resource name; use userGrantsName.
+		userGrantsName:               NewResource(client, userGrantsName, userGrantsEndpoint, true, UserGrants{}, nil),
+		usersName:                    NewResource(client, usersName, usersEndpoint, false, User{}, UsersPagedResponse{}),
+		vlansName:                    NewResource(client, vlansName, vlansEndpoint, false, VLAN{}, VLANsPagedResponse{}),
+		volumesName:                  NewResource(client, volumesName, volumesEndpoint, false, Volume{}, VolumesPagedResponse{}),
+	}
+
+	client.resources = resources
+
+	client.Account = resources[accountName]
+	// BUG FIX: AccountSettings, InvoiceItems, and LongviewClients were declared on
+	// Client and present in the map above but never assigned, leaving the fields
+	// nil (GetAccountSettings et al. would panic).
+	client.AccountSettings = resources[accountSettingsName]
+	client.DomainRecords = resources[domainRecordsName]
+	client.Domains = resources[domainsName]
+	client.Events = resources[eventsName]
+	client.Firewalls = resources[firewallsName]
+	client.FirewallDevices = resources[firewallDevicesName]
+	client.FirewallRules = resources[firewallRulesName]
+	client.IPAddresses = resources[ipaddressesName]
+	client.IPv6Pools = resources[ipv6poolsName]
+	client.IPv6Ranges = resources[ipv6rangesName]
+	client.Images = resources[imagesName]
+	client.InstanceConfigs = resources[instanceConfigsName]
+	client.InstanceDisks = resources[instanceDisksName]
+	client.InstanceIPs = resources[instanceIPsName]
+	client.InstanceSnapshots = resources[instanceSnapshotsName]
+	client.InstanceStats = resources[instanceStatsName]
+	client.InstanceVolumes = resources[instanceVolumesName]
+	client.Instances = resources[instancesName]
+	client.InvoiceItems = resources[invoiceItemsName]
+	client.Invoices = resources[invoicesName]
+	client.Kernels = resources[kernelsName]
+	client.LKEClusterAPIEndpoints = resources[lkeClusterAPIEndpointsName]
+	client.LKEClusters = resources[lkeClustersName]
+	client.LKEClusterPools = resources[lkeClusterPoolsName]
+	client.LKEVersions = resources[lkeVersionsName]
+	client.Longview = resources[longviewName]
+	client.LongviewClients = resources[longviewclientsName]
+	client.LongviewSubscriptions = resources[longviewsubscriptionsName]
+	client.Managed = resources[managedName]
+	client.NodeBalancerConfigs = resources[nodebalancerconfigsName]
+	client.NodeBalancerNodes = resources[nodebalancernodesName]
+	client.NodeBalancerStats = resources[nodebalancerStatsName]
+	client.NodeBalancers = resources[nodebalancersName]
+	client.Notifications = resources[notificationsName]
+	client.OAuthClients = resources[oauthClientsName]
+	client.ObjectStorageBuckets = resources[objectStorageBucketsName]
+	client.ObjectStorageBucketCerts = resources[objectStorageBucketCertsName]
+	client.ObjectStorageClusters = resources[objectStorageClustersName]
+	client.ObjectStorageKeys = resources[objectStorageKeysName]
+	client.Payments = resources[paymentsName]
+	client.Profile = resources[profileName]
+	client.Regions = resources[regionsName]
+	client.SSHKeys = resources[sshkeysName]
+	client.StackScripts = resources[stackscriptsName]
+	client.Tags = resources[tagsName]
+	client.Tickets = resources[ticketsName]
+	// NOTE(review): client.Token is never populated — confirm whether that is intentional.
+	client.Tokens = resources[tokensName]
+	client.Types = resources[typesName]
+	client.UserGrants = resources[userGrantsName]
+	client.Users = resources[usersName]
+	client.VLANs = resources[vlansName]
+	client.Volumes = resources[volumesName]
+}
+
+// copyBool returns a pointer to a copy of the given bool, or nil for nil input.
+func copyBool(bPtr *bool) *bool {
+	if bPtr == nil {
+		return nil
+	}
+	v := *bPtr
+	return &v
+}
+
+// copyInt returns a pointer to a copy of the given int, or nil for nil input.
+func copyInt(iPtr *int) *int {
+	if iPtr == nil {
+		return nil
+	}
+	v := *iPtr
+	return &v
+}
+
+// copyString returns a pointer to a copy of the given string, or nil for nil input.
+func copyString(sPtr *string) *string {
+	if sPtr == nil {
+		return nil
+	}
+	v := *sPtr
+	return &v
+}
+
+// copyTime returns a pointer to a copy of the given time.Time, or nil for nil input.
+func copyTime(tPtr *time.Time) *time.Time {
+	if tPtr == nil {
+		return nil
+	}
+	v := *tPtr
+	return &v
+}
diff --git a/vendor/github.com/linode/linodego/domain_records.go b/vendor/github.com/linode/linodego/domain_records.go
new file mode 100644
index 000000000..9554c228c
--- /dev/null
+++ b/vendor/github.com/linode/linodego/domain_records.go
@@ -0,0 +1,201 @@
+package linodego
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+)
+
+// DomainRecord represents a DomainRecord object
+type DomainRecord struct {
+	ID       int              `json:"id"`
+	Type     DomainRecordType `json:"type"`
+	Name     string           `json:"name"`
+	Target   string           `json:"target"`
+	Priority int              `json:"priority"`
+	Weight   int              `json:"weight"`
+	Port     int              `json:"port"`
+	Service  *string          `json:"service"`
+	Protocol *string          `json:"protocol"`
+	TTLSec   int              `json:"ttl_sec"`
+	Tag      *string          `json:"tag"`
+}
+
+// DomainRecordCreateOptions fields are those accepted by CreateDomainRecord
+type DomainRecordCreateOptions struct {
+	Type     DomainRecordType `json:"type"`
+	Name     string           `json:"name"`
+	Target   string           `json:"target"`
+	Priority *int             `json:"priority,omitempty"`
+	Weight   *int             `json:"weight,omitempty"`
+	Port     *int             `json:"port,omitempty"`
+	Service  *string          `json:"service,omitempty"`
+	Protocol *string          `json:"protocol,omitempty"`
+	TTLSec   int              `json:"ttl_sec,omitempty"` // 0 is not accepted by Linode, so can be omitted
+	Tag      *string          `json:"tag,omitempty"`
+}
+
+// DomainRecordUpdateOptions fields are those accepted by UpdateDomainRecord
+type DomainRecordUpdateOptions struct {
+	Type     DomainRecordType `json:"type,omitempty"`
+	Name     string           `json:"name,omitempty"`
+	Target   string           `json:"target,omitempty"`
+	Priority *int             `json:"priority,omitempty"` // 0 is valid, so omit only nil values
+	Weight   *int             `json:"weight,omitempty"`   // 0 is valid, so omit only nil values
+	Port     *int             `json:"port,omitempty"`     // 0 is valid to spec, so omit only nil values
+	Service  *string          `json:"service,omitempty"`
+	Protocol *string          `json:"protocol,omitempty"`
+	TTLSec   int              `json:"ttl_sec,omitempty"` // 0 is not accepted by Linode, so can be omitted
+	Tag      *string          `json:"tag,omitempty"`
+}
+
+// DomainRecordType constants start with RecordType and include Linode API Domain Record Types
+type DomainRecordType string
+
+// DomainRecordType constants are the DNS record types a DomainRecord can assign
+const (
+	RecordTypeA     DomainRecordType = "A"
+	RecordTypeAAAA  DomainRecordType = "AAAA"
+	RecordTypeNS    DomainRecordType = "NS"
+	RecordTypeMX    DomainRecordType = "MX"
+	RecordTypeCNAME DomainRecordType = "CNAME"
+	RecordTypeTXT   DomainRecordType = "TXT"
+	RecordTypeSRV   DomainRecordType = "SRV"
+	RecordTypePTR   DomainRecordType = "PTR"
+	RecordTypeCAA   DomainRecordType = "CAA"
+)
+
+// GetUpdateOptions converts a DomainRecord to DomainRecordUpdateOptions for use in UpdateDomainRecord
+func (d DomainRecord) GetUpdateOptions() (du DomainRecordUpdateOptions) {
+	// Copy the int fields so the options do not alias the receiver.
+	priority := d.Priority
+	weight := d.Weight
+	port := d.Port
+
+	du = DomainRecordUpdateOptions{
+		Type:     d.Type,
+		Name:     d.Name,
+		Target:   d.Target,
+		Priority: &priority,
+		Weight:   &weight,
+		Port:     &port,
+		Service:  copyString(d.Service),
+		Protocol: copyString(d.Protocol),
+		TTLSec:   d.TTLSec,
+		Tag:      copyString(d.Tag),
+	}
+
+	return
+}
+
+// DomainRecordsPagedResponse represents a paginated DomainRecord API response
+type DomainRecordsPagedResponse struct {
+	*PageOptions
+	Data []DomainRecord `json:"data"`
+}
+
+// endpointWithID gets the endpoint URL for the DomainRecords of a given Domain.
+func (DomainRecordsPagedResponse) endpointWithID(c *Client, id int) string {
+	endpoint, err := c.DomainRecords.endpointWithParams(id)
+	if err != nil {
+		// A broken endpoint definition is a programming error in resource setup.
+		panic(err)
+	}
+
+	return endpoint
+}
+
+// appendData appends DomainRecords when processing paginated DomainRecord responses
+func (resp *DomainRecordsPagedResponse) appendData(r *DomainRecordsPagedResponse) {
+	resp.Data = append(resp.Data, r.Data...)
+}
+
+// ListDomainRecords lists DomainRecords
+func (c *Client) ListDomainRecords(ctx context.Context, domainID int, opts *ListOptions) ([]DomainRecord, error) {
+	response := DomainRecordsPagedResponse{}
+	if err := c.listHelperWithID(ctx, &response, domainID, opts); err != nil {
+		return nil, err
+	}
+
+	return response.Data, nil
+}
+
+// GetDomainRecord gets the domainrecord with the provided ID
+func (c *Client) GetDomainRecord(ctx context.Context, domainID int, id int) (*DomainRecord, error) {
+	e, err := c.DomainRecords.endpointWithParams(domainID)
+	if err != nil {
+		return nil, err
+	}
+	e = fmt.Sprintf("%s/%d", e, id)
+
+	r, err := coupleAPIErrors(c.R(ctx).SetResult(&DomainRecord{}).Get(e))
+	if err != nil {
+		return nil, err
+	}
+
+	return r.Result().(*DomainRecord), nil
+}
+
+// CreateDomainRecord creates a DomainRecord
+func (c *Client) CreateDomainRecord(ctx context.Context, domainID int, domainrecord DomainRecordCreateOptions) (*DomainRecord, error) {
+ var body string
+
+ e, err := c.DomainRecords.endpointWithParams(domainID)
+ if err != nil {
+ return nil, err
+ }
+
+ req := c.R(ctx).SetResult(&DomainRecord{})
+
+ bodyData, err := json.Marshal(domainrecord)
+ if err != nil {
+ return nil, NewError(err)
+ }
+
+ body = string(bodyData)
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Post(e))
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Result().(*DomainRecord), nil
+}
+
+// UpdateDomainRecord updates the DomainRecord with the specified id
+func (c *Client) UpdateDomainRecord(ctx context.Context, domainID int, id int, domainrecord DomainRecordUpdateOptions) (*DomainRecord, error) {
+ var body string
+
+ e, err := c.DomainRecords.endpointWithParams(domainID)
+ if err != nil {
+ return nil, err
+ }
+
+ e = fmt.Sprintf("%s/%d", e, id)
+
+ req := c.R(ctx).SetResult(&DomainRecord{})
+
+ if bodyData, err := json.Marshal(domainrecord); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Put(e))
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Result().(*DomainRecord), nil
+}
+
+// DeleteDomainRecord deletes the DomainRecord with the specified id
+func (c *Client) DeleteDomainRecord(ctx context.Context, domainID int, id int) error {
+	e, err := c.DomainRecords.endpointWithParams(domainID)
+	if err != nil {
+		return err
+	}
+	e = fmt.Sprintf("%s/%d", e, id)
+
+	_, err = coupleAPIErrors(c.R(ctx).Delete(e))
+
+	return err
+}
diff --git a/vendor/github.com/linode/linodego/domains.go b/vendor/github.com/linode/linodego/domains.go
new file mode 100644
index 000000000..bbbffda7b
--- /dev/null
+++ b/vendor/github.com/linode/linodego/domains.go
@@ -0,0 +1,323 @@
+package linodego
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+)
+
+// Domain represents a Domain object
+type Domain struct {
+ // This Domain's unique ID
+ ID int `json:"id"`
+
+ // The domain this Domain represents. These must be unique in our system; you cannot have two Domains representing the same domain.
+ Domain string `json:"domain"`
+
+ // If this Domain represents the authoritative source of information for the domain it describes, or if it is a read-only copy of a master (also called a slave).
+ Type DomainType `json:"type"` // Enum:"master" "slave"
+
+ // Deprecated: The group this Domain belongs to. This is for display purposes only.
+ Group string `json:"group"`
+
+ // Used to control whether this Domain is currently being rendered.
+ Status DomainStatus `json:"status"` // Enum:"disabled" "active" "edit_mode" "has_errors"
+
+ // A description for this Domain. This is for display purposes only.
+ Description string `json:"description"`
+
+ // Start of Authority email address. This is required for master Domains.
+ SOAEmail string `json:"soa_email"`
+
+ // The interval, in seconds, at which a failed refresh should be retried.
+ // Valid values are 300, 3600, 7200, 14400, 28800, 57600, 86400, 172800, 345600, 604800, 1209600, and 2419200 - any other value will be rounded to the nearest valid value.
+ RetrySec int `json:"retry_sec"`
+
+ // The IP addresses representing the master DNS for this Domain.
+ MasterIPs []string `json:"master_ips"`
+
+ // The list of IPs that may perform a zone transfer for this Domain. This is potentially dangerous, and should be set to an empty list unless you intend to use it.
+ AXfrIPs []string `json:"axfr_ips"`
+
+ // An array of tags applied to this object. Tags are for organizational purposes only.
+ Tags []string `json:"tags"`
+
+ // The amount of time in seconds that may pass before this Domain is no longer authoritative. Valid values are 300, 3600, 7200, 14400, 28800, 57600, 86400, 172800, 345600, 604800, 1209600, and 2419200 - any other value will be rounded to the nearest valid value.
+ ExpireSec int `json:"expire_sec"`
+
+ // The amount of time in seconds before this Domain should be refreshed. Valid values are 300, 3600, 7200, 14400, 28800, 57600, 86400, 172800, 345600, 604800, 1209600, and 2419200 - any other value will be rounded to the nearest valid value.
+ RefreshSec int `json:"refresh_sec"`
+
+ // "Time to Live" - the amount of time in seconds that this Domain's records may be cached by resolvers or other domain servers. Valid values are 300, 3600, 7200, 14400, 28800, 57600, 86400, 172800, 345600, 604800, 1209600, and 2419200 - any other value will be rounded to the nearest valid value.
+ TTLSec int `json:"ttl_sec"`
+}
+
+// DomainZoneFile represents the Zone File of a Domain
+type DomainZoneFile struct {
+ ZoneFile []string `json:"zone_file"`
+}
+
+// DomainCreateOptions fields are those accepted by CreateDomain
+type DomainCreateOptions struct {
+ // The domain this Domain represents. These must be unique in our system; you cannot have two Domains representing the same domain.
+ Domain string `json:"domain"`
+
+ // If this Domain represents the authoritative source of information for the domain it describes, or if it is a read-only copy of a master (also called a slave).
+ // Enum:"master" "slave"
+ Type DomainType `json:"type"`
+
+ // Deprecated: The group this Domain belongs to. This is for display purposes only.
+ Group string `json:"group,omitempty"`
+
+ // Used to control whether this Domain is currently being rendered.
+ // Enum:"disabled" "active" "edit_mode" "has_errors"
+ Status DomainStatus `json:"status,omitempty"`
+
+ // A description for this Domain. This is for display purposes only.
+ Description string `json:"description,omitempty"`
+
+ // Start of Authority email address. This is required for master Domains.
+ SOAEmail string `json:"soa_email,omitempty"`
+
+ // The interval, in seconds, at which a failed refresh should be retried.
+ // Valid values are 300, 3600, 7200, 14400, 28800, 57600, 86400, 172800, 345600, 604800, 1209600, and 2419200 - any other value will be rounded to the nearest valid value.
+ RetrySec int `json:"retry_sec,omitempty"`
+
+ // The IP addresses representing the master DNS for this Domain.
+ MasterIPs []string `json:"master_ips"`
+
+ // The list of IPs that may perform a zone transfer for this Domain. This is potentially dangerous, and should be set to an empty list unless you intend to use it.
+ AXfrIPs []string `json:"axfr_ips"`
+
+ // An array of tags applied to this object. Tags are for organizational purposes only.
+ Tags []string `json:"tags"`
+
+ // The amount of time in seconds that may pass before this Domain is no longer authoritative. Valid values are 300, 3600, 7200, 14400, 28800, 57600, 86400, 172800, 345600, 604800, 1209600, and 2419200 - any other value will be rounded to the nearest valid value.
+ ExpireSec int `json:"expire_sec,omitempty"`
+
+ // The amount of time in seconds before this Domain should be refreshed. Valid values are 300, 3600, 7200, 14400, 28800, 57600, 86400, 172800, 345600, 604800, 1209600, and 2419200 - any other value will be rounded to the nearest valid value.
+ RefreshSec int `json:"refresh_sec,omitempty"`
+
+ // "Time to Live" - the amount of time in seconds that this Domain's records may be cached by resolvers or other domain servers. Valid values are 300, 3600, 7200, 14400, 28800, 57600, 86400, 172800, 345600, 604800, 1209600, and 2419200 - any other value will be rounded to the nearest valid value.
+ TTLSec int `json:"ttl_sec,omitempty"`
+}
+
+// DomainUpdateOptions fields are those accepted by UpdateDomain
+type DomainUpdateOptions struct {
+ // The domain this Domain represents. These must be unique in our system; you cannot have two Domains representing the same domain.
+ Domain string `json:"domain,omitempty"`
+
+ // If this Domain represents the authoritative source of information for the domain it describes, or if it is a read-only copy of a master (also called a slave).
+ // Enum:"master" "slave"
+ Type DomainType `json:"type,omitempty"`
+
+ // Deprecated: The group this Domain belongs to. This is for display purposes only.
+ Group string `json:"group,omitempty"`
+
+ // Used to control whether this Domain is currently being rendered.
+ // Enum:"disabled" "active" "edit_mode" "has_errors"
+ Status DomainStatus `json:"status,omitempty"`
+
+ // A description for this Domain. This is for display purposes only.
+ Description string `json:"description,omitempty"`
+
+ // Start of Authority email address. This is required for master Domains.
+ SOAEmail string `json:"soa_email,omitempty"`
+
+ // The interval, in seconds, at which a failed refresh should be retried.
+ // Valid values are 300, 3600, 7200, 14400, 28800, 57600, 86400, 172800, 345600, 604800, 1209600, and 2419200 - any other value will be rounded to the nearest valid value.
+ RetrySec int `json:"retry_sec,omitempty"`
+
+ // The IP addresses representing the master DNS for this Domain.
+ MasterIPs []string `json:"master_ips"`
+
+ // The list of IPs that may perform a zone transfer for this Domain. This is potentially dangerous, and should be set to an empty list unless you intend to use it.
+ AXfrIPs []string `json:"axfr_ips"`
+
+ // An array of tags applied to this object. Tags are for organizational purposes only.
+ Tags []string `json:"tags"`
+
+ // The amount of time in seconds that may pass before this Domain is no longer authoritative. Valid values are 300, 3600, 7200, 14400, 28800, 57600, 86400, 172800, 345600, 604800, 1209600, and 2419200 - any other value will be rounded to the nearest valid value.
+ ExpireSec int `json:"expire_sec,omitempty"`
+
+ // The amount of time in seconds before this Domain should be refreshed. Valid values are 300, 3600, 7200, 14400, 28800, 57600, 86400, 172800, 345600, 604800, 1209600, and 2419200 - any other value will be rounded to the nearest valid value.
+ RefreshSec int `json:"refresh_sec,omitempty"`
+
+ // "Time to Live" - the amount of time in seconds that this Domain's records may be cached by resolvers or other domain servers. Valid values are 300, 3600, 7200, 14400, 28800, 57600, 86400, 172800, 345600, 604800, 1209600, and 2419200 - any other value will be rounded to the nearest valid value.
+ TTLSec int `json:"ttl_sec,omitempty"`
+}
+
+// DomainType constants start with DomainType and include Linode API Domain Type values
+type DomainType string
+
+// DomainType constants reflect the DNS zone type of a Domain
+const (
+ DomainTypeMaster DomainType = "master"
+ DomainTypeSlave DomainType = "slave"
+)
+
+// DomainStatus constants start with DomainStatus and include Linode API Domain Status values
+type DomainStatus string
+
+// DomainStatus constants reflect the current status of a Domain
+const (
+ DomainStatusDisabled DomainStatus = "disabled"
+ DomainStatusActive DomainStatus = "active"
+ DomainStatusEditMode DomainStatus = "edit_mode"
+ DomainStatusHasErrors DomainStatus = "has_errors"
+)
+
+// GetUpdateOptions converts a Domain to DomainUpdateOptions for use in UpdateDomain
+func (d Domain) GetUpdateOptions() (du DomainUpdateOptions) {
+ du.Domain = d.Domain
+ du.Type = d.Type
+ du.Group = d.Group
+ du.Status = d.Status
+ du.Description = d.Description
+ du.SOAEmail = d.SOAEmail
+ du.RetrySec = d.RetrySec
+ du.MasterIPs = d.MasterIPs
+ du.AXfrIPs = d.AXfrIPs
+ du.Tags = d.Tags
+ du.ExpireSec = d.ExpireSec
+ du.RefreshSec = d.RefreshSec
+ du.TTLSec = d.TTLSec
+
+ return
+}
+
+// DomainsPagedResponse represents a paginated Domain API response
+type DomainsPagedResponse struct {
+ *PageOptions
+ Data []Domain `json:"data"`
+}
+
+// endpoint gets the endpoint URL for Domain
+func (DomainsPagedResponse) endpoint(c *Client) string {
+ endpoint, err := c.Domains.Endpoint()
+ if err != nil {
+ panic(err)
+ }
+
+ return endpoint
+}
+
+// appendData appends Domains when processing paginated Domain responses
+func (resp *DomainsPagedResponse) appendData(r *DomainsPagedResponse) {
+ resp.Data = append(resp.Data, r.Data...)
+}
+
+// ListDomains lists Domains
+func (c *Client) ListDomains(ctx context.Context, opts *ListOptions) ([]Domain, error) {
+ response := DomainsPagedResponse{}
+ err := c.listHelper(ctx, &response, opts)
+ if err != nil {
+ return nil, err
+ }
+
+ return response.Data, nil
+}
+
+// GetDomain gets the domain with the provided ID
+func (c *Client) GetDomain(ctx context.Context, id int) (*Domain, error) {
+ e, err := c.Domains.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+
+ e = fmt.Sprintf("%s/%d", e, id)
+ r, err := coupleAPIErrors(c.R(ctx).SetResult(&Domain{}).Get(e))
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Result().(*Domain), nil
+}
+
+// CreateDomain creates a Domain
+func (c *Client) CreateDomain(ctx context.Context, domain DomainCreateOptions) (*Domain, error) {
+ var body string
+
+ e, err := c.Domains.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+
+ req := c.R(ctx).SetResult(&Domain{})
+
+ bodyData, err := json.Marshal(domain)
+ if err != nil {
+ return nil, NewError(err)
+ }
+
+ body = string(bodyData)
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Post(e))
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Result().(*Domain), nil
+}
+
+// UpdateDomain updates the Domain with the specified id
+func (c *Client) UpdateDomain(ctx context.Context, id int, domain DomainUpdateOptions) (*Domain, error) {
+ var body string
+
+ e, err := c.Domains.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+
+ e = fmt.Sprintf("%s/%d", e, id)
+
+ req := c.R(ctx).SetResult(&Domain{})
+
+ if bodyData, err := json.Marshal(domain); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Put(e))
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Result().(*Domain), nil
+}
+
+// DeleteDomain deletes the Domain with the specified id
+func (c *Client) DeleteDomain(ctx context.Context, id int) error {
+ e, err := c.Domains.Endpoint()
+ if err != nil {
+ return err
+ }
+
+ e = fmt.Sprintf("%s/%d", e, id)
+
+ _, err = coupleAPIErrors(c.R(ctx).Delete(e))
+
+ return err
+}
+
+// GetDomainZoneFile gets the zone file for the last rendered zone for the specified domain.
+func (c *Client) GetDomainZoneFile(ctx context.Context, domainID int) (*DomainZoneFile, error) {
+ e, err := c.Domains.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+
+ e = fmt.Sprintf("%s/%d/zone-file", e, domainID)
+
+ resp, err := coupleAPIErrors(c.R(ctx).SetResult(&DomainZoneFile{}).Get(e))
+ if err != nil {
+ return nil, err
+ }
+
+ return resp.Result().(*DomainZoneFile), nil
+}
diff --git a/vendor/github.com/linode/linodego/env.sample b/vendor/github.com/linode/linodego/env.sample
new file mode 100644
index 000000000..b1c9d1803
--- /dev/null
+++ b/vendor/github.com/linode/linodego/env.sample
@@ -0,0 +1,2 @@
+LINODE_TOKEN=
+LINODE_DEBUG=0
diff --git a/vendor/github.com/linode/linodego/errors.go b/vendor/github.com/linode/linodego/errors.go
new file mode 100644
index 000000000..234d6f974
--- /dev/null
+++ b/vendor/github.com/linode/linodego/errors.go
@@ -0,0 +1,134 @@
+package linodego
+
+import (
+ "fmt"
+ "log"
+ "net/http"
+ "strings"
+
+ "github.com/go-resty/resty/v2"
+)
+
+const (
+ // ErrorFromString is the Code identifying Errors created by string types
+ ErrorFromString = 1
+ // ErrorFromError is the Code identifying Errors created by error types
+ ErrorFromError = 2
+ // ErrorFromStringer is the Code identifying Errors created by fmt.Stringer types
+ ErrorFromStringer = 3
+)
+
+// Error wraps the LinodeGo error with the relevant http.Response
+type Error struct {
+ Response *http.Response
+ Code int
+ Message string
+}
+
+// APIErrorReason is an individual invalid request message returned by the Linode API
+type APIErrorReason struct {
+ Reason string `json:"reason"`
+ Field string `json:"field"`
+}
+
+func (r APIErrorReason) Error() string {
+ if len(r.Field) == 0 {
+ return r.Reason
+ }
+
+ return fmt.Sprintf("[%s] %s", r.Field, r.Reason)
+}
+
+// APIError is the error-set returned by the Linode API when presented with an invalid request
+type APIError struct {
+ Errors []APIErrorReason `json:"errors"`
+}
+
+func coupleAPIErrors(r *resty.Response, err error) (*resty.Response, error) {
+ if err != nil {
+ return nil, NewError(err)
+ }
+
+ if r.Error() != nil {
+ // Check that response is of the correct content-type before unmarshalling
+ expectedContentType := r.Request.Header.Get("Accept")
+ responseContentType := r.Header().Get("Content-Type")
+
+ // If the upstream Linode API server being fronted fails to respond to the request,
+ // the http server will respond with a default "Bad Gateway" page with Content-Type
+ // "text/html".
+ if r.StatusCode() == http.StatusBadGateway && responseContentType == "text/html" {
+ return nil, Error{Code: http.StatusBadGateway, Message: http.StatusText(http.StatusBadGateway)}
+ }
+
+ if responseContentType != expectedContentType {
+ msg := fmt.Sprintf(
+ "Unexpected Content-Type: Expected: %v, Received: %v\nResponse body: %s",
+ expectedContentType,
+ responseContentType,
+ string(r.Body()),
+ )
+
+ return nil, Error{Code: r.StatusCode(), Message: msg}
+ }
+
+ apiError, ok := r.Error().(*APIError)
+ if !ok || (ok && len(apiError.Errors) == 0) {
+ return r, nil
+ }
+
+ return nil, NewError(r)
+ }
+
+ return r, nil
+}
+
+func (e APIError) Error() string {
+ x := []string{}
+ for _, msg := range e.Errors {
+ x = append(x, msg.Error())
+ }
+
+ return strings.Join(x, "; ")
+}
+
+func (g Error) Error() string {
+ return fmt.Sprintf("[%03d] %s", g.Code, g.Message)
+}
+
+// NewError creates a linodego.Error with a Code identifying the source err type,
+// - ErrorFromString (1) from a string
+// - ErrorFromError (2) for an error
+// - ErrorFromStringer (3) for a Stringer
+// - HTTP Status Codes (100-600) for a resty.Response object
+func NewError(err interface{}) *Error {
+ if err == nil {
+ return nil
+ }
+
+ switch e := err.(type) {
+ case *Error:
+ return e
+ case *resty.Response:
+ apiError, ok := e.Error().(*APIError)
+
+ if !ok {
+ log.Fatalln("Unexpected Resty Error Response")
+ }
+
+ return &Error{
+ Code: e.RawResponse.StatusCode,
+ Message: apiError.Error(),
+ Response: e.RawResponse,
+ }
+ case error:
+ return &Error{Code: ErrorFromError, Message: e.Error()}
+ case string:
+ return &Error{Code: ErrorFromString, Message: e}
+ case fmt.Stringer:
+ return &Error{Code: ErrorFromStringer, Message: e.String()}
+ default:
+ log.Fatalln("Unsupported type to linodego.NewError")
+ panic(err)
+ }
+}
diff --git a/vendor/github.com/linode/linodego/firewall_devices.go b/vendor/github.com/linode/linodego/firewall_devices.go
new file mode 100644
index 000000000..11a78cee1
--- /dev/null
+++ b/vendor/github.com/linode/linodego/firewall_devices.go
@@ -0,0 +1,140 @@
+package linodego
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "github.com/linode/linodego/internal/parseabletime"
+)
+
+// FirewallDeviceType represents the different kinds of devices governable by a Firewall
+type FirewallDeviceType string
+
+// FirewallDeviceType constants start with FirewallDevice
+const (
+ FirewallDeviceLinode FirewallDeviceType = "linode"
+ FirewallDeviceNodeBalancer FirewallDeviceType = "nodebalancer"
+)
+
+// FirewallDevice represents a device governed by a Firewall
+type FirewallDevice struct {
+ ID int `json:"id"`
+ Entity FirewallDeviceEntity `json:"entity"`
+ Created *time.Time `json:"-"`
+ Updated *time.Time `json:"-"`
+}
+
+// FirewallDeviceCreateOptions fields are those accepted by CreateFirewallDevice
+type FirewallDeviceCreateOptions struct {
+ ID int `json:"id"`
+ Type FirewallDeviceType `json:"type"`
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (device *FirewallDevice) UnmarshalJSON(b []byte) error {
+ type Mask FirewallDevice
+
+ p := struct {
+ *Mask
+ Created *parseabletime.ParseableTime `json:"created"`
+ Updated *parseabletime.ParseableTime `json:"updated"`
+ }{
+ Mask: (*Mask)(device),
+ }
+
+ if err := json.Unmarshal(b, &p); err != nil {
+ return err
+ }
+
+ device.Created = (*time.Time)(p.Created)
+ device.Updated = (*time.Time)(p.Updated)
+ return nil
+}
+
+// FirewallDeviceEntity contains information about a device associated with a Firewall
+type FirewallDeviceEntity struct {
+ ID int `json:"id"`
+ Type FirewallDeviceType `json:"type"`
+ Label string `json:"label"`
+ URL string `json:"url"`
+}
+
+// FirewallDevicesPagedResponse represents a Linode API response for FirewallDevices
+type FirewallDevicesPagedResponse struct {
+ *PageOptions
+ Data []FirewallDevice `json:"data"`
+}
+
+// endpointWithID gets the endpoint URL for FirewallDevices of a given Firewall
+func (FirewallDevicesPagedResponse) endpointWithID(c *Client, id int) string {
+ endpoint, err := c.FirewallDevices.endpointWithParams(id)
+ if err != nil {
+ panic(err)
+ }
+ return endpoint
+}
+
+func (resp *FirewallDevicesPagedResponse) appendData(r *FirewallDevicesPagedResponse) {
+ resp.Data = append(resp.Data, r.Data...)
+}
+
+// ListFirewallDevices get devices associated with a given Firewall
+func (c *Client) ListFirewallDevices(ctx context.Context, firewallID int, opts *ListOptions) ([]FirewallDevice, error) {
+ response := FirewallDevicesPagedResponse{}
+ err := c.listHelperWithID(ctx, &response, firewallID, opts)
+ if err != nil {
+ return nil, err
+ }
+ return response.Data, nil
+}
+
+// GetFirewallDevice gets a FirewallDevice given an ID
+func (c *Client) GetFirewallDevice(ctx context.Context, firewallID, deviceID int) (*FirewallDevice, error) {
+ e, err := c.FirewallDevices.endpointWithParams(firewallID)
+ if err != nil {
+ return nil, err
+ }
+
+ e = fmt.Sprintf("%s/%d", e, deviceID)
+ r, err := coupleAPIErrors(c.R(ctx).SetResult(&FirewallDevice{}).Get(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*FirewallDevice), nil
+}
+
+// CreateFirewallDevice associates a Device with a given Firewall
+func (c *Client) CreateFirewallDevice(ctx context.Context, firewallID int, createOpts FirewallDeviceCreateOptions) (*FirewallDevice, error) {
+ var body string
+ e, err := c.FirewallDevices.endpointWithParams(firewallID)
+ if err != nil {
+ return nil, err
+ }
+
+ req := c.R(ctx).SetResult(&FirewallDevice{})
+ if bodyData, err := json.Marshal(createOpts); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.SetBody(body).Post(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*FirewallDevice), nil
+}
+
+// DeleteFirewallDevice disassociates a Device with a given Firewall
+func (c *Client) DeleteFirewallDevice(ctx context.Context, firewallID, deviceID int) error {
+ e, err := c.FirewallDevices.endpointWithParams(firewallID)
+ if err != nil {
+ return err
+ }
+
+ e = fmt.Sprintf("%s/%d", e, deviceID)
+ _, err = coupleAPIErrors(c.R(ctx).Delete(e))
+ return err
+}
diff --git a/vendor/github.com/linode/linodego/firewall_rules.go b/vendor/github.com/linode/linodego/firewall_rules.go
new file mode 100644
index 000000000..081c60412
--- /dev/null
+++ b/vendor/github.com/linode/linodego/firewall_rules.go
@@ -0,0 +1,76 @@
+package linodego
+
+import (
+ "context"
+ "encoding/json"
+)
+
+// NetworkProtocol enum type
+type NetworkProtocol string
+
+// NetworkProtocol enum values
+const (
+ TCP NetworkProtocol = "TCP"
+ UDP NetworkProtocol = "UDP"
+ ICMP NetworkProtocol = "ICMP"
+)
+
+// NetworkAddresses are arrays of ipv4 and v6 addresses
+type NetworkAddresses struct {
+ IPv4 *[]string `json:"ipv4,omitempty"`
+ IPv6 *[]string `json:"ipv6,omitempty"`
+}
+
+// A FirewallRule is a whitelist of ports, protocols, and addresses for which traffic should be allowed.
+type FirewallRule struct {
+ Action string `json:"action"`
+ Label string `json:"label"`
+ Description string `json:"description,omitempty"`
+ Ports string `json:"ports,omitempty"`
+ Protocol NetworkProtocol `json:"protocol"`
+ Addresses NetworkAddresses `json:"addresses"`
+}
+
+// FirewallRuleSet is a pair of inbound and outbound rules that specify what network traffic should be allowed.
+type FirewallRuleSet struct {
+ Inbound []FirewallRule `json:"inbound"`
+ InboundPolicy string `json:"inbound_policy"`
+ Outbound []FirewallRule `json:"outbound"`
+ OutboundPolicy string `json:"outbound_policy"`
+}
+
+// GetFirewallRules gets the FirewallRuleSet for the given Firewall.
+func (c *Client) GetFirewallRules(ctx context.Context, firewallID int) (*FirewallRuleSet, error) {
+ e, err := c.FirewallRules.endpointWithParams(firewallID)
+ if err != nil {
+ return nil, err
+ }
+
+ r, err := coupleAPIErrors(c.R(ctx).SetResult(&FirewallRuleSet{}).Get(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*FirewallRuleSet), nil
+}
+
+// UpdateFirewallRules updates the FirewallRuleSet for the given Firewall
+func (c *Client) UpdateFirewallRules(ctx context.Context, firewallID int, rules FirewallRuleSet) (*FirewallRuleSet, error) {
+ e, err := c.FirewallRules.endpointWithParams(firewallID)
+ if err != nil {
+ return nil, err
+ }
+
+ var body string
+ req := c.R(ctx).SetResult(&FirewallRuleSet{})
+ if bodyData, err := json.Marshal(rules); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.SetBody(body).Put(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*FirewallRuleSet), nil
+}
diff --git a/vendor/github.com/linode/linodego/firewalls.go b/vendor/github.com/linode/linodego/firewalls.go
new file mode 100644
index 000000000..c0379881d
--- /dev/null
+++ b/vendor/github.com/linode/linodego/firewalls.go
@@ -0,0 +1,194 @@
+package linodego
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "github.com/linode/linodego/internal/parseabletime"
+)
+
+// FirewallStatus enum type
+type FirewallStatus string
+
+// FirewallStatus enums start with Firewall
+const (
+ FirewallEnabled FirewallStatus = "enabled"
+ FirewallDisabled FirewallStatus = "disabled"
+ FirewallDeleted FirewallStatus = "deleted"
+)
+
+// A Firewall is a set of networking rules (iptables) applied to Devices with which it is associated
+type Firewall struct {
+ ID int `json:"id"`
+ Label string `json:"label"`
+ Status FirewallStatus `json:"status"`
+ Tags []string `json:"tags,omitempty"`
+ Rules FirewallRuleSet `json:"rules"`
+ Created *time.Time `json:"-"`
+ Updated *time.Time `json:"-"`
+}
+
+// DevicesCreationOptions fields are used when adding devices during the Firewall creation process.
+type DevicesCreationOptions struct {
+ Linodes []int `json:"linodes,omitempty"`
+ NodeBalancers []int `json:"nodebalancers,omitempty"`
+}
+
+// FirewallCreateOptions fields are those accepted by CreateFirewall
+type FirewallCreateOptions struct {
+ Label string `json:"label,omitempty"`
+ Rules FirewallRuleSet `json:"rules"`
+ Tags []string `json:"tags,omitempty"`
+ Devices DevicesCreationOptions `json:"devices,omitempty"`
+}
+
+// FirewallUpdateOptions is an options struct used when Updating a Firewall
+type FirewallUpdateOptions struct {
+ Label string `json:"label,omitempty"`
+ Status FirewallStatus `json:"status,omitempty"`
+ Tags *[]string `json:"tags,omitempty"`
+}
+
+// GetUpdateOptions converts a Firewall to FirewallUpdateOptions for use in Client.UpdateFirewall.
+func (f *Firewall) GetUpdateOptions() FirewallUpdateOptions {
+ return FirewallUpdateOptions{
+ Label: f.Label,
+ Status: f.Status,
+ Tags: &f.Tags,
+ }
+}
+
+// UnmarshalJSON for Firewall responses
+func (f *Firewall) UnmarshalJSON(b []byte) error {
+ type Mask Firewall
+
+ p := struct {
+ *Mask
+ Created *parseabletime.ParseableTime `json:"created"`
+ Updated *parseabletime.ParseableTime `json:"updated"`
+ }{
+ Mask: (*Mask)(f),
+ }
+
+ if err := json.Unmarshal(b, &p); err != nil {
+ return err
+ }
+
+ f.Created = (*time.Time)(p.Created)
+ f.Updated = (*time.Time)(p.Updated)
+ return nil
+}
+
+// FirewallsPagedResponse represents a Linode API response for listing of Cloud Firewalls
+type FirewallsPagedResponse struct {
+ *PageOptions
+ Data []Firewall `json:"data"`
+}
+
+func (FirewallsPagedResponse) endpoint(c *Client) string {
+ endpoint, err := c.Firewalls.Endpoint()
+ if err != nil {
+ panic(err)
+ }
+ return endpoint
+}
+
+func (resp *FirewallsPagedResponse) appendData(r *FirewallsPagedResponse) {
+ resp.Data = append(resp.Data, r.Data...)
+}
+
+// ListFirewalls returns a paginated list of Cloud Firewalls
+func (c *Client) ListFirewalls(ctx context.Context, opts *ListOptions) ([]Firewall, error) {
+ response := FirewallsPagedResponse{}
+
+ err := c.listHelper(ctx, &response, opts)
+ if err != nil {
+ return nil, err
+ }
+
+ return response.Data, nil
+}
+
+// CreateFirewall creates a single Firewall with at least one set of inbound or outbound rules
+func (c *Client) CreateFirewall(ctx context.Context, createOpts FirewallCreateOptions) (*Firewall, error) {
+ var body string
+ e, err := c.Firewalls.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+
+ req := c.R(ctx).SetResult(&Firewall{})
+
+ if bodyData, err := json.Marshal(createOpts); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Post(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*Firewall), nil
+}
+
+// GetFirewall gets a single Firewall with the provided ID
+func (c *Client) GetFirewall(ctx context.Context, id int) (*Firewall, error) {
+ e, err := c.Firewalls.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+
+ req := c.R(ctx)
+
+ e = fmt.Sprintf("%s/%d", e, id)
+ r, err := coupleAPIErrors(req.SetResult(&Firewall{}).Get(e))
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Result().(*Firewall), nil
+}
+
+// UpdateFirewall updates a Firewall with the given ID
+func (c *Client) UpdateFirewall(ctx context.Context, id int, updateOpts FirewallUpdateOptions) (*Firewall, error) {
+ e, err := c.Firewalls.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+
+ req := c.R(ctx).SetResult(&Firewall{})
+
+ bodyData, err := json.Marshal(updateOpts)
+ if err != nil {
+ return nil, NewError(err)
+ }
+
+ body := string(bodyData)
+
+ e = fmt.Sprintf("%s/%d", e, id)
+ r, err := coupleAPIErrors(req.SetBody(body).Put(e))
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Result().(*Firewall), nil
+}
+
+// DeleteFirewall deletes a single Firewall with the provided ID
+func (c *Client) DeleteFirewall(ctx context.Context, id int) error {
+ e, err := c.Firewalls.Endpoint()
+ if err != nil {
+ return err
+ }
+
+ req := c.R(ctx)
+
+ e = fmt.Sprintf("%s/%d", e, id)
+ _, err = coupleAPIErrors(req.Delete(e))
+ return err
+}
diff --git a/vendor/github.com/linode/linodego/go.mod b/vendor/github.com/linode/linodego/go.mod
new file mode 100644
index 000000000..c4d45d114
--- /dev/null
+++ b/vendor/github.com/linode/linodego/go.mod
@@ -0,0 +1,11 @@
+module github.com/linode/linodego
+
+require (
+ github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48
+ github.com/google/go-cmp v0.4.0
+ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
+)
+
+go 1.16
+
+retract v1.0.0 // Accidental branch push
diff --git a/vendor/github.com/linode/linodego/go.sum b/vendor/github.com/linode/linodego/go.sum
new file mode 100644
index 000000000..de92d389d
--- /dev/null
+++ b/vendor/github.com/linode/linodego/go.sum
@@ -0,0 +1,22 @@
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48 h1:JVrqSeQfdhYRFk24TvhTZWU0q8lfCojxZQFi3Ou7+uY=
+github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48/go.mod h1:dZGr0i9PLlaaTD4H/hoZIDjQ+r6xq8mgbRzHZf7f2J8=
+github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
diff --git a/vendor/github.com/linode/linodego/images.go b/vendor/github.com/linode/linodego/images.go
new file mode 100644
index 000000000..0a5d1a597
--- /dev/null
+++ b/vendor/github.com/linode/linodego/images.go
@@ -0,0 +1,274 @@
+package linodego
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/go-resty/resty/v2"
+ "github.com/linode/linodego/internal/parseabletime"
+)
+
+// ImageStatus represents the status of an Image.
+type ImageStatus string
+
+// ImageStatus options start with ImageStatus and include all Image statuses
+const (
+ ImageStatusCreating ImageStatus = "creating"
+ ImageStatusPendingUpload ImageStatus = "pending_upload"
+ ImageStatusAvailable ImageStatus = "available"
+)
+
+// Image represents a deployable Image object for use with Linode Instances
+type Image struct {
+ ID string `json:"id"`
+ CreatedBy string `json:"created_by"`
+ Label string `json:"label"`
+ Description string `json:"description"`
+ Type string `json:"type"`
+ Vendor string `json:"vendor"`
+ Status ImageStatus `json:"status"`
+ Size int `json:"size"`
+ IsPublic bool `json:"is_public"`
+ Deprecated bool `json:"deprecated"`
+ Created *time.Time `json:"-"`
+ Expiry *time.Time `json:"-"`
+}
+
+// ImageCreateOptions fields are those accepted by CreateImage
+type ImageCreateOptions struct {
+ DiskID int `json:"disk_id"`
+ Label string `json:"label"`
+ Description string `json:"description,omitempty"`
+}
+
+// ImageUpdateOptions fields are those accepted by UpdateImage
+type ImageUpdateOptions struct {
+ Label string `json:"label,omitempty"`
+ Description *string `json:"description,omitempty"`
+}
+
+// ImageCreateUploadResponse fields are those returned by CreateImageUpload
+type ImageCreateUploadResponse struct {
+ Image *Image `json:"image"`
+ UploadTo string `json:"upload_to"`
+}
+
+// ImageCreateUploadOptions fields are those accepted by CreateImageUpload
+type ImageCreateUploadOptions struct {
+ Region string `json:"region"`
+ Label string `json:"label"`
+ Description string `json:"description,omitempty"`
+}
+
+// ImageUploadOptions fields are those accepted by UploadImage
+type ImageUploadOptions struct {
+ Region string `json:"region"`
+ Label string `json:"label"`
+ Description string `json:"description,omitempty"`
+ Image io.Reader
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (i *Image) UnmarshalJSON(b []byte) error {
+ type Mask Image
+
+ p := struct {
+ *Mask
+ Created *parseabletime.ParseableTime `json:"created"`
+ Expiry *parseabletime.ParseableTime `json:"expiry"`
+ }{
+ Mask: (*Mask)(i),
+ }
+
+ if err := json.Unmarshal(b, &p); err != nil {
+ return err
+ }
+
+ i.Created = (*time.Time)(p.Created)
+ i.Expiry = (*time.Time)(p.Expiry)
+
+ return nil
+}
+
+// GetUpdateOptions converts an Image to ImageUpdateOptions for use in UpdateImage
+func (i Image) GetUpdateOptions() (iu ImageUpdateOptions) {
+ iu.Label = i.Label
+ iu.Description = copyString(&i.Description)
+ return
+}
+
+// ImagesPagedResponse represents a linode API response for listing of images
+type ImagesPagedResponse struct {
+ *PageOptions
+ Data []Image `json:"data"`
+}
+
+func (ImagesPagedResponse) endpoint(c *Client) string {
+ endpoint, err := c.Images.Endpoint()
+ if err != nil {
+ panic(err)
+ }
+ return endpoint
+}
+
+func (resp *ImagesPagedResponse) appendData(r *ImagesPagedResponse) {
+ resp.Data = append(resp.Data, r.Data...)
+}
+
+// ListImages lists Images
+func (c *Client) ListImages(ctx context.Context, opts *ListOptions) ([]Image, error) {
+ response := ImagesPagedResponse{}
+ err := c.listHelper(ctx, &response, opts)
+ if err != nil {
+ return nil, err
+ }
+ return response.Data, nil
+}
+
+// GetImage gets the Image with the provided ID
+func (c *Client) GetImage(ctx context.Context, id string) (*Image, error) {
+ e, err := c.Images.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+
+ e = fmt.Sprintf("%s/%s", e, id)
+ r, err := coupleAPIErrors(c.Images.R(ctx).Get(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*Image), nil
+}
+
+// CreateImage creates a Image
+func (c *Client) CreateImage(ctx context.Context, createOpts ImageCreateOptions) (*Image, error) {
+ var body string
+
+ e, err := c.Images.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+
+ req := c.R(ctx).SetResult(&Image{})
+
+ if bodyData, err := json.Marshal(createOpts); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Post(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*Image), nil
+}
+
+// UpdateImage updates the Image with the specified id
+func (c *Client) UpdateImage(ctx context.Context, id string, updateOpts ImageUpdateOptions) (*Image, error) {
+ var body string
+
+ e, err := c.Images.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+
+ e = fmt.Sprintf("%s/%s", e, id)
+
+ req := c.R(ctx).SetResult(&Image{})
+
+ if bodyData, err := json.Marshal(updateOpts); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Put(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*Image), nil
+}
+
+// DeleteImage deletes the Image with the specified id
+func (c *Client) DeleteImage(ctx context.Context, id string) error {
+ e, err := c.Images.Endpoint()
+ if err != nil {
+ return err
+ }
+
+ e = fmt.Sprintf("%s/%s", e, id)
+
+ _, err = coupleAPIErrors(c.R(ctx).Delete(e))
+ return err
+}
+
+// CreateImageUpload creates an Image and an upload URL
+func (c *Client) CreateImageUpload(ctx context.Context, createOpts ImageCreateUploadOptions) (image *Image, uploadURL string, err error) {
+ var body string
+
+ e, err := c.Images.Endpoint()
+ if err != nil {
+ return nil, "", err
+ }
+
+ e = fmt.Sprintf("%s/upload", e)
+
+ req := c.R(ctx).SetResult(&ImageCreateUploadResponse{})
+
+ if bodyData, err := json.Marshal(createOpts); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, "", NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Post(e))
+ if err != nil {
+ return nil, "", err
+ }
+
+ result, ok := r.Result().(*ImageCreateUploadResponse)
+ if !ok {
+ return nil, "", fmt.Errorf("failed to parse result")
+ }
+
+ return result.Image, result.UploadTo, nil
+}
+
+// UploadImageToURL uploads the given image to the given upload URL
+func (c *Client) UploadImageToURL(ctx context.Context, uploadURL string, image io.Reader) error {
+ // Linode-specific headers do not need to be sent to this endpoint
+ req := resty.New().SetDebug(c.resty.Debug).R().
+ SetContext(ctx).
+ SetContentLength(true).
+ SetHeader("Content-Type", "application/octet-stream").
+ SetBody(image)
+
+ _, err := coupleAPIErrors(req.
+ Put(uploadURL))
+
+ return err
+}
+
+// UploadImage creates and uploads an image
+func (c *Client) UploadImage(ctx context.Context, options ImageUploadOptions) (*Image, error) {
+ image, uploadURL, err := c.CreateImageUpload(ctx, ImageCreateUploadOptions{
+ Label: options.Label,
+ Region: options.Region,
+ Description: options.Description,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return image, c.UploadImageToURL(ctx, uploadURL, options.Image)
+}
diff --git a/vendor/github.com/linode/linodego/instance_configs.go b/vendor/github.com/linode/linodego/instance_configs.go
new file mode 100644
index 000000000..fc5566bd9
--- /dev/null
+++ b/vendor/github.com/linode/linodego/instance_configs.go
@@ -0,0 +1,275 @@
+package linodego
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "github.com/linode/linodego/internal/parseabletime"
+)
+
+// InstanceConfig represents all of the settings that control the boot and run configuration of a Linode Instance
+type InstanceConfig struct {
+ ID int `json:"id"`
+ Label string `json:"label"`
+ Comments string `json:"comments"`
+ Devices *InstanceConfigDeviceMap `json:"devices"`
+ Helpers *InstanceConfigHelpers `json:"helpers"`
+ Interfaces []InstanceConfigInterface `json:"interfaces"`
+ MemoryLimit int `json:"memory_limit"`
+ Kernel string `json:"kernel"`
+ InitRD *int `json:"init_rd"`
+ RootDevice string `json:"root_device"`
+ RunLevel string `json:"run_level"`
+ VirtMode string `json:"virt_mode"`
+ Created *time.Time `json:"-"`
+ Updated *time.Time `json:"-"`
+}
+
+// InstanceConfigDevice contains either the DiskID or VolumeID assigned to a Config Device
+type InstanceConfigDevice struct {
+ DiskID int `json:"disk_id,omitempty"`
+ VolumeID int `json:"volume_id,omitempty"`
+}
+
+// InstanceConfigDeviceMap contains SDA-SDH InstanceConfigDevice settings
+type InstanceConfigDeviceMap struct {
+ SDA *InstanceConfigDevice `json:"sda,omitempty"`
+ SDB *InstanceConfigDevice `json:"sdb,omitempty"`
+ SDC *InstanceConfigDevice `json:"sdc,omitempty"`
+ SDD *InstanceConfigDevice `json:"sdd,omitempty"`
+ SDE *InstanceConfigDevice `json:"sde,omitempty"`
+ SDF *InstanceConfigDevice `json:"sdf,omitempty"`
+ SDG *InstanceConfigDevice `json:"sdg,omitempty"`
+ SDH *InstanceConfigDevice `json:"sdh,omitempty"`
+}
+
+// InstanceConfigHelpers are Instance Config options that control Linux distribution specific tweaks
+type InstanceConfigHelpers struct {
+ UpdateDBDisabled bool `json:"updatedb_disabled"`
+ Distro bool `json:"distro"`
+ ModulesDep bool `json:"modules_dep"`
+ Network bool `json:"network"`
+ DevTmpFsAutomount bool `json:"devtmpfs_automount"`
+}
+
+// ConfigInterfacePurpose options start with InterfacePurpose and include all known interface purpose types
+type ConfigInterfacePurpose string
+
+const (
+ InterfacePurposePublic ConfigInterfacePurpose = "public"
+ InterfacePurposeVLAN ConfigInterfacePurpose = "vlan"
+)
+
+// InstanceConfigInterface contains information about a configuration's network interface
+type InstanceConfigInterface struct {
+ IPAMAddress string `json:"ipam_address"`
+ Label string `json:"label"`
+ Purpose ConfigInterfacePurpose `json:"purpose"`
+}
+
+// InstanceConfigsPagedResponse represents a paginated InstanceConfig API response
+type InstanceConfigsPagedResponse struct {
+ *PageOptions
+ Data []InstanceConfig `json:"data"`
+}
+
+// InstanceConfigCreateOptions are InstanceConfig settings that can be used at creation
+type InstanceConfigCreateOptions struct {
+ Label string `json:"label,omitempty"`
+ Comments string `json:"comments,omitempty"`
+ Devices InstanceConfigDeviceMap `json:"devices"`
+ Helpers *InstanceConfigHelpers `json:"helpers,omitempty"`
+ Interfaces []InstanceConfigInterface `json:"interfaces"`
+ MemoryLimit int `json:"memory_limit,omitempty"`
+ Kernel string `json:"kernel,omitempty"`
+ InitRD int `json:"init_rd,omitempty"`
+ RootDevice *string `json:"root_device,omitempty"`
+ RunLevel string `json:"run_level,omitempty"`
+ VirtMode string `json:"virt_mode,omitempty"`
+}
+
+// InstanceConfigUpdateOptions are InstanceConfig settings that can be used in updates
+type InstanceConfigUpdateOptions struct {
+ Label string `json:"label,omitempty"`
+ Comments string `json:"comments"`
+ Devices *InstanceConfigDeviceMap `json:"devices,omitempty"`
+ Helpers *InstanceConfigHelpers `json:"helpers,omitempty"`
+ Interfaces []InstanceConfigInterface `json:"interfaces"`
+ // MemoryLimit 0 means unlimitted, this is not omitted
+ MemoryLimit int `json:"memory_limit"`
+ Kernel string `json:"kernel,omitempty"`
+ // InitRD is nullable, permit the sending of null
+ InitRD *int `json:"init_rd"`
+ RootDevice string `json:"root_device,omitempty"`
+ RunLevel string `json:"run_level,omitempty"`
+ VirtMode string `json:"virt_mode,omitempty"`
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (i *InstanceConfig) UnmarshalJSON(b []byte) error {
+ type Mask InstanceConfig
+
+ p := struct {
+ *Mask
+ Created *parseabletime.ParseableTime `json:"created"`
+ Updated *parseabletime.ParseableTime `json:"updated"`
+ }{
+ Mask: (*Mask)(i),
+ }
+
+ if err := json.Unmarshal(b, &p); err != nil {
+ return err
+ }
+
+ i.Created = (*time.Time)(p.Created)
+ i.Updated = (*time.Time)(p.Updated)
+
+ return nil
+}
+
+// GetCreateOptions converts a InstanceConfig to InstanceConfigCreateOptions for use in CreateInstanceConfig
+func (i InstanceConfig) GetCreateOptions() InstanceConfigCreateOptions {
+ initrd := 0
+ if i.InitRD != nil {
+ initrd = *i.InitRD
+ }
+ return InstanceConfigCreateOptions{
+ Label: i.Label,
+ Comments: i.Comments,
+ Devices: *i.Devices,
+ Helpers: i.Helpers,
+ Interfaces: i.Interfaces,
+ MemoryLimit: i.MemoryLimit,
+ Kernel: i.Kernel,
+ InitRD: initrd,
+ RootDevice: copyString(&i.RootDevice),
+ RunLevel: i.RunLevel,
+ VirtMode: i.VirtMode,
+ }
+}
+
+// GetUpdateOptions converts a InstanceConfig to InstanceConfigUpdateOptions for use in UpdateInstanceConfig
+func (i InstanceConfig) GetUpdateOptions() InstanceConfigUpdateOptions {
+ return InstanceConfigUpdateOptions{
+ Label: i.Label,
+ Comments: i.Comments,
+ Devices: i.Devices,
+ Helpers: i.Helpers,
+ Interfaces: i.Interfaces,
+ MemoryLimit: i.MemoryLimit,
+ Kernel: i.Kernel,
+ InitRD: copyInt(i.InitRD),
+ RootDevice: i.RootDevice,
+ RunLevel: i.RunLevel,
+ VirtMode: i.VirtMode,
+ }
+}
+
+// endpointWithID gets the endpoint URL for InstanceConfigs of a given Instance
+func (InstanceConfigsPagedResponse) endpointWithID(c *Client, id int) string {
+ endpoint, err := c.InstanceConfigs.endpointWithParams(id)
+ if err != nil {
+ panic(err)
+ }
+ return endpoint
+}
+
+// appendData appends InstanceConfigs when processing paginated InstanceConfig responses
+func (resp *InstanceConfigsPagedResponse) appendData(r *InstanceConfigsPagedResponse) {
+ resp.Data = append(resp.Data, r.Data...)
+}
+
+// ListInstanceConfigs lists InstanceConfigs
+func (c *Client) ListInstanceConfigs(ctx context.Context, linodeID int, opts *ListOptions) ([]InstanceConfig, error) {
+ response := InstanceConfigsPagedResponse{}
+ err := c.listHelperWithID(ctx, &response, linodeID, opts)
+ if err != nil {
+ return nil, err
+ }
+ return response.Data, nil
+}
+
+// GetInstanceConfig gets the template with the provided ID
+func (c *Client) GetInstanceConfig(ctx context.Context, linodeID int, configID int) (*InstanceConfig, error) {
+ e, err := c.InstanceConfigs.endpointWithParams(linodeID)
+ if err != nil {
+ return nil, err
+ }
+ e = fmt.Sprintf("%s/%d", e, configID)
+ r, err := coupleAPIErrors(c.R(ctx).SetResult(&InstanceConfig{}).Get(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*InstanceConfig), nil
+}
+
+// CreateInstanceConfig creates a new InstanceConfig for the given Instance
+func (c *Client) CreateInstanceConfig(ctx context.Context, linodeID int, createOpts InstanceConfigCreateOptions) (*InstanceConfig, error) {
+ var body string
+ e, err := c.InstanceConfigs.endpointWithParams(linodeID)
+ if err != nil {
+ return nil, err
+ }
+
+ req := c.R(ctx).SetResult(&InstanceConfig{})
+
+ if bodyData, err := json.Marshal(createOpts); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, err
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Post(e))
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Result().(*InstanceConfig), nil
+}
+
+// UpdateInstanceConfig update an InstanceConfig for the given Instance
+func (c *Client) UpdateInstanceConfig(ctx context.Context, linodeID int, configID int, updateOpts InstanceConfigUpdateOptions) (*InstanceConfig, error) {
+ var body string
+ e, err := c.InstanceConfigs.endpointWithParams(linodeID)
+ if err != nil {
+ return nil, err
+ }
+ e = fmt.Sprintf("%s/%d", e, configID)
+ req := c.R(ctx).SetResult(&InstanceConfig{})
+
+ if bodyData, err := json.Marshal(updateOpts); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, err
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Put(e))
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Result().(*InstanceConfig), nil
+}
+
+// RenameInstanceConfig renames an InstanceConfig
+func (c *Client) RenameInstanceConfig(ctx context.Context, linodeID int, configID int, label string) (*InstanceConfig, error) {
+ return c.UpdateInstanceConfig(ctx, linodeID, configID, InstanceConfigUpdateOptions{Label: label})
+}
+
+// DeleteInstanceConfig deletes a Linode InstanceConfig
+func (c *Client) DeleteInstanceConfig(ctx context.Context, linodeID int, configID int) error {
+ e, err := c.InstanceConfigs.endpointWithParams(linodeID)
+ if err != nil {
+ return err
+ }
+ e = fmt.Sprintf("%s/%d", e, configID)
+
+ _, err = coupleAPIErrors(c.R(ctx).Delete(e))
+ return err
+}
diff --git a/vendor/github.com/linode/linodego/instance_disks.go b/vendor/github.com/linode/linodego/instance_disks.go
new file mode 100644
index 000000000..313d846ee
--- /dev/null
+++ b/vendor/github.com/linode/linodego/instance_disks.go
@@ -0,0 +1,257 @@
+package linodego
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "github.com/linode/linodego/internal/parseabletime"
+)
+
+// InstanceDisk represents an Instance Disk object
+type InstanceDisk struct {
+ ID int `json:"id"`
+ Label string `json:"label"`
+ Status DiskStatus `json:"status"`
+ Size int `json:"size"`
+ Filesystem DiskFilesystem `json:"filesystem"`
+ Created *time.Time `json:"-"`
+ Updated *time.Time `json:"-"`
+}
+
+// DiskFilesystem constants start with Filesystem and include Linode API Filesystems
+type DiskFilesystem string
+
+// DiskFilesystem constants represent the filesystems types an Instance Disk may use
+const (
+ FilesystemRaw DiskFilesystem = "raw"
+ FilesystemSwap DiskFilesystem = "swap"
+ FilesystemExt3 DiskFilesystem = "ext3"
+ FilesystemExt4 DiskFilesystem = "ext4"
+ FilesystemInitrd DiskFilesystem = "initrd"
+)
+
+// DiskStatus constants have the prefix "Disk" and include Linode API Instance Disk Status
+type DiskStatus string
+
+// DiskStatus constants represent the status values an Instance Disk may have
+const (
+ DiskReady DiskStatus = "ready"
+ DiskNotReady DiskStatus = "not ready"
+ DiskDeleting DiskStatus = "deleting"
+)
+
+// InstanceDisksPagedResponse represents a paginated InstanceDisk API response
+type InstanceDisksPagedResponse struct {
+ *PageOptions
+ Data []InstanceDisk `json:"data"`
+}
+
+// InstanceDiskCreateOptions are InstanceDisk settings that can be used at creation
+type InstanceDiskCreateOptions struct {
+ Label string `json:"label"`
+ Size int `json:"size"`
+
+ // Image is optional, but requires RootPass if provided
+ Image string `json:"image,omitempty"`
+ RootPass string `json:"root_pass,omitempty"`
+
+ Filesystem string `json:"filesystem,omitempty"`
+ AuthorizedKeys []string `json:"authorized_keys,omitempty"`
+ AuthorizedUsers []string `json:"authorized_users,omitempty"`
+ ReadOnly bool `json:"read_only,omitempty"`
+ StackscriptID int `json:"stackscript_id,omitempty"`
+ StackscriptData map[string]string `json:"stackscript_data,omitempty"`
+}
+
+// InstanceDiskUpdateOptions are InstanceDisk settings that can be used in updates
+type InstanceDiskUpdateOptions struct {
+ Label string `json:"label"`
+ ReadOnly bool `json:"read_only"`
+}
+
+// endpointWithID gets the endpoint URL for InstanceDisks of a given Instance
+func (InstanceDisksPagedResponse) endpointWithID(c *Client, id int) string {
+ endpoint, err := c.InstanceDisks.endpointWithParams(id)
+ if err != nil {
+ panic(err)
+ }
+ return endpoint
+}
+
+// appendData appends InstanceDisks when processing paginated InstanceDisk responses
+func (resp *InstanceDisksPagedResponse) appendData(r *InstanceDisksPagedResponse) {
+ resp.Data = append(resp.Data, r.Data...)
+}
+
+// ListInstanceDisks lists InstanceDisks
+func (c *Client) ListInstanceDisks(ctx context.Context, linodeID int, opts *ListOptions) ([]InstanceDisk, error) {
+ response := InstanceDisksPagedResponse{}
+ err := c.listHelperWithID(ctx, &response, linodeID, opts)
+ if err != nil {
+ return nil, err
+ }
+ return response.Data, nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (i *InstanceDisk) UnmarshalJSON(b []byte) error {
+ type Mask InstanceDisk
+
+ p := struct {
+ *Mask
+ Created *parseabletime.ParseableTime `json:"created"`
+ Updated *parseabletime.ParseableTime `json:"updated"`
+ }{
+ Mask: (*Mask)(i),
+ }
+
+ if err := json.Unmarshal(b, &p); err != nil {
+ return err
+ }
+
+ i.Created = (*time.Time)(p.Created)
+ i.Updated = (*time.Time)(p.Updated)
+
+ return nil
+}
+
+// GetInstanceDisk gets the template with the provided ID
+func (c *Client) GetInstanceDisk(ctx context.Context, linodeID int, configID int) (*InstanceDisk, error) {
+ e, err := c.InstanceDisks.endpointWithParams(linodeID)
+ if err != nil {
+ return nil, err
+ }
+
+ e = fmt.Sprintf("%s/%d", e, configID)
+ r, err := coupleAPIErrors(c.R(ctx).SetResult(&InstanceDisk{}).Get(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*InstanceDisk), nil
+}
+
+// CreateInstanceDisk creates a new InstanceDisk for the given Instance
+func (c *Client) CreateInstanceDisk(ctx context.Context, linodeID int, createOpts InstanceDiskCreateOptions) (*InstanceDisk, error) {
+ var body string
+ e, err := c.InstanceDisks.endpointWithParams(linodeID)
+ if err != nil {
+ return nil, err
+ }
+
+ req := c.R(ctx).SetResult(&InstanceDisk{})
+
+ if bodyData, err := json.Marshal(createOpts); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Post(e))
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Result().(*InstanceDisk), nil
+}
+
+// UpdateInstanceDisk creates a new InstanceDisk for the given Instance
+func (c *Client) UpdateInstanceDisk(ctx context.Context, linodeID int, diskID int, updateOpts InstanceDiskUpdateOptions) (*InstanceDisk, error) {
+ var body string
+ e, err := c.InstanceDisks.endpointWithParams(linodeID)
+ if err != nil {
+ return nil, err
+ }
+
+ e = fmt.Sprintf("%s/%d", e, diskID)
+ req := c.R(ctx).SetResult(&InstanceDisk{})
+
+ if bodyData, err := json.Marshal(updateOpts); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Put(e))
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Result().(*InstanceDisk), nil
+}
+
+// RenameInstanceDisk renames an InstanceDisk
+func (c *Client) RenameInstanceDisk(ctx context.Context, linodeID int, diskID int, label string) (*InstanceDisk, error) {
+ return c.UpdateInstanceDisk(ctx, linodeID, diskID, InstanceDiskUpdateOptions{Label: label})
+}
+
+// ResizeInstanceDisk resizes the size of the Instance disk
+func (c *Client) ResizeInstanceDisk(ctx context.Context, linodeID int, diskID int, size int) error {
+ var body string
+ e, err := c.InstanceDisks.endpointWithParams(linodeID)
+ if err != nil {
+ return err
+ }
+ e = fmt.Sprintf("%s/%d/resize", e, diskID)
+
+ req := c.R(ctx).SetResult(&InstanceDisk{})
+ updateOpts := map[string]interface{}{
+ "size": size,
+ }
+
+ if bodyData, err := json.Marshal(updateOpts); err == nil {
+ body = string(bodyData)
+ } else {
+ return NewError(err)
+ }
+
+ _, err = coupleAPIErrors(req.
+ SetBody(body).
+ Post(e))
+
+ return err
+}
+
+// PasswordResetInstanceDisk resets the "root" account password on the Instance disk
+func (c *Client) PasswordResetInstanceDisk(ctx context.Context, linodeID int, diskID int, password string) error {
+ var body string
+ e, err := c.InstanceDisks.endpointWithParams(linodeID)
+ if err != nil {
+ return err
+ }
+ e = fmt.Sprintf("%s/%d/password", e, diskID)
+
+ req := c.R(ctx).SetResult(&InstanceDisk{})
+ updateOpts := map[string]interface{}{
+ "password": password,
+ }
+
+ if bodyData, err := json.Marshal(updateOpts); err == nil {
+ body = string(bodyData)
+ } else {
+ return NewError(err)
+ }
+
+ _, err = coupleAPIErrors(req.
+ SetBody(body).
+ Post(e))
+
+ return err
+}
+
+// DeleteInstanceDisk deletes a Linode Instance Disk
+func (c *Client) DeleteInstanceDisk(ctx context.Context, linodeID int, diskID int) error {
+ e, err := c.InstanceDisks.endpointWithParams(linodeID)
+ if err != nil {
+ return err
+ }
+ e = fmt.Sprintf("%s/%d", e, diskID)
+
+ _, err = coupleAPIErrors(c.R(ctx).Delete(e))
+ return err
+}
diff --git a/vendor/github.com/linode/linodego/instance_ips.go b/vendor/github.com/linode/linodego/instance_ips.go
new file mode 100644
index 000000000..5137200b3
--- /dev/null
+++ b/vendor/github.com/linode/linodego/instance_ips.go
@@ -0,0 +1,156 @@
+package linodego
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+)
+
+// InstanceIPAddressResponse contains the IPv4 and IPv6 details for an Instance
+type InstanceIPAddressResponse struct {
+ IPv4 *InstanceIPv4Response `json:"ipv4"`
+ IPv6 *InstanceIPv6Response `json:"ipv6"`
+}
+
+// InstanceIPv4Response contains the details of all IPv4 addresses associated with an Instance
+type InstanceIPv4Response struct {
+ Public []*InstanceIP `json:"public"`
+ Private []*InstanceIP `json:"private"`
+ Shared []*InstanceIP `json:"shared"`
+ Reserved []*InstanceIP `json:"reserved"`
+}
+
+// InstanceIP represents an Instance IP with additional DNS and networking details
+type InstanceIP struct {
+ Address string `json:"address"`
+ Gateway string `json:"gateway"`
+ SubnetMask string `json:"subnet_mask"`
+ Prefix int `json:"prefix"`
+ Type InstanceIPType `json:"type"`
+ Public bool `json:"public"`
+ RDNS string `json:"rdns"`
+ LinodeID int `json:"linode_id"`
+ Region string `json:"region"`
+}
+
+// InstanceIPv6Response contains the IPv6 addresses and ranges for an Instance
+type InstanceIPv6Response struct {
+ LinkLocal *InstanceIP `json:"link_local"`
+ SLAAC *InstanceIP `json:"slaac"`
+ Global []*IPv6Range `json:"global"`
+}
+
+// IPv6Range represents a range of IPv6 addresses routed to a single Linode in a given Region
+type IPv6Range struct {
+ Range string `json:"range"`
+ Region string `json:"region"`
+ Prefix int `json:"prefix"`
+}
+
+// InstanceIPType constants start with IPType and include Linode Instance IP Types
+type InstanceIPType string
+
+// InstanceIPType constants represent the IP types an Instance IP may be
+const (
+ IPTypeIPv4 InstanceIPType = "ipv4"
+ IPTypeIPv6 InstanceIPType = "ipv6"
+ IPTypeIPv6Pool InstanceIPType = "ipv6/pool"
+ IPTypeIPv6Range InstanceIPType = "ipv6/range"
+)
+
+// GetInstanceIPAddresses gets the IPAddresses for a Linode instance
+func (c *Client) GetInstanceIPAddresses(ctx context.Context, linodeID int) (*InstanceIPAddressResponse, error) {
+ e, err := c.InstanceIPs.endpointWithParams(linodeID)
+ if err != nil {
+ return nil, err
+ }
+
+ r, err := coupleAPIErrors(c.R(ctx).SetResult(&InstanceIPAddressResponse{}).Get(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*InstanceIPAddressResponse), nil
+}
+
+// GetInstanceIPAddress gets the IPAddress for a Linode instance matching a supplied IP address
+func (c *Client) GetInstanceIPAddress(ctx context.Context, linodeID int, ipaddress string) (*InstanceIP, error) {
+ e, err := c.InstanceIPs.endpointWithParams(linodeID)
+ if err != nil {
+ return nil, err
+ }
+ e = fmt.Sprintf("%s/%s", e, ipaddress)
+ r, err := coupleAPIErrors(c.R(ctx).SetResult(&InstanceIP{}).Get(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*InstanceIP), nil
+}
+
+// AddInstanceIPAddress adds a public or private IP to a Linode instance
+func (c *Client) AddInstanceIPAddress(ctx context.Context, linodeID int, public bool) (*InstanceIP, error) {
+ var body string
+ e, err := c.InstanceIPs.endpointWithParams(linodeID)
+ if err != nil {
+ return nil, err
+ }
+
+ req := c.R(ctx).SetResult(&InstanceIP{})
+
+ instanceipRequest := struct {
+ Type string `json:"type"`
+ Public bool `json:"public"`
+ }{"ipv4", public}
+
+ if bodyData, err := json.Marshal(instanceipRequest); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetHeader("Content-Type", "application/json").
+ SetBody(body).
+ Post(e))
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Result().(*InstanceIP), nil
+}
+
+// UpdateInstanceIPAddress updates the IPAddress with the specified instance id and IP address
+func (c *Client) UpdateInstanceIPAddress(ctx context.Context, linodeID int, ipAddress string, updateOpts IPAddressUpdateOptions) (*InstanceIP, error) {
+ var body string
+ e, err := c.InstanceIPs.endpointWithParams(linodeID)
+ if err != nil {
+ return nil, err
+ }
+ e = fmt.Sprintf("%s/%s", e, ipAddress)
+
+ req := c.R(ctx).SetResult(&InstanceIP{})
+
+ if bodyData, err := json.Marshal(updateOpts); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Put(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*InstanceIP), nil
+}
+
+func (c *Client) DeleteInstanceIPAddress(ctx context.Context, linodeID int, ipAddress string) error {
+ e, err := c.InstanceIPs.endpointWithParams(linodeID)
+ if err != nil {
+ return err
+ }
+
+ e = fmt.Sprintf("%s/%s", e, ipAddress)
+ _, err = coupleAPIErrors(c.R(ctx).Delete(e))
+ return err
+}
diff --git a/vendor/github.com/linode/linodego/instance_snapshots.go b/vendor/github.com/linode/linodego/instance_snapshots.go
new file mode 100644
index 000000000..df9d29bd6
--- /dev/null
+++ b/vendor/github.com/linode/linodego/instance_snapshots.go
@@ -0,0 +1,181 @@
+package linodego
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "github.com/linode/linodego/internal/parseabletime"
+)
+
+// InstanceBackupsResponse response struct for backup snapshot
+type InstanceBackupsResponse struct {
+ Automatic []*InstanceSnapshot `json:"automatic"`
+ Snapshot *InstanceBackupSnapshotResponse `json:"snapshot"`
+}
+
+// InstanceBackupSnapshotResponse fields are those representing Instance Backup Snapshots
+type InstanceBackupSnapshotResponse struct {
+ Current *InstanceSnapshot `json:"current"`
+ InProgress *InstanceSnapshot `json:"in_progress"`
+}
+
+// RestoreInstanceOptions fields are those accepted by InstanceRestore
+type RestoreInstanceOptions struct {
+ LinodeID int `json:"linode_id"`
+ Overwrite bool `json:"overwrite"`
+}
+
+// InstanceSnapshot represents a linode backup snapshot
+type InstanceSnapshot struct {
+ ID int `json:"id"`
+ Label string `json:"label"`
+ Status InstanceSnapshotStatus `json:"status"`
+ Type string `json:"type"`
+ Created *time.Time `json:"-"`
+ Updated *time.Time `json:"-"`
+ Finished *time.Time `json:"-"`
+ Configs []string `json:"configs"`
+ Disks []*InstanceSnapshotDisk `json:"disks"`
+}
+
+// InstanceSnapshotDisk fields represent the source disk of a Snapshot
+type InstanceSnapshotDisk struct {
+ Label string `json:"label"`
+ Size int `json:"size"`
+ Filesystem string `json:"filesystem"`
+}
+
+// InstanceSnapshotStatus constants start with Snapshot and include Linode API Instance Backup Snapshot status values
+type InstanceSnapshotStatus string
+
+// InstanceSnapshotStatus constants reflect the current status of an Instance Snapshot
+var (
+ SnapshotPaused InstanceSnapshotStatus = "paused"
+ SnapshotPending InstanceSnapshotStatus = "pending"
+ SnapshotRunning InstanceSnapshotStatus = "running"
+ SnapshotNeedsPostProcessing InstanceSnapshotStatus = "needsPostProcessing"
+ SnapshotSuccessful InstanceSnapshotStatus = "successful"
+ SnapshotFailed InstanceSnapshotStatus = "failed"
+ SnapshotUserAborted InstanceSnapshotStatus = "userAborted"
+)
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (i *InstanceSnapshot) UnmarshalJSON(b []byte) error {
+ type Mask InstanceSnapshot
+
+ p := struct {
+ *Mask
+ Created *parseabletime.ParseableTime `json:"created"`
+ Updated *parseabletime.ParseableTime `json:"updated"`
+ Finished *parseabletime.ParseableTime `json:"finished"`
+ }{
+ Mask: (*Mask)(i),
+ }
+
+ if err := json.Unmarshal(b, &p); err != nil {
+ return err
+ }
+
+ i.Created = (*time.Time)(p.Created)
+ i.Updated = (*time.Time)(p.Updated)
+ i.Finished = (*time.Time)(p.Finished)
+
+ return nil
+}
+
+// GetInstanceSnapshot gets the snapshot with the provided ID
+func (c *Client) GetInstanceSnapshot(ctx context.Context, linodeID int, snapshotID int) (*InstanceSnapshot, error) {
+ e, err := c.InstanceSnapshots.endpointWithParams(linodeID)
+ if err != nil {
+ return nil, err
+ }
+ e = fmt.Sprintf("%s/%d", e, snapshotID)
+ r, err := coupleAPIErrors(c.R(ctx).SetResult(&InstanceSnapshot{}).Get(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*InstanceSnapshot), nil
+}
+
+// CreateInstanceSnapshot Creates or Replaces the snapshot Backup of a Linode. If a previous snapshot exists for this Linode, it will be deleted.
+func (c *Client) CreateInstanceSnapshot(ctx context.Context, linodeID int, label string) (*InstanceSnapshot, error) {
+ o, err := json.Marshal(map[string]string{"label": label})
+ if err != nil {
+ return nil, err
+ }
+ body := string(o)
+ e, err := c.InstanceSnapshots.endpointWithParams(linodeID)
+ if err != nil {
+ return nil, err
+ }
+
+ r, err := coupleAPIErrors(c.R(ctx).
+ SetBody(body).
+ SetResult(&InstanceSnapshot{}).
+ Post(e))
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Result().(*InstanceSnapshot), nil
+}
+
+// GetInstanceBackups gets the Instance's available Backups.
+// This is not called ListInstanceBackups because a single object is returned, matching the API response.
+func (c *Client) GetInstanceBackups(ctx context.Context, linodeID int) (*InstanceBackupsResponse, error) {
+ e, err := c.InstanceSnapshots.endpointWithParams(linodeID)
+ if err != nil {
+ return nil, err
+ }
+ r, err := coupleAPIErrors(c.R(ctx).
+ SetResult(&InstanceBackupsResponse{}).
+ Get(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*InstanceBackupsResponse), nil
+}
+
+// EnableInstanceBackups Enables backups for the specified Linode.
+func (c *Client) EnableInstanceBackups(ctx context.Context, linodeID int) error {
+ e, err := c.InstanceSnapshots.endpointWithParams(linodeID)
+ if err != nil {
+ return err
+ }
+ e = fmt.Sprintf("%s/enable", e)
+
+ _, err = coupleAPIErrors(c.R(ctx).Post(e))
+ return err
+}
+
+// CancelInstanceBackups Cancels backups for the specified Linode.
+func (c *Client) CancelInstanceBackups(ctx context.Context, linodeID int) error {
+ e, err := c.InstanceSnapshots.endpointWithParams(linodeID)
+ if err != nil {
+ return err
+ }
+ e = fmt.Sprintf("%s/cancel", e)
+
+ _, err = coupleAPIErrors(c.R(ctx).Post(e))
+ return err
+}
+
+// RestoreInstanceBackup Restores a Linode's Backup to the specified Linode.
+func (c *Client) RestoreInstanceBackup(ctx context.Context, linodeID int, backupID int, opts RestoreInstanceOptions) error {
+ o, err := json.Marshal(opts)
+ if err != nil {
+ return NewError(err)
+ }
+ body := string(o)
+ e, err := c.InstanceSnapshots.endpointWithParams(linodeID)
+ if err != nil {
+ return err
+ }
+ e = fmt.Sprintf("%s/%d/restore", e, backupID)
+
+ _, err = coupleAPIErrors(c.R(ctx).SetBody(body).Post(e))
+
+ return err
+}
diff --git a/vendor/github.com/linode/linodego/instance_stats.go b/vendor/github.com/linode/linodego/instance_stats.go
new file mode 100644
index 000000000..29fd192b8
--- /dev/null
+++ b/vendor/github.com/linode/linodego/instance_stats.go
@@ -0,0 +1,68 @@
+package linodego
+
+import (
+ "context"
+ "fmt"
+)
+
// StatsNet represents a network stats object.
// Each series is a slice of data points (presumably [timestamp, value]
// pairs — TODO confirm against the Linode stats API response format).
type StatsNet struct {
	In         [][]float64 `json:"in"`
	Out        [][]float64 `json:"out"`
	PrivateIn  [][]float64 `json:"private_in"`
	PrivateOut [][]float64 `json:"private_out"`
}

// StatsIO represents an IO stats object with disk and swap activity series.
type StatsIO struct {
	IO   [][]float64 `json:"io"`
	Swap [][]float64 `json:"swap"`
}

// InstanceStatsData represents an instance stats data object grouping
// CPU, IO, and per-protocol network series.
type InstanceStatsData struct {
	CPU   [][]float64       `json:"cpu"`
	IO    StatsIO           `json:"io"`
	NetV4 StatsNet          `json:"netv4"`
	NetV6 StatsNet          `json:"netv6"`
}

// InstanceStats represents an instance stats object as returned by the
// instance stats endpoints.
type InstanceStats struct {
	Title string            `json:"title"`
	Data  InstanceStatsData `json:"data"`
}
+
+// endpointWithIDAndDate gets the endpoint URL for InstanceStats of a given Instance and Year/Month
+func endpointWithIDAndDate(c *Client, id int, year int, month int) string {
+ endpoint, err := c.InstanceStats.endpointWithParams(id)
+ if err != nil {
+ panic(err)
+ }
+
+ endpoint = fmt.Sprintf("%s/%d/%d", endpoint, year, month)
+ return endpoint
+}
+
// GetInstanceStats gets the statistics of the Instance with the provided ID.
// (The previous comment said "template" — a copy-paste error.)
func (c *Client) GetInstanceStats(ctx context.Context, linodeID int) (*InstanceStats, error) {
	e, err := c.InstanceStats.endpointWithParams(linodeID)
	if err != nil {
		return nil, err
	}
	r, err := coupleAPIErrors(c.R(ctx).SetResult(&InstanceStats{}).Get(e))
	if err != nil {
		return nil, err
	}
	return r.Result().(*InstanceStats), nil
}
+
// GetInstanceStatsByDate gets the statistics of the Instance with the
// provided ID for the given year and month.
// (The previous comment said "template" — a copy-paste error.)
func (c *Client) GetInstanceStatsByDate(ctx context.Context, linodeID int, year int, month int) (*InstanceStats, error) {
	e := endpointWithIDAndDate(c, linodeID, year, month)
	r, err := coupleAPIErrors(c.R(ctx).SetResult(&InstanceStats{}).Get(e))
	if err != nil {
		return nil, err
	}
	return r.Result().(*InstanceStats), nil
}
diff --git a/vendor/github.com/linode/linodego/instance_volumes.go b/vendor/github.com/linode/linodego/instance_volumes.go
new file mode 100644
index 000000000..856caf78e
--- /dev/null
+++ b/vendor/github.com/linode/linodego/instance_volumes.go
@@ -0,0 +1,35 @@
+package linodego
+
+import (
+ "context"
+)
+
// InstanceVolumesPagedResponse represents a paginated InstanceVolume API response.
type InstanceVolumesPagedResponse struct {
	*PageOptions
	Data []Volume `json:"data"`
}

// endpointWithID gets the endpoint URL for the Volumes of a given Instance.
// It panics on error, matching the package's other endpoint helpers.
// (The previous comment named the wrong helper, "endpoint".)
func (InstanceVolumesPagedResponse) endpointWithID(c *Client, id int) string {
	endpoint, err := c.InstanceVolumes.endpointWithParams(id)
	if err != nil {
		panic(err)
	}
	return endpoint
}

// appendData appends InstanceVolumes when processing paginated InstanceVolume responses.
func (resp *InstanceVolumesPagedResponse) appendData(r *InstanceVolumesPagedResponse) {
	resp.Data = append(resp.Data, r.Data...)
}
+
+// ListInstanceVolumes lists InstanceVolumes
+func (c *Client) ListInstanceVolumes(ctx context.Context, linodeID int, opts *ListOptions) ([]Volume, error) {
+ response := InstanceVolumesPagedResponse{}
+ err := c.listHelperWithID(ctx, &response, linodeID, opts)
+ if err != nil {
+ return nil, err
+ }
+ return response.Data, nil
+}
diff --git a/vendor/github.com/linode/linodego/instances.go b/vendor/github.com/linode/linodego/instances.go
new file mode 100644
index 000000000..f03c4646f
--- /dev/null
+++ b/vendor/github.com/linode/linodego/instances.go
@@ -0,0 +1,503 @@
+package linodego
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net"
+ "time"
+
+ "github.com/linode/linodego/internal/parseabletime"
+)
+
+/*
+ * https://developers.linode.com/v4/reference/endpoints/linode/instances
+ */
+
// InstanceStatus constants start with Instance and include Linode API Instance Status values.
type InstanceStatus string

// InstanceStatus constants reflect the current status of an Instance.
const (
	InstanceBooting      InstanceStatus = "booting"
	InstanceRunning      InstanceStatus = "running"
	InstanceOffline      InstanceStatus = "offline"
	InstanceShuttingDown InstanceStatus = "shutting_down"
	InstanceRebooting    InstanceStatus = "rebooting"
	InstanceProvisioning InstanceStatus = "provisioning"
	InstanceDeleting     InstanceStatus = "deleting"
	InstanceMigrating    InstanceStatus = "migrating"
	InstanceRebuilding   InstanceStatus = "rebuilding"
	InstanceCloning      InstanceStatus = "cloning"
	InstanceRestoring    InstanceStatus = "restoring"
	InstanceResizing     InstanceStatus = "resizing"
)

// Instance represents a linode object.
type Instance struct {
	ID int `json:"id"`
	// Created and Updated are excluded from default decoding (`json:"-"`)
	// and are populated by this type's UnmarshalJSON, which parses the API's
	// zone-less timestamp strings.
	Created         *time.Time      `json:"-"`
	Updated         *time.Time      `json:"-"`
	Region          string          `json:"region"`
	Alerts          *InstanceAlert  `json:"alerts"`
	Backups         *InstanceBackup `json:"backups"`
	Image           string          `json:"image"`
	Group           string          `json:"group"`
	IPv4            []*net.IP       `json:"ipv4"`
	IPv6            string          `json:"ipv6"`
	Label           string          `json:"label"`
	Type            string          `json:"type"`
	Status          InstanceStatus  `json:"status"`
	Hypervisor      string          `json:"hypervisor"`
	Specs           *InstanceSpec   `json:"specs"`
	WatchdogEnabled bool            `json:"watchdog_enabled"`
	Tags            []string        `json:"tags"`
}

// InstanceSpec represents a linode spec.
type InstanceSpec struct {
	Disk     int `json:"disk"`
	Memory   int `json:"memory"`
	VCPUs    int `json:"vcpus"`
	Transfer int `json:"transfer"`
}

// InstanceAlert represents a metric alert.
type InstanceAlert struct {
	CPU           int `json:"cpu"`
	IO            int `json:"io"`
	NetworkIn     int `json:"network_in"`
	NetworkOut    int `json:"network_out"`
	TransferQuota int `json:"transfer_quota"`
}

// InstanceBackup represents backup settings for an instance.
type InstanceBackup struct {
	Enabled  bool `json:"enabled"`
	Schedule struct {
		Day    string `json:"day,omitempty"`
		Window string `json:"window,omitempty"`
	}
}

// InstanceTransfer pool stats for a Linode Instance during the current billing month.
type InstanceTransfer struct {
	// Bytes of transfer this instance has consumed
	Used int `json:"used"`

	// GB of billable transfer this instance has consumed
	Billable int `json:"billable"`

	// GB of transfer this instance adds to the Transfer pool
	Quota int `json:"quota"`
}
+
// InstanceCreateOptions require only Region and Type.
type InstanceCreateOptions struct {
	Region          string                    `json:"region"`
	Type            string                    `json:"type"`
	Label           string                    `json:"label,omitempty"`
	Group           string                    `json:"group,omitempty"`
	RootPass        string                    `json:"root_pass,omitempty"`
	AuthorizedKeys  []string                  `json:"authorized_keys,omitempty"`
	AuthorizedUsers []string                  `json:"authorized_users,omitempty"`
	StackScriptID   int                       `json:"stackscript_id,omitempty"`
	StackScriptData map[string]string         `json:"stackscript_data,omitempty"`
	BackupID        int                       `json:"backup_id,omitempty"`
	Image           string                    `json:"image,omitempty"`
	Interfaces      []InstanceConfigInterface `json:"interfaces,omitempty"`
	BackupsEnabled  bool                      `json:"backups_enabled,omitempty"`
	PrivateIP       bool                      `json:"private_ip,omitempty"`
	Tags            []string                  `json:"tags,omitempty"`

	// Creation fields that need to be set explicitly false, "", or 0 use pointers
	// (a non-nil pointer survives omitempty even when it points at a zero value).
	SwapSize *int  `json:"swap_size,omitempty"`
	Booted   *bool `json:"booted,omitempty"`
}

// InstanceUpdateOptions is an options struct used when Updating an Instance.
// Pointer fields distinguish "leave unchanged" (nil) from "set to zero value".
type InstanceUpdateOptions struct {
	Label           string          `json:"label,omitempty"`
	Group           string          `json:"group,omitempty"`
	Backups         *InstanceBackup `json:"backups,omitempty"`
	Alerts          *InstanceAlert  `json:"alerts,omitempty"`
	WatchdogEnabled *bool           `json:"watchdog_enabled,omitempty"`
	Tags            *[]string       `json:"tags,omitempty"`
}
+
// UnmarshalJSON implements the json.Unmarshaler interface.
// It decodes all standard fields via the default decoder and additionally
// parses the API's zone-less "created"/"updated" timestamp strings into the
// Created/Updated fields (which carry `json:"-"` on the struct).
func (i *Instance) UnmarshalJSON(b []byte) error {
	// Mask shadows Instance so json.Unmarshal uses the default decoding
	// rules instead of recursing back into this method.
	type Mask Instance

	p := struct {
		*Mask
		Created *parseabletime.ParseableTime `json:"created"`
		Updated *parseabletime.ParseableTime `json:"updated"`
	}{
		Mask: (*Mask)(i),
	}

	if err := json.Unmarshal(b, &p); err != nil {
		return err
	}

	i.Created = (*time.Time)(p.Created)
	i.Updated = (*time.Time)(p.Updated)

	return nil
}
+
+// GetUpdateOptions converts an Instance to InstanceUpdateOptions for use in UpdateInstance
+func (i *Instance) GetUpdateOptions() InstanceUpdateOptions {
+ return InstanceUpdateOptions{
+ Label: i.Label,
+ Group: i.Group,
+ Backups: i.Backups,
+ Alerts: i.Alerts,
+ WatchdogEnabled: &i.WatchdogEnabled,
+ Tags: &i.Tags,
+ }
+}
+
// InstanceCloneOptions is an options struct sent when Cloning an Instance.
type InstanceCloneOptions struct {
	Region string `json:"region,omitempty"`
	Type   string `json:"type,omitempty"`

	// LinodeID is an optional existing instance to use as the target of the clone
	LinodeID       int      `json:"linode_id,omitempty"`
	Label          string   `json:"label,omitempty"`
	Group          string   `json:"group,omitempty"`
	// BackupsEnabled has no omitempty tag, so an explicit false is always sent.
	BackupsEnabled bool     `json:"backups_enabled"`
	Disks          []int    `json:"disks,omitempty"`
	Configs        []int    `json:"configs,omitempty"`
}

// InstanceResizeOptions is an options struct used when resizing an instance.
type InstanceResizeOptions struct {
	Type string `json:"type"`

	// When enabled, an instance resize will also resize a data disk if the instance has no more than one data disk and one swap disk
	AllowAutoDiskResize *bool `json:"allow_auto_disk_resize,omitempty"`
}
+
// InstancesPagedResponse represents a linode API response for listing.
type InstancesPagedResponse struct {
	*PageOptions
	Data []Instance `json:"data"`
}

// endpoint gets the endpoint URL for Instance.
// It panics on error, matching the package's other endpoint helpers.
func (InstancesPagedResponse) endpoint(c *Client) string {
	endpoint, err := c.Instances.Endpoint()
	if err != nil {
		panic(err)
	}
	return endpoint
}

// appendData appends Instances when processing paginated Instance responses.
func (resp *InstancesPagedResponse) appendData(r *InstancesPagedResponse) {
	resp.Data = append(resp.Data, r.Data...)
}
+
+// ListInstances lists linode instances
+func (c *Client) ListInstances(ctx context.Context, opts *ListOptions) ([]Instance, error) {
+ response := InstancesPagedResponse{}
+ err := c.listHelper(ctx, &response, opts)
+ if err != nil {
+ return nil, err
+ }
+ return response.Data, nil
+}
+
+// GetInstance gets the instance with the provided ID
+func (c *Client) GetInstance(ctx context.Context, linodeID int) (*Instance, error) {
+ e, err := c.Instances.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+ e = fmt.Sprintf("%s/%d", e, linodeID)
+ r, err := coupleAPIErrors(c.R(ctx).
+ SetResult(Instance{}).
+ Get(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*Instance), nil
+}
+
// GetInstanceTransfer gets the network transfer pool statistics for the
// Instance with the provided ID during the current billing month.
// (The previous comment said "gets the instance" — a copy-paste error.)
func (c *Client) GetInstanceTransfer(ctx context.Context, linodeID int) (*InstanceTransfer, error) {
	e, err := c.Instances.Endpoint()
	if err != nil {
		return nil, err
	}
	e = fmt.Sprintf("%s/%d/transfer", e, linodeID)
	r, err := coupleAPIErrors(c.R(ctx).
		SetResult(InstanceTransfer{}).
		Get(e))
	if err != nil {
		return nil, err
	}
	return r.Result().(*InstanceTransfer), nil
}
+
+// CreateInstance creates a Linode instance
+func (c *Client) CreateInstance(ctx context.Context, instance InstanceCreateOptions) (*Instance, error) {
+ var body string
+ e, err := c.Instances.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+
+ req := c.R(ctx).SetResult(&Instance{})
+
+ if bodyData, err := json.Marshal(instance); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Post(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*Instance), nil
+}
+
// UpdateInstance updates a Linode instance.
// (The previous comment said "creates" — a copy-paste error from CreateInstance.)
func (c *Client) UpdateInstance(ctx context.Context, id int, instance InstanceUpdateOptions) (*Instance, error) {
	var body string
	e, err := c.Instances.Endpoint()
	if err != nil {
		return nil, err
	}
	e = fmt.Sprintf("%s/%d", e, id)

	req := c.R(ctx).SetResult(&Instance{})

	if bodyData, err := json.Marshal(instance); err == nil {
		body = string(bodyData)
	} else {
		return nil, NewError(err)
	}

	r, err := coupleAPIErrors(req.
		SetBody(body).
		Put(e))
	if err != nil {
		return nil, err
	}
	return r.Result().(*Instance), nil
}
+
+// RenameInstance renames an Instance
+func (c *Client) RenameInstance(ctx context.Context, linodeID int, label string) (*Instance, error) {
+ return c.UpdateInstance(ctx, linodeID, InstanceUpdateOptions{Label: label})
+}
+
+// DeleteInstance deletes a Linode instance
+func (c *Client) DeleteInstance(ctx context.Context, id int) error {
+ e, err := c.Instances.Endpoint()
+ if err != nil {
+ return err
+ }
+ e = fmt.Sprintf("%s/%d", e, id)
+
+ _, err = coupleAPIErrors(c.R(ctx).Delete(e))
+ return err
+}
+
// BootInstance will boot a Linode instance.
// A configID of 0 will cause Linode to choose the last/best config.
func (c *Client) BootInstance(ctx context.Context, id int, configID int) error {
	// NOTE(review): when configID is 0 an empty request body is sent, whereas
	// RebootInstance defaults to "{}" — confirm the API accepts an empty POST
	// body here, or align the two.
	bodyStr := ""

	if configID != 0 {
		bodyMap := map[string]int{"config_id": configID}
		bodyJSON, err := json.Marshal(bodyMap)
		if err != nil {
			return NewError(err)
		}
		bodyStr = string(bodyJSON)
	}

	e, err := c.Instances.Endpoint()
	if err != nil {
		return err
	}

	e = fmt.Sprintf("%s/%d/boot", e, id)
	_, err = coupleAPIErrors(c.R(ctx).
		SetBody(bodyStr).
		Post(e))

	return err
}
+
+// CloneInstance clone an existing Instances Disks and Configuration profiles to another Linode Instance
+func (c *Client) CloneInstance(ctx context.Context, id int, options InstanceCloneOptions) (*Instance, error) {
+ var body string
+ e, err := c.Instances.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+ e = fmt.Sprintf("%s/%d/clone", e, id)
+
+ req := c.R(ctx).SetResult(&Instance{})
+
+ if bodyData, err := json.Marshal(options); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Post(e))
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Result().(*Instance), nil
+}
+
// RebootInstance reboots a Linode instance.
// A configID of 0 will cause Linode to choose the last/best config.
func (c *Client) RebootInstance(ctx context.Context, id int, configID int) error {
	// Default to an empty JSON object when no config is specified.
	bodyStr := "{}"

	if configID != 0 {
		bodyMap := map[string]int{"config_id": configID}
		bodyJSON, err := json.Marshal(bodyMap)
		if err != nil {
			return NewError(err)
		}
		bodyStr = string(bodyJSON)
	}

	e, err := c.Instances.Endpoint()
	if err != nil {
		return err
	}

	e = fmt.Sprintf("%s/%d/reboot", e, id)

	_, err = coupleAPIErrors(c.R(ctx).
		SetBody(bodyStr).
		Post(e))

	return err
}

// InstanceRebuildOptions is a struct representing the options to send to the
// rebuild linode endpoint.
type InstanceRebuildOptions struct {
	Image           string            `json:"image,omitempty"`
	RootPass        string            `json:"root_pass,omitempty"`
	AuthorizedKeys  []string          `json:"authorized_keys,omitempty"`
	AuthorizedUsers []string          `json:"authorized_users,omitempty"`
	StackScriptID   int               `json:"stackscript_id,omitempty"`
	StackScriptData map[string]string `json:"stackscript_data,omitempty"`
	// Booted is a pointer so an explicit false survives omitempty.
	Booted *bool `json:"booted,omitempty"`
}
+
+// RebuildInstance Deletes all Disks and Configs on this Linode,
+// then deploys a new Image to this Linode with the given attributes.
+func (c *Client) RebuildInstance(ctx context.Context, id int, opts InstanceRebuildOptions) (*Instance, error) {
+ o, err := json.Marshal(opts)
+ if err != nil {
+ return nil, NewError(err)
+ }
+ b := string(o)
+ e, err := c.Instances.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+ e = fmt.Sprintf("%s/%d/rebuild", e, id)
+ r, err := coupleAPIErrors(c.R(ctx).
+ SetBody(b).
+ SetResult(&Instance{}).
+ Post(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*Instance), nil
+}
+
// InstanceRescueOptions fields are those accepted by RescueInstance.
type InstanceRescueOptions struct {
	// Devices maps disk slots to the devices exposed in the rescue environment.
	Devices InstanceConfigDeviceMap `json:"devices"`
}

// RescueInstance reboots an instance into a safe environment for performing many system recovery and disk management tasks.
// Rescue Mode is based on the Finnix recovery distribution, a self-contained and bootable Linux distribution.
// You can also use Rescue Mode for tasks other than disaster recovery, such as formatting disks to use different filesystems,
// copying data between disks, and downloading files from a disk via SSH and SFTP.
func (c *Client) RescueInstance(ctx context.Context, id int, opts InstanceRescueOptions) error {
	o, err := json.Marshal(opts)
	if err != nil {
		return NewError(err)
	}
	b := string(o)
	e, err := c.Instances.Endpoint()
	if err != nil {
		return err
	}
	e = fmt.Sprintf("%s/%d/rescue", e, id)

	_, err = coupleAPIErrors(c.R(ctx).
		SetBody(b).
		Post(e))

	return err
}
+
+// ResizeInstance resizes an instance to new Linode type
+func (c *Client) ResizeInstance(ctx context.Context, id int, opts InstanceResizeOptions) error {
+ o, err := json.Marshal(opts)
+ if err != nil {
+ return NewError(err)
+ }
+ body := string(o)
+ e, err := c.Instances.Endpoint()
+ if err != nil {
+ return err
+ }
+ e = fmt.Sprintf("%s/%d/resize", e, id)
+
+ _, err = coupleAPIErrors(c.R(ctx).
+ SetBody(body).
+ Post(e))
+
+ return err
+}
+
// ShutdownInstance - Shutdown an instance.
func (c *Client) ShutdownInstance(ctx context.Context, id int) error {
	return c.simpleInstanceAction(ctx, "shutdown", id)
}

// MutateInstance Upgrades a Linode to its next generation.
func (c *Client) MutateInstance(ctx context.Context, id int) error {
	return c.simpleInstanceAction(ctx, "mutate", id)
}

// MigrateInstance - Migrate an instance.
func (c *Client) MigrateInstance(ctx context.Context, id int) error {
	return c.simpleInstanceAction(ctx, "migrate", id)
}
+
+// simpleInstanceAction is a helper for Instance actions that take no parameters
+// and return empty responses `{}` unless they return a standard error
+func (c *Client) simpleInstanceAction(ctx context.Context, action string, id int) error {
+ e, err := c.Instances.Endpoint()
+ if err != nil {
+ return err
+ }
+ e = fmt.Sprintf("%s/%d/%s", e, id, action)
+ _, err = coupleAPIErrors(c.R(ctx).Post(e))
+ return err
+}
diff --git a/vendor/github.com/linode/linodego/internal/duration/duration.go b/vendor/github.com/linode/linodego/internal/duration/duration.go
new file mode 100644
index 000000000..088f12f02
--- /dev/null
+++ b/vendor/github.com/linode/linodego/internal/duration/duration.go
@@ -0,0 +1,63 @@
+package duration
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "strconv"
+ "strings"
+)
+
// UnmarshalTimeRemaining parses a Linode API time-remaining value, which the
// API returns either as JSON null, as an "hh:mm:ss" string, or as a plain
// number of seconds. It returns nil for null or unrecognized values.
func UnmarshalTimeRemaining(m json.RawMessage) *int {
	jsonBytes, err := m.MarshalJSON()
	if err != nil {
		// Fix: the previous code panicked with the raw bytes instead of the
		// error value, hiding the failure cause.
		panic(err)
	}

	if string(jsonBytes) == "null" {
		return nil
	}

	// Try the duration-string form first ("hh:mm:ss").
	var timeStr string
	if err := json.Unmarshal(jsonBytes, &timeStr); err == nil && len(timeStr) > 0 {
		dur, err := durationToSeconds(timeStr)
		if err != nil {
			panic(err)
		}
		return &dur
	}

	// Fall back to a plain integer number of seconds.
	var seconds int
	if err := json.Unmarshal(jsonBytes, &seconds); err == nil {
		return &seconds
	}

	log.Println("[WARN] Unexpected unmarshalTimeRemaining value: ", jsonBytes)

	return nil
}

// durationToSeconds takes an "hh:mm:ss" (or "mm:ss", or "ss") string and
// returns the total number of seconds.
func durationToSeconds(s string) (int, error) {
	multipliers := [3]int{60 * 60, 60, 1}
	segs := strings.Split(s, ":")

	if len(segs) > len(multipliers) {
		return 0, fmt.Errorf("too many ':' separators in time duration: %s", s)
	}

	var total int

	// Right-align the segments against the multipliers so "mm:ss" maps to
	// minutes and seconds rather than hours and minutes.
	offset := len(multipliers) - len(segs)

	for i, seg := range segs {
		n, err := strconv.Atoi(seg)
		if err != nil {
			return 0, err
		}

		total += n * multipliers[i+offset]
	}

	return total, nil
}
diff --git a/vendor/github.com/linode/linodego/internal/parseabletime/parseable_time.go b/vendor/github.com/linode/linodego/internal/parseabletime/parseable_time.go
new file mode 100644
index 000000000..c471442db
--- /dev/null
+++ b/vendor/github.com/linode/linodego/internal/parseabletime/parseable_time.go
@@ -0,0 +1,22 @@
+package parseabletime
+
+import (
+ "time"
+)
+
const (
	// dateLayout is the zone-less timestamp layout used by the Linode API.
	dateLayout = "2006-01-02T15:04:05"
)

// ParseableTime is a time.Time that unmarshals from the Linode API's
// quoted, zone-less timestamp strings.
type ParseableTime time.Time

// UnmarshalJSON implements the json.Unmarshaler interface.
func (p *ParseableTime) UnmarshalJSON(b []byte) error {
	parsed, err := time.Parse(`"`+dateLayout+`"`, string(b))
	if err == nil {
		*p = ParseableTime(parsed)
	}

	return err
}
diff --git a/vendor/github.com/linode/linodego/kernels.go b/vendor/github.com/linode/linodego/kernels.go
new file mode 100644
index 000000000..b812dc05b
--- /dev/null
+++ b/vendor/github.com/linode/linodego/kernels.go
@@ -0,0 +1,62 @@
+package linodego
+
+import (
+ "context"
+ "fmt"
+)
+
// LinodeKernel represents a Linode Instance kernel object.
type LinodeKernel struct {
	// ID is a string (e.g. not numeric), unlike most other IDs in this package.
	ID           string `json:"id"`
	Label        string `json:"label"`
	Version      string `json:"version"`
	Architecture string `json:"architecture"`
	Deprecated   bool   `json:"deprecated"`
	KVM          bool   `json:"kvm"`
	XEN          bool   `json:"xen"`
	PVOPS        bool   `json:"pvops"`
}

// LinodeKernelsPagedResponse represents a Linode kernels API response for listing.
type LinodeKernelsPagedResponse struct {
	*PageOptions
	Data []LinodeKernel `json:"data"`
}
+
+// ListKernels lists linode kernels
+func (c *Client) ListKernels(ctx context.Context, opts *ListOptions) ([]LinodeKernel, error) {
+ response := LinodeKernelsPagedResponse{}
+ err := c.listHelper(ctx, &response, opts)
+ if err != nil {
+ return nil, err
+ }
+ return response.Data, nil
+}
+
// endpoint gets the endpoint URL for Kernels.
// It panics on error, matching the package's other endpoint helpers.
func (LinodeKernelsPagedResponse) endpoint(c *Client) string {
	endpoint, err := c.Kernels.Endpoint()
	if err != nil {
		panic(err)
	}
	return endpoint
}

// appendData appends LinodeKernels when processing paginated Kernel responses.
func (resp *LinodeKernelsPagedResponse) appendData(r *LinodeKernelsPagedResponse) {
	resp.Data = append(resp.Data, r.Data...)
}
+
+// GetKernel gets the kernel with the provided ID
+func (c *Client) GetKernel(ctx context.Context, kernelID string) (*LinodeKernel, error) {
+ e, err := c.Kernels.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+ e = fmt.Sprintf("%s/%s", e, kernelID)
+ r, err := c.R(ctx).
+ SetResult(&LinodeKernel{}).
+ Get(e)
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*LinodeKernel), nil
+}
diff --git a/vendor/github.com/linode/linodego/lke_cluster_pools.go b/vendor/github.com/linode/linodego/lke_cluster_pools.go
new file mode 100644
index 000000000..6ff366aca
--- /dev/null
+++ b/vendor/github.com/linode/linodego/lke_cluster_pools.go
@@ -0,0 +1,205 @@
+package linodego
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+)
+
// LKELinodeStatus constants start with LKELinode and include
// Linode API LKEClusterPool Linode Status values.
type LKELinodeStatus string

// LKEClusterPoolStatus constants reflect the current status of an LKEClusterPool.
const (
	LKELinodeReady    LKELinodeStatus = "ready"
	LKELinodeNotReady LKELinodeStatus = "not_ready"
)

// LKEClusterPoolDisk represents a Node disk in an LKEClusterPool object.
type LKEClusterPoolDisk struct {
	Size int    `json:"size"`
	Type string `json:"type"`
}

// LKEClusterPoolAutoscaler represents an LKEClusterPool's autoscaler configuration.
type LKEClusterPoolAutoscaler struct {
	Enabled bool `json:"enabled"`
	Min     int  `json:"min"`
	Max     int  `json:"max"`
}

// LKEClusterPoolLinode represents a LKEClusterPoolLinode object.
type LKEClusterPoolLinode struct {
	ID         string          `json:"id"`
	InstanceID int             `json:"instance_id"`
	Status     LKELinodeStatus `json:"status"`
}

// LKEClusterPool represents a LKEClusterPool object.
type LKEClusterPool struct {
	ID      int                    `json:"id"`
	Count   int                    `json:"count"`
	Type    string                 `json:"type"`
	Disks   []LKEClusterPoolDisk   `json:"disks"`
	Linodes []LKEClusterPoolLinode `json:"nodes"`
	Tags    []string               `json:"tags"`

	Autoscaler LKEClusterPoolAutoscaler `json:"autoscaler"`
}

// LKEClusterPoolCreateOptions fields are those accepted by CreateLKEClusterPool.
type LKEClusterPoolCreateOptions struct {
	Count int                  `json:"count"`
	Type  string               `json:"type"`
	Disks []LKEClusterPoolDisk `json:"disks"`
	Tags  []string             `json:"tags"`

	Autoscaler *LKEClusterPoolAutoscaler `json:"autoscaler,omitempty"`
}

// LKEClusterPoolUpdateOptions fields are those accepted by UpdateLKEClusterPool.
type LKEClusterPoolUpdateOptions struct {
	Count int       `json:"count,omitempty"`
	Tags  *[]string `json:"tags,omitempty"`

	Autoscaler *LKEClusterPoolAutoscaler `json:"autoscaler,omitempty"`
}

// GetCreateOptions converts a LKEClusterPool to LKEClusterPoolCreateOptions for
// use in CreateLKEClusterPool.
func (l LKEClusterPool) GetCreateOptions() (o LKEClusterPoolCreateOptions) {
	o.Count = l.Count
	// Fix: copy Type as well — it is a field of the create request and was
	// previously dropped, so the returned options could not recreate the pool.
	o.Type = l.Type
	o.Disks = l.Disks
	o.Tags = l.Tags
	o.Autoscaler = &l.Autoscaler
	return
}

// GetUpdateOptions converts a LKEClusterPool to LKEClusterPoolUpdateOptions for use in UpdateLKEClusterPool.
func (l LKEClusterPool) GetUpdateOptions() (o LKEClusterPoolUpdateOptions) {
	o.Count = l.Count
	o.Tags = &l.Tags
	o.Autoscaler = &l.Autoscaler
	return
}
+
// LKEClusterPoolsPagedResponse represents a paginated LKEClusterPool API response.
type LKEClusterPoolsPagedResponse struct {
	*PageOptions
	Data []LKEClusterPool `json:"data"`
}

// endpointWithID gets the endpoint URL for the pools of a given LKE cluster.
// It panics on error, matching the package's other endpoint helpers.
// (The previous comment said "InstanceConfigs of a given Instance" — a
// copy-paste error.)
func (LKEClusterPoolsPagedResponse) endpointWithID(c *Client, id int) string {
	endpoint, err := c.LKEClusterPools.endpointWithParams(id)
	if err != nil {
		panic(err)
	}
	return endpoint
}

// appendData appends LKEClusterPools when processing paginated LKEClusterPool responses.
func (resp *LKEClusterPoolsPagedResponse) appendData(r *LKEClusterPoolsPagedResponse) {
	resp.Data = append(resp.Data, r.Data...)
}
+
+// ListLKEClusterPools lists LKEClusterPools
+func (c *Client) ListLKEClusterPools(ctx context.Context, clusterID int, opts *ListOptions) ([]LKEClusterPool, error) {
+ response := LKEClusterPoolsPagedResponse{}
+ err := c.listHelperWithID(ctx, &response, clusterID, opts)
+ if err != nil {
+ return nil, err
+ }
+
+ return response.Data, nil
+}
+
+// GetLKEClusterPool gets the lkeClusterPool with the provided ID
+func (c *Client) GetLKEClusterPool(ctx context.Context, clusterID, id int) (*LKEClusterPool, error) {
+ e, err := c.LKEClusterPools.endpointWithParams(clusterID)
+ if err != nil {
+ return nil, err
+ }
+ e = fmt.Sprintf("%s/%d", e, id)
+ r, err := coupleAPIErrors(c.R(ctx).SetResult(&LKEClusterPool{}).Get(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*LKEClusterPool), nil
+}
+
+// CreateLKEClusterPool creates a LKEClusterPool
+func (c *Client) CreateLKEClusterPool(ctx context.Context, clusterID int, createOpts LKEClusterPoolCreateOptions) (*LKEClusterPool, error) {
+ var body string
+ e, err := c.LKEClusterPools.endpointWithParams(clusterID)
+ if err != nil {
+ return nil, err
+ }
+
+ req := c.R(ctx).SetResult(&LKEClusterPool{})
+
+ if bodyData, err := json.Marshal(createOpts); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Post(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*LKEClusterPool), nil
+}
+
// UpdateLKEClusterPool updates the LKEClusterPool with the specified id.
// Nil pointer fields in updateOpts are omitted from the request body.
func (c *Client) UpdateLKEClusterPool(ctx context.Context, clusterID, id int, updateOpts LKEClusterPoolUpdateOptions) (*LKEClusterPool, error) {
	var body string
	e, err := c.LKEClusterPools.endpointWithParams(clusterID)
	if err != nil {
		return nil, err
	}
	e = fmt.Sprintf("%s/%d", e, id)

	req := c.R(ctx).SetResult(&LKEClusterPool{})

	if bodyData, err := json.Marshal(updateOpts); err == nil {
		body = string(bodyData)
	} else {
		return nil, NewError(err)
	}

	r, err := coupleAPIErrors(req.
		SetBody(body).
		Put(e))
	if err != nil {
		return nil, err
	}
	return r.Result().(*LKEClusterPool), nil
}
+
+// DeleteLKEClusterPool deletes the LKEClusterPool with the specified id
+func (c *Client) DeleteLKEClusterPool(ctx context.Context,
+ clusterID, id int) error {
+ e, err := c.LKEClusterPools.endpointWithParams(clusterID)
+ if err != nil {
+ return err
+ }
+ e = fmt.Sprintf("%s/%d", e, id)
+
+ _, err = coupleAPIErrors(c.R(ctx).Delete(e))
+ return err
+}
+
// DeleteLKEClusterPoolNode deletes a given node from a cluster pool.
// Note this is built from the LKEClusters endpoint (clusters/{id}/nodes/{id}),
// not the pools endpoint, matching the API route; the node id is a string.
func (c *Client) DeleteLKEClusterPoolNode(ctx context.Context, clusterID int, id string) error {
	e, err := c.LKEClusters.Endpoint()
	if err != nil {
		return err
	}
	e = fmt.Sprintf("%s/%d/nodes/%s", e, clusterID, id)

	_, err = coupleAPIErrors(c.R(ctx).Delete(e))
	return err
}
diff --git a/vendor/github.com/linode/linodego/lke_clusters.go b/vendor/github.com/linode/linodego/lke_clusters.go
new file mode 100644
index 000000000..08e085bc5
--- /dev/null
+++ b/vendor/github.com/linode/linodego/lke_clusters.go
@@ -0,0 +1,308 @@
+package linodego
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "github.com/linode/linodego/internal/parseabletime"
+)
+
+// LKEClusterStatus represents the status of an LKECluster
+type LKEClusterStatus string
+
+// LKEClusterStatus enums start with LKECluster
+const (
+ LKEClusterReady LKEClusterStatus = "ready"
+ LKEClusterNotReady LKEClusterStatus = "not_ready"
+)
+
+// LKECluster represents a LKECluster object
+type LKECluster struct {
+ ID int `json:"id"`
+ Created *time.Time `json:"-"`
+ Updated *time.Time `json:"-"`
+ Label string `json:"label"`
+ Region string `json:"region"`
+ Status LKEClusterStatus `json:"status"`
+ K8sVersion string `json:"k8s_version"`
+ Tags []string `json:"tags"`
+}
+
+// LKEClusterCreateOptions fields are those accepted by CreateLKECluster
+type LKEClusterCreateOptions struct {
+ NodePools []LKEClusterPoolCreateOptions `json:"node_pools"`
+ Label string `json:"label"`
+ Region string `json:"region"`
+ K8sVersion string `json:"k8s_version"`
+ Tags []string `json:"tags,omitempty"`
+}
+
+// LKEClusterUpdateOptions fields are those accepted by UpdateLKECluster
+type LKEClusterUpdateOptions struct {
+ K8sVersion string `json:"k8s_version,omitempty"`
+ Label string `json:"label,omitempty"`
+ Tags *[]string `json:"tags,omitempty"`
+}
+
+// LKEClusterAPIEndpoint fields are those returned by ListLKEClusterAPIEndpoints
+type LKEClusterAPIEndpoint struct {
+ Endpoint string `json:"endpoint"`
+}
+
+// LKEClusterKubeconfig fields are those returned by GetLKEClusterKubeconfig
+type LKEClusterKubeconfig struct {
+ KubeConfig string `json:"kubeconfig"`
+}
+
+// LKEVersion fields are those returned by GetLKEVersion
+type LKEVersion struct {
+ ID string `json:"id"`
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (i *LKECluster) UnmarshalJSON(b []byte) error {
+ type Mask LKECluster
+
+ p := struct {
+ *Mask
+ Created *parseabletime.ParseableTime `json:"created"`
+ Updated *parseabletime.ParseableTime `json:"updated"`
+ }{
+ Mask: (*Mask)(i),
+ }
+
+ if err := json.Unmarshal(b, &p); err != nil {
+ return err
+ }
+
+ i.Created = (*time.Time)(p.Created)
+ i.Updated = (*time.Time)(p.Updated)
+
+ return nil
+}
+
+// GetCreateOptions converts a LKECluster to LKEClusterCreateOptions for use in CreateLKECluster
+func (i LKECluster) GetCreateOptions() (o LKEClusterCreateOptions) {
+ o.Label = i.Label
+ o.Region = i.Region
+ o.K8sVersion = i.K8sVersion
+ o.Tags = i.Tags
+ // @TODO copy NodePools?
+ return
+}
+
+// GetUpdateOptions converts a LKECluster to LKEClusterUpdateOptions for use in UpdateLKECluster
+func (i LKECluster) GetUpdateOptions() (o LKEClusterUpdateOptions) {
+ o.K8sVersion = i.K8sVersion
+ o.Label = i.Label
+ o.Tags = &i.Tags
+ return
+}
+
+// LKEClustersPagedResponse represents a paginated LKECluster API response
+type LKEClustersPagedResponse struct {
+ *PageOptions
+ Data []LKECluster `json:"data"`
+}
+
+// LKEClusterAPIEndpointsPagedResponse represents a paginated LKEClusterAPIEndpoints API response
+type LKEClusterAPIEndpointsPagedResponse struct {
+ *PageOptions
+ Data []LKEClusterAPIEndpoint `json:"data"`
+}
+
+// LKEVersionsPagedResponse represents a paginated LKEVersion API response
+type LKEVersionsPagedResponse struct {
+ *PageOptions
+ Data []LKEVersion `json:"data"`
+}
+
+// endpoint gets the endpoint URL for LKECluster
+func (LKEClustersPagedResponse) endpoint(c *Client) string {
+ endpoint, err := c.LKEClusters.Endpoint()
+ if err != nil {
+ panic(err)
+ }
+ return endpoint
+}
+
+// appendData appends LKEClusters when processing paginated LKECluster responses
+func (resp *LKEClustersPagedResponse) appendData(r *LKEClustersPagedResponse) {
+ resp.Data = append(resp.Data, r.Data...)
+}
+
+// endpoint gets the endpoint URL for LKEVersion
+func (LKEVersionsPagedResponse) endpoint(c *Client) string {
+ endpoint, err := c.LKEVersions.Endpoint()
+ if err != nil {
+ panic(err)
+ }
+ return endpoint
+}
+
+// endpoint gets the endpoint URL for LKEClusterAPIEndpoints
+func (LKEClusterAPIEndpointsPagedResponse) endpointWithID(c *Client, id int) string {
+ endpoint, err := c.LKEClusterAPIEndpoints.endpointWithParams(id)
+ if err != nil {
+ panic(err)
+ }
+ return endpoint
+}
+
+// appendData appends LKEClusterAPIEndpoints when processing paginated LKEClusterAPIEndpoints responses
+func (resp *LKEClusterAPIEndpointsPagedResponse) appendData(r *LKEClusterAPIEndpointsPagedResponse) {
+ resp.Data = append(resp.Data, r.Data...)
+}
+
+// appendData appends LKEVersions when processing paginated LKEVersion responses
+func (resp *LKEVersionsPagedResponse) appendData(r *LKEVersionsPagedResponse) {
+ resp.Data = append(resp.Data, r.Data...)
+}
+
+// ListLKEClusters lists LKEClusters
+func (c *Client) ListLKEClusters(ctx context.Context, opts *ListOptions) ([]LKECluster, error) {
+ response := LKEClustersPagedResponse{}
+ err := c.listHelper(ctx, &response, opts)
+ if err != nil {
+ return nil, err
+ }
+ return response.Data, nil
+}
+
+// GetLKECluster gets the lkeCluster with the provided ID
+func (c *Client) GetLKECluster(ctx context.Context, id int) (*LKECluster, error) {
+ e, err := c.LKEClusters.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+ e = fmt.Sprintf("%s/%d", e, id)
+ r, err := coupleAPIErrors(c.R(ctx).SetResult(&LKECluster{}).Get(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*LKECluster), nil
+}
+
+// CreateLKECluster creates a LKECluster
+func (c *Client) CreateLKECluster(ctx context.Context, createOpts LKEClusterCreateOptions) (*LKECluster, error) {
+ var body string
+ e, err := c.LKEClusters.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+
+ req := c.R(ctx).SetResult(&LKECluster{})
+
+ if bodyData, err := json.Marshal(createOpts); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Post(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*LKECluster), nil
+}
+
+// UpdateLKECluster updates the LKECluster with the specified id
+func (c *Client) UpdateLKECluster(ctx context.Context, id int, updateOpts LKEClusterUpdateOptions) (*LKECluster, error) {
+ var body string
+ e, err := c.LKEClusters.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+ e = fmt.Sprintf("%s/%d", e, id)
+
+ req := c.R(ctx).SetResult(&LKECluster{})
+
+ if bodyData, err := json.Marshal(updateOpts); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Put(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*LKECluster), nil
+}
+
+// DeleteLKECluster deletes the LKECluster with the specified id
+func (c *Client) DeleteLKECluster(ctx context.Context, id int) error {
+ e, err := c.LKEClusters.Endpoint()
+ if err != nil {
+ return err
+ }
+ e = fmt.Sprintf("%s/%d", e, id)
+
+ _, err = coupleAPIErrors(c.R(ctx).Delete(e))
+ return err
+}
+
+// ListLKEClusterAPIEndpoints gets the API Endpoint for the LKE Cluster specified
+func (c *Client) ListLKEClusterAPIEndpoints(ctx context.Context, clusterID int, opts *ListOptions) ([]LKEClusterAPIEndpoint, error) {
+ response := LKEClusterAPIEndpointsPagedResponse{}
+ err := c.listHelperWithID(ctx, &response, clusterID, opts)
+ if err != nil {
+ return nil, err
+ }
+ return response.Data, nil
+}
+
+// GetLKEClusterKubeconfig gets the Kubeconfig for the LKE Cluster specified
+func (c *Client) GetLKEClusterKubeconfig(ctx context.Context, id int) (*LKEClusterKubeconfig, error) {
+ e, err := c.LKEClusters.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+ e = fmt.Sprintf("%s/%d/kubeconfig", e, id)
+ r, err := coupleAPIErrors(c.R(ctx).SetResult(&LKEClusterKubeconfig{}).Get(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*LKEClusterKubeconfig), nil
+}
+
+// RecycleLKEClusterNodes recycles all nodes in all pools of the specified LKE Cluster.
+func (c *Client) RecycleLKEClusterNodes(ctx context.Context, id int) error {
+ e, err := c.LKEClusters.Endpoint()
+ if err != nil {
+ return err
+ }
+ e = fmt.Sprintf("%s/%d/recycle", e, id)
+ _, err = coupleAPIErrors(c.R(ctx).Post(e))
+ return err
+}
+
+// GetLKEVersion gets details about a specific LKE Version
+func (c *Client) GetLKEVersion(ctx context.Context, version string) (*LKEVersion, error) {
+ e, err := c.LKEVersions.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+ e = fmt.Sprintf("%s/%s", e, version)
+ r, err := coupleAPIErrors(c.R(ctx).SetResult(&LKEVersion{}).Get(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*LKEVersion), nil
+}
+
+// ListLKEVersions lists the Kubernetes versions available through LKE
+func (c *Client) ListLKEVersions(ctx context.Context, opts *ListOptions) ([]LKEVersion, error) {
+ response := LKEVersionsPagedResponse{}
+ err := c.listHelper(ctx, &response, opts)
+ if err != nil {
+ return nil, err
+ }
+ return response.Data, nil
+}
diff --git a/vendor/github.com/linode/linodego/longview.go b/vendor/github.com/linode/linodego/longview.go
new file mode 100644
index 000000000..cf76f8f65
--- /dev/null
+++ b/vendor/github.com/linode/linodego/longview.go
@@ -0,0 +1,57 @@
+package linodego
+
+import (
+ "context"
+ "fmt"
+)
+
+// LongviewClient represents a LongviewClient object
+type LongviewClient struct {
+ ID int `json:"id"`
+ // UpdatedStr string `json:"updated"`
+ // Updated *time.Time `json:"-"`
+}
+
+// LongviewClientsPagedResponse represents a paginated LongviewClient API response
+type LongviewClientsPagedResponse struct {
+ *PageOptions
+ Data []LongviewClient `json:"data"`
+}
+
+// endpoint gets the endpoint URL for LongviewClient
+func (LongviewClientsPagedResponse) endpoint(c *Client) string {
+ endpoint, err := c.LongviewClients.Endpoint()
+ if err != nil {
+ panic(err)
+ }
+ return endpoint
+}
+
+// appendData appends LongviewClients when processing paginated LongviewClient responses
+func (resp *LongviewClientsPagedResponse) appendData(r *LongviewClientsPagedResponse) {
+ resp.Data = append(resp.Data, r.Data...)
+}
+
+// ListLongviewClients lists LongviewClients
+func (c *Client) ListLongviewClients(ctx context.Context, opts *ListOptions) ([]LongviewClient, error) {
+ response := LongviewClientsPagedResponse{}
+ err := c.listHelper(ctx, &response, opts)
+ if err != nil {
+ return nil, err
+ }
+ return response.Data, nil
+}
+
+// GetLongviewClient gets the Longview Client with the provided ID
+func (c *Client) GetLongviewClient(ctx context.Context, id string) (*LongviewClient, error) {
+ e, err := c.LongviewClients.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+ e = fmt.Sprintf("%s/%s", e, id)
+ r, err := c.R(ctx).SetResult(&LongviewClient{}).Get(e)
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*LongviewClient), nil
+}
diff --git a/vendor/github.com/linode/linodego/longview_subscriptions.go b/vendor/github.com/linode/linodego/longview_subscriptions.go
new file mode 100644
index 000000000..230fce4b9
--- /dev/null
+++ b/vendor/github.com/linode/linodego/longview_subscriptions.go
@@ -0,0 +1,60 @@
+package linodego
+
+import (
+ "context"
+ "fmt"
+)
+
+// LongviewSubscription represents a LongviewSubscription object
+type LongviewSubscription struct {
+ ID string `json:"id"`
+ Label string `json:"label"`
+ ClientsIncluded int `json:"clients_included"`
+ Price *LinodePrice `json:"price"`
+ // UpdatedStr string `json:"updated"`
+ // Updated *time.Time `json:"-"`
+}
+
+// LongviewSubscriptionsPagedResponse represents a paginated LongviewSubscription API response
+type LongviewSubscriptionsPagedResponse struct {
+ *PageOptions
+ Data []LongviewSubscription `json:"data"`
+}
+
+// endpoint gets the endpoint URL for LongviewSubscription
+func (LongviewSubscriptionsPagedResponse) endpoint(c *Client) string {
+ endpoint, err := c.LongviewSubscriptions.Endpoint()
+ if err != nil {
+ panic(err)
+ }
+ return endpoint
+}
+
+// appendData appends LongviewSubscriptions when processing paginated LongviewSubscription responses
+func (resp *LongviewSubscriptionsPagedResponse) appendData(r *LongviewSubscriptionsPagedResponse) {
+ resp.Data = append(resp.Data, r.Data...)
+}
+
+// ListLongviewSubscriptions lists LongviewSubscriptions
+func (c *Client) ListLongviewSubscriptions(ctx context.Context, opts *ListOptions) ([]LongviewSubscription, error) {
+ response := LongviewSubscriptionsPagedResponse{}
+ err := c.listHelper(ctx, &response, opts)
+ if err != nil {
+ return nil, err
+ }
+ return response.Data, nil
+}
+
+// GetLongviewSubscription gets the Longview Subscription with the provided ID
+func (c *Client) GetLongviewSubscription(ctx context.Context, id string) (*LongviewSubscription, error) {
+ e, err := c.LongviewSubscriptions.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+ e = fmt.Sprintf("%s/%s", e, id)
+ r, err := c.R(ctx).SetResult(&LongviewSubscription{}).Get(e)
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*LongviewSubscription), nil
+}
diff --git a/vendor/github.com/linode/linodego/managed.go b/vendor/github.com/linode/linodego/managed.go
new file mode 100644
index 000000000..152361292
--- /dev/null
+++ b/vendor/github.com/linode/linodego/managed.go
@@ -0,0 +1 @@
+package linodego
diff --git a/vendor/github.com/linode/linodego/network_ips.go b/vendor/github.com/linode/linodego/network_ips.go
new file mode 100644
index 000000000..306dc1bf1
--- /dev/null
+++ b/vendor/github.com/linode/linodego/network_ips.go
@@ -0,0 +1,89 @@
+package linodego
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+)
+
+// IPAddressesPagedResponse represents a paginated IPAddress API response
+type IPAddressesPagedResponse struct {
+ *PageOptions
+ Data []InstanceIP `json:"data"`
+}
+
+// IPAddressUpdateOptions fields are those accepted by UpdateIPAddress
+type IPAddressUpdateOptions struct {
+ // The reverse DNS assigned to this address. For public IPv4 addresses, this will be set to a default value provided by Linode if set to nil.
+ RDNS *string `json:"rdns"`
+}
+
+// GetUpdateOptions converts a IPAddress to IPAddressUpdateOptions for use in UpdateIPAddress
+func (i InstanceIP) GetUpdateOptions() (o IPAddressUpdateOptions) {
+ o.RDNS = copyString(&i.RDNS)
+ return
+}
+
+// endpoint gets the endpoint URL for IPAddress
+func (IPAddressesPagedResponse) endpoint(c *Client) string {
+ endpoint, err := c.IPAddresses.Endpoint()
+ if err != nil {
+ panic(err)
+ }
+ return endpoint
+}
+
+// appendData appends IPAddresses when processing paginated InstanceIPAddress responses
+func (resp *IPAddressesPagedResponse) appendData(r *IPAddressesPagedResponse) {
+ resp.Data = append(resp.Data, r.Data...)
+}
+
+// ListIPAddresses lists IPAddresses
+func (c *Client) ListIPAddresses(ctx context.Context, opts *ListOptions) ([]InstanceIP, error) {
+ response := IPAddressesPagedResponse{}
+ err := c.listHelper(ctx, &response, opts)
+ if err != nil {
+ return nil, err
+ }
+ return response.Data, nil
+}
+
+// GetIPAddress gets the IPAddress with the provided ID
+func (c *Client) GetIPAddress(ctx context.Context, id string) (*InstanceIP, error) {
+ e, err := c.IPAddresses.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+ e = fmt.Sprintf("%s/%s", e, id)
+ r, err := coupleAPIErrors(c.R(ctx).SetResult(&InstanceIP{}).Get(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*InstanceIP), nil
+}
+
+// UpdateIPAddress updates the IPAddress with the specified id
+func (c *Client) UpdateIPAddress(ctx context.Context, id string, updateOpts IPAddressUpdateOptions) (*InstanceIP, error) {
+ var body string
+ e, err := c.IPAddresses.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+ e = fmt.Sprintf("%s/%s", e, id)
+
+ req := c.R(ctx).SetResult(&InstanceIP{})
+
+ if bodyData, err := json.Marshal(updateOpts); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Put(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*InstanceIP), nil
+}
diff --git a/vendor/github.com/linode/linodego/network_pools.go b/vendor/github.com/linode/linodego/network_pools.go
new file mode 100644
index 000000000..03085923c
--- /dev/null
+++ b/vendor/github.com/linode/linodego/network_pools.go
@@ -0,0 +1,50 @@
+package linodego
+
+import (
+ "context"
+ "fmt"
+)
+
+// IPv6PoolsPagedResponse represents a paginated IPv6Pool API response
+type IPv6PoolsPagedResponse struct {
+ *PageOptions
+ Data []IPv6Range `json:"data"`
+}
+
+// endpoint gets the endpoint URL for IPv6Pool
+func (IPv6PoolsPagedResponse) endpoint(c *Client) string {
+ endpoint, err := c.IPv6Pools.Endpoint()
+ if err != nil {
+ panic(err)
+ }
+ return endpoint
+}
+
+// appendData appends IPv6Pools when processing paginated IPv6Pool responses
+func (resp *IPv6PoolsPagedResponse) appendData(r *IPv6PoolsPagedResponse) {
+ resp.Data = append(resp.Data, r.Data...)
+}
+
+// ListIPv6Pools lists IPv6Pools
+func (c *Client) ListIPv6Pools(ctx context.Context, opts *ListOptions) ([]IPv6Range, error) {
+ response := IPv6PoolsPagedResponse{}
+ err := c.listHelper(ctx, &response, opts)
+ if err != nil {
+ return nil, err
+ }
+ return response.Data, nil
+}
+
+// GetIPv6Pool gets the IPv6Pool with the provided ID
+func (c *Client) GetIPv6Pool(ctx context.Context, id string) (*IPv6Range, error) {
+ e, err := c.IPv6Pools.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+ e = fmt.Sprintf("%s/%s", e, id)
+ r, err := coupleAPIErrors(c.R(ctx).SetResult(&IPv6Range{}).Get(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*IPv6Range), nil
+}
diff --git a/vendor/github.com/linode/linodego/network_ranges.go b/vendor/github.com/linode/linodego/network_ranges.go
new file mode 100644
index 000000000..0c0ba15a3
--- /dev/null
+++ b/vendor/github.com/linode/linodego/network_ranges.go
@@ -0,0 +1,50 @@
+package linodego
+
+import (
+ "context"
+ "fmt"
+)
+
+// IPv6RangesPagedResponse represents a paginated IPv6Range API response
+type IPv6RangesPagedResponse struct {
+ *PageOptions
+ Data []IPv6Range `json:"data"`
+}
+
+// endpoint gets the endpoint URL for IPv6Range
+func (IPv6RangesPagedResponse) endpoint(c *Client) string {
+ endpoint, err := c.IPv6Ranges.Endpoint()
+ if err != nil {
+ panic(err)
+ }
+ return endpoint
+}
+
+// appendData appends IPv6Ranges when processing paginated IPv6Range responses
+func (resp *IPv6RangesPagedResponse) appendData(r *IPv6RangesPagedResponse) {
+ resp.Data = append(resp.Data, r.Data...)
+}
+
+// ListIPv6Ranges lists IPv6Ranges
+func (c *Client) ListIPv6Ranges(ctx context.Context, opts *ListOptions) ([]IPv6Range, error) {
+ response := IPv6RangesPagedResponse{}
+ err := c.listHelper(ctx, &response, opts)
+ if err != nil {
+ return nil, err
+ }
+ return response.Data, nil
+}
+
+// GetIPv6Range gets the IPv6Range with the provided ID
+func (c *Client) GetIPv6Range(ctx context.Context, id string) (*IPv6Range, error) {
+ e, err := c.IPv6Ranges.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+ e = fmt.Sprintf("%s/%s", e, id)
+ r, err := coupleAPIErrors(c.R(ctx).SetResult(&IPv6Range{}).Get(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*IPv6Range), nil
+}
diff --git a/vendor/github.com/linode/linodego/nodebalancer.go b/vendor/github.com/linode/linodego/nodebalancer.go
new file mode 100644
index 000000000..90f0e92d5
--- /dev/null
+++ b/vendor/github.com/linode/linodego/nodebalancer.go
@@ -0,0 +1,212 @@
+package linodego
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "github.com/linode/linodego/internal/parseabletime"
+)
+
+// NodeBalancer represents a NodeBalancer object
+type NodeBalancer struct {
+ // This NodeBalancer's unique ID.
+ ID int `json:"id"`
+ // This NodeBalancer's label. These must be unique on your Account.
+ Label *string `json:"label"`
+ // The Region where this NodeBalancer is located. NodeBalancers only support backends in the same Region.
+ Region string `json:"region"`
+ // This NodeBalancer's hostname, ending with .nodebalancer.linode.com
+ Hostname *string `json:"hostname"`
+ // This NodeBalancer's public IPv4 address.
+ IPv4 *string `json:"ipv4"`
+ // This NodeBalancer's public IPv6 address.
+ IPv6 *string `json:"ipv6"`
+ // Throttle connections per second (0-20). Set to 0 (zero) to disable throttling.
+ ClientConnThrottle int `json:"client_conn_throttle"`
+ // Information about the amount of transfer this NodeBalancer has had so far this month.
+ Transfer NodeBalancerTransfer `json:"transfer"`
+
+ // An array of tags applied to this object. Tags are for organizational purposes only.
+ Tags []string `json:"tags"`
+
+ Created *time.Time `json:"-"`
+ Updated *time.Time `json:"-"`
+}
+
+// NodeBalancerTransfer contains information about the amount of transfer a NodeBalancer has had in the current month
+type NodeBalancerTransfer struct {
+ // The total transfer, in MB, used by this NodeBalancer this month.
+ Total *float64 `json:"total"`
+	// The total outbound transfer, in MB, used for this NodeBalancer this month.
+ Out *float64 `json:"out"`
+	// The total inbound transfer, in MB, used for this NodeBalancer this month.
+ In *float64 `json:"in"`
+}
+
+// NodeBalancerCreateOptions are the options permitted for CreateNodeBalancer
+type NodeBalancerCreateOptions struct {
+ Label *string `json:"label,omitempty"`
+ Region string `json:"region,omitempty"`
+ ClientConnThrottle *int `json:"client_conn_throttle,omitempty"`
+ Configs []*NodeBalancerConfigCreateOptions `json:"configs,omitempty"`
+ Tags []string `json:"tags"`
+}
+
+// NodeBalancerUpdateOptions are the options permitted for UpdateNodeBalancer
+type NodeBalancerUpdateOptions struct {
+ Label *string `json:"label,omitempty"`
+ ClientConnThrottle *int `json:"client_conn_throttle,omitempty"`
+ Tags *[]string `json:"tags,omitempty"`
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (i *NodeBalancer) UnmarshalJSON(b []byte) error {
+ type Mask NodeBalancer
+
+ p := struct {
+ *Mask
+ Created *parseabletime.ParseableTime `json:"created"`
+ Updated *parseabletime.ParseableTime `json:"updated"`
+ }{
+ Mask: (*Mask)(i),
+ }
+
+ if err := json.Unmarshal(b, &p); err != nil {
+ return err
+ }
+
+ i.Created = (*time.Time)(p.Created)
+ i.Updated = (*time.Time)(p.Updated)
+
+ return nil
+}
+
+// GetCreateOptions converts a NodeBalancer to NodeBalancerCreateOptions for use in CreateNodeBalancer
+func (i NodeBalancer) GetCreateOptions() NodeBalancerCreateOptions {
+ return NodeBalancerCreateOptions{
+ Label: i.Label,
+ Region: i.Region,
+ ClientConnThrottle: &i.ClientConnThrottle,
+ Tags: i.Tags,
+ }
+}
+
+// GetUpdateOptions converts a NodeBalancer to NodeBalancerUpdateOptions for use in UpdateNodeBalancer
+func (i NodeBalancer) GetUpdateOptions() NodeBalancerUpdateOptions {
+ return NodeBalancerUpdateOptions{
+ Label: i.Label,
+ ClientConnThrottle: &i.ClientConnThrottle,
+ Tags: &i.Tags,
+ }
+}
+
+// NodeBalancersPagedResponse represents a paginated NodeBalancer API response
+type NodeBalancersPagedResponse struct {
+ *PageOptions
+ Data []NodeBalancer `json:"data"`
+}
+
+func (NodeBalancersPagedResponse) endpoint(c *Client) string {
+ endpoint, err := c.NodeBalancers.Endpoint()
+ if err != nil {
+ panic(err)
+ }
+ return endpoint
+}
+
+func (resp *NodeBalancersPagedResponse) appendData(r *NodeBalancersPagedResponse) {
+ resp.Data = append(resp.Data, r.Data...)
+}
+
+// ListNodeBalancers lists NodeBalancers
+func (c *Client) ListNodeBalancers(ctx context.Context, opts *ListOptions) ([]NodeBalancer, error) {
+ response := NodeBalancersPagedResponse{}
+ err := c.listHelper(ctx, &response, opts)
+ if err != nil {
+ return nil, err
+ }
+ return response.Data, nil
+}
+
+// GetNodeBalancer gets the NodeBalancer with the provided ID
+func (c *Client) GetNodeBalancer(ctx context.Context, id int) (*NodeBalancer, error) {
+ e, err := c.NodeBalancers.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+ e = fmt.Sprintf("%s/%d", e, id)
+ r, err := coupleAPIErrors(c.R(ctx).
+ SetResult(&NodeBalancer{}).
+ Get(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*NodeBalancer), nil
+}
+
+// CreateNodeBalancer creates a NodeBalancer
+func (c *Client) CreateNodeBalancer(ctx context.Context, nodebalancer NodeBalancerCreateOptions) (*NodeBalancer, error) {
+ var body string
+ e, err := c.NodeBalancers.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+
+ req := c.R(ctx).SetResult(&NodeBalancer{})
+
+ if bodyData, err := json.Marshal(nodebalancer); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetHeader("Content-Type", "application/json").
+ SetBody(body).
+ Post(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*NodeBalancer), nil
+}
+
+// UpdateNodeBalancer updates the NodeBalancer with the specified id
+func (c *Client) UpdateNodeBalancer(ctx context.Context, id int, updateOpts NodeBalancerUpdateOptions) (*NodeBalancer, error) {
+ var body string
+ e, err := c.NodeBalancers.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+ e = fmt.Sprintf("%s/%d", e, id)
+
+ req := c.R(ctx).SetResult(&NodeBalancer{})
+
+ if bodyData, err := json.Marshal(updateOpts); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Put(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*NodeBalancer), nil
+}
+
+// DeleteNodeBalancer deletes the NodeBalancer with the specified id
+func (c *Client) DeleteNodeBalancer(ctx context.Context, id int) error {
+ e, err := c.NodeBalancers.Endpoint()
+ if err != nil {
+ return err
+ }
+ e = fmt.Sprintf("%s/%d", e, id)
+
+ _, err = coupleAPIErrors(c.R(ctx).Delete(e))
+
+ return err
+}
diff --git a/vendor/github.com/linode/linodego/nodebalancer_config_nodes.go b/vendor/github.com/linode/linodego/nodebalancer_config_nodes.go
new file mode 100644
index 000000000..a5cf02a9d
--- /dev/null
+++ b/vendor/github.com/linode/linodego/nodebalancer_config_nodes.go
@@ -0,0 +1,179 @@
+package linodego
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+)
+
+// NodeBalancerNode objects represent a backend that can accept traffic for a NodeBalancer Config
+type NodeBalancerNode struct {
+ ID int `json:"id"`
+ Address string `json:"address"`
+ Label string `json:"label"`
+ Status string `json:"status"`
+ Weight int `json:"weight"`
+ Mode NodeMode `json:"mode"`
+ ConfigID int `json:"config_id"`
+ NodeBalancerID int `json:"nodebalancer_id"`
+}
+
+// NodeMode is the mode a NodeBalancer should use when sending traffic to a NodeBalancer Node
+type NodeMode string
+
+var (
+ // ModeAccept is the NodeMode indicating a NodeBalancer Node is accepting traffic
+ ModeAccept NodeMode = "accept"
+
+ // ModeReject is the NodeMode indicating a NodeBalancer Node is not receiving traffic
+ ModeReject NodeMode = "reject"
+
+ // ModeDrain is the NodeMode indicating a NodeBalancer Node is not receiving new traffic, but may continue receiving traffic from pinned connections
+ ModeDrain NodeMode = "drain"
+
+ // ModeBackup is the NodeMode indicating a NodeBalancer Node will only receive traffic if all "accept" Nodes are down
+ ModeBackup NodeMode = "backup"
+)
+
+// NodeBalancerNodeCreateOptions fields are those accepted by CreateNodeBalancerNode
+type NodeBalancerNodeCreateOptions struct {
+ Address string `json:"address"`
+ Label string `json:"label"`
+ Weight int `json:"weight,omitempty"`
+ Mode NodeMode `json:"mode,omitempty"`
+}
+
+// NodeBalancerNodeUpdateOptions fields are those accepted by UpdateNodeBalancerNode
+type NodeBalancerNodeUpdateOptions struct {
+ Address string `json:"address,omitempty"`
+ Label string `json:"label,omitempty"`
+ Weight int `json:"weight,omitempty"`
+ Mode NodeMode `json:"mode,omitempty"`
+}
+
+// GetCreateOptions converts a NodeBalancerNode to NodeBalancerNodeCreateOptions for use in CreateNodeBalancerNode
+func (i NodeBalancerNode) GetCreateOptions() NodeBalancerNodeCreateOptions {
+ return NodeBalancerNodeCreateOptions{
+ Address: i.Address,
+ Label: i.Label,
+ Weight: i.Weight,
+ Mode: i.Mode,
+ }
+}
+
+// GetUpdateOptions converts a NodeBalancerNode to NodeBalancerNodeUpdateOptions for use in UpdateNodeBalancerNode
+func (i NodeBalancerNode) GetUpdateOptions() NodeBalancerNodeUpdateOptions {
+ return NodeBalancerNodeUpdateOptions{
+ Address: i.Address,
+ Label: i.Label,
+ Weight: i.Weight,
+ Mode: i.Mode,
+ }
+}
+
+// NodeBalancerNodesPagedResponse represents a paginated NodeBalancerNode API response
+type NodeBalancerNodesPagedResponse struct {
+ *PageOptions
+ Data []NodeBalancerNode `json:"data"`
+}
+
+// endpoint gets the endpoint URL for NodeBalancerNode
+func (NodeBalancerNodesPagedResponse) endpointWithTwoIDs(c *Client, nodebalancerID int, configID int) string {
+ endpoint, err := c.NodeBalancerNodes.endpointWithParams(nodebalancerID, configID)
+ if err != nil {
+ panic(err)
+ }
+ return endpoint
+}
+
+// appendData appends NodeBalancerNodes when processing paginated NodeBalancerNode responses
+func (resp *NodeBalancerNodesPagedResponse) appendData(r *NodeBalancerNodesPagedResponse) {
+ resp.Data = append(resp.Data, r.Data...)
+}
+
+// ListNodeBalancerNodes lists NodeBalancerNodes
+func (c *Client) ListNodeBalancerNodes(ctx context.Context, nodebalancerID int, configID int, opts *ListOptions) ([]NodeBalancerNode, error) {
+ response := NodeBalancerNodesPagedResponse{}
+ err := c.listHelperWithTwoIDs(ctx, &response, nodebalancerID, configID, opts)
+ if err != nil {
+ return nil, err
+ }
+ return response.Data, nil
+}
+
+// GetNodeBalancerNode gets the NodeBalancerNode with the provided ID
+func (c *Client) GetNodeBalancerNode(ctx context.Context, nodebalancerID int, configID int, nodeID int) (*NodeBalancerNode, error) {
+ e, err := c.NodeBalancerNodes.endpointWithParams(nodebalancerID, configID)
+ if err != nil {
+ return nil, err
+ }
+ e = fmt.Sprintf("%s/%d", e, nodeID)
+ r, err := coupleAPIErrors(c.R(ctx).SetResult(&NodeBalancerNode{}).Get(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*NodeBalancerNode), nil
+}
+
+// CreateNodeBalancerNode creates a NodeBalancerNode
+func (c *Client) CreateNodeBalancerNode(ctx context.Context, nodebalancerID int, configID int, createOpts NodeBalancerNodeCreateOptions) (*NodeBalancerNode, error) {
+ var body string
+ e, err := c.NodeBalancerNodes.endpointWithParams(nodebalancerID, configID)
+ if err != nil {
+ return nil, err
+ }
+
+ req := c.R(ctx).SetResult(&NodeBalancerNode{})
+
+ if bodyData, err := json.Marshal(createOpts); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Post(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*NodeBalancerNode), nil
+}
+
+// UpdateNodeBalancerNode updates the NodeBalancerNode with the specified id
+func (c *Client) UpdateNodeBalancerNode(ctx context.Context, nodebalancerID int, configID int, nodeID int, updateOpts NodeBalancerNodeUpdateOptions) (*NodeBalancerNode, error) {
+ var body string
+ e, err := c.NodeBalancerNodes.endpointWithParams(nodebalancerID, configID)
+ if err != nil {
+ return nil, err
+ }
+ e = fmt.Sprintf("%s/%d", e, nodeID)
+
+ req := c.R(ctx).SetResult(&NodeBalancerNode{})
+
+ if bodyData, err := json.Marshal(updateOpts); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Put(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*NodeBalancerNode), nil
+}
+
+// DeleteNodeBalancerNode deletes the NodeBalancerNode with the specified id
+func (c *Client) DeleteNodeBalancerNode(ctx context.Context, nodebalancerID int, configID int, nodeID int) error {
+ e, err := c.NodeBalancerNodes.endpointWithParams(nodebalancerID, configID)
+ if err != nil {
+ return err
+ }
+ e = fmt.Sprintf("%s/%d", e, nodeID)
+
+ _, err = coupleAPIErrors(c.R(ctx).Delete(e))
+ return err
+}
diff --git a/vendor/github.com/linode/linodego/nodebalancer_configs.go b/vendor/github.com/linode/linodego/nodebalancer_configs.go
new file mode 100644
index 000000000..ec35d0826
--- /dev/null
+++ b/vendor/github.com/linode/linodego/nodebalancer_configs.go
@@ -0,0 +1,338 @@
+package linodego
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+)
+
+// NodeBalancerConfig objects allow a NodeBalancer to accept traffic on a new port
+type NodeBalancerConfig struct {
+ ID int `json:"id"`
+ Port int `json:"port"`
+ Protocol ConfigProtocol `json:"protocol"`
+ ProxyProtocol ConfigProxyProtocol `json:"proxy_protocol"`
+ Algorithm ConfigAlgorithm `json:"algorithm"`
+ Stickiness ConfigStickiness `json:"stickiness"`
+ Check ConfigCheck `json:"check"`
+ CheckInterval int `json:"check_interval"`
+ CheckAttempts int `json:"check_attempts"`
+ CheckPath string `json:"check_path"`
+ CheckBody string `json:"check_body"`
+ CheckPassive bool `json:"check_passive"`
+ CheckTimeout int `json:"check_timeout"`
+ CipherSuite ConfigCipher `json:"cipher_suite"`
+ NodeBalancerID int `json:"nodebalancer_id"`
+ SSLCommonName string `json:"ssl_commonname"`
+ SSLFingerprint string `json:"ssl_fingerprint"`
+ SSLCert string `json:"ssl_cert"`
+ SSLKey string `json:"ssl_key"`
+ NodesStatus *NodeBalancerNodeStatus `json:"nodes_status"`
+}
+
+// ConfigAlgorithm constants start with Algorithm and include Linode API NodeBalancer Config Algorithms
+type ConfigAlgorithm string
+
+// ConfigAlgorithm constants reflect the NodeBalancer Config Algorithm
+const (
+ AlgorithmRoundRobin ConfigAlgorithm = "roundrobin"
+ AlgorithmLeastConn ConfigAlgorithm = "leastconn"
+ AlgorithmSource ConfigAlgorithm = "source"
+)
+
+// ConfigStickiness constants start with Stickiness and include Linode API NodeBalancer Config Stickiness
+type ConfigStickiness string
+
+// ConfigStickiness constants reflect the node stickiness method for a NodeBalancer Config
+const (
+ StickinessNone ConfigStickiness = "none"
+ StickinessTable ConfigStickiness = "table"
+ StickinessHTTPCookie ConfigStickiness = "http_cookie"
+)
+
+// ConfigCheck constants start with Check and include Linode API NodeBalancer Config Check methods
+type ConfigCheck string
+
+// ConfigCheck constants reflect the node health status checking method for a NodeBalancer Config
+const (
+ CheckNone ConfigCheck = "none"
+ CheckConnection ConfigCheck = "connection"
+ CheckHTTP ConfigCheck = "http"
+ CheckHTTPBody ConfigCheck = "http_body"
+)
+
+// ConfigProtocol constants start with Protocol and include Linode API Nodebalancer Config protocols
+type ConfigProtocol string
+
+// ConfigProtocol constants reflect the protocol used by a NodeBalancer Config
+const (
+ ProtocolHTTP ConfigProtocol = "http"
+ ProtocolHTTPS ConfigProtocol = "https"
+ ProtocolTCP ConfigProtocol = "tcp"
+)
+
+// ConfigProxyProtocol constants start with ProxyProtocol and include Linode API NodeBalancer Config proxy protocol versions
+type ConfigProxyProtocol string
+
+// ConfigProxyProtocol constants reflect the proxy protocol version used by a NodeBalancer Config
+const (
+	ProxyProtocolNone ConfigProxyProtocol = "none"
+	ProxyProtocolV1   ConfigProxyProtocol = "v1"
+	ProxyProtocolV2   ConfigProxyProtocol = "v2"
+)
+
+// ConfigCipher constants start with Cipher and include Linode API NodeBalancer Config Cipher values
+type ConfigCipher string
+
+// ConfigCipher constants reflect the preferred cipher set for a NodeBalancer Config
+const (
+ CipherRecommended ConfigCipher = "recommended"
+ CipherLegacy ConfigCipher = "legacy"
+)
+
+// NodeBalancerNodeStatus represents the total number of nodes whose status is Up or Down
+type NodeBalancerNodeStatus struct {
+ Up int `json:"up"`
+ Down int `json:"down"`
+}
+
+// NodeBalancerConfigCreateOptions are permitted by CreateNodeBalancerConfig
+type NodeBalancerConfigCreateOptions struct {
+ Port int `json:"port"`
+ Protocol ConfigProtocol `json:"protocol,omitempty"`
+ ProxyProtocol ConfigProxyProtocol `json:"proxy_protocol,omitempty"`
+ Algorithm ConfigAlgorithm `json:"algorithm,omitempty"`
+ Stickiness ConfigStickiness `json:"stickiness,omitempty"`
+ Check ConfigCheck `json:"check,omitempty"`
+ CheckInterval int `json:"check_interval,omitempty"`
+ CheckAttempts int `json:"check_attempts,omitempty"`
+ CheckPath string `json:"check_path,omitempty"`
+ CheckBody string `json:"check_body,omitempty"`
+ CheckPassive *bool `json:"check_passive,omitempty"`
+ CheckTimeout int `json:"check_timeout,omitempty"`
+ CipherSuite ConfigCipher `json:"cipher_suite,omitempty"`
+ SSLCert string `json:"ssl_cert,omitempty"`
+ SSLKey string `json:"ssl_key,omitempty"`
+ Nodes []NodeBalancerNodeCreateOptions `json:"nodes,omitempty"`
+}
+
+// NodeBalancerConfigRebuildOptions used by RebuildNodeBalancerConfig
+type NodeBalancerConfigRebuildOptions struct {
+ Port int `json:"port"`
+ Protocol ConfigProtocol `json:"protocol,omitempty"`
+ ProxyProtocol ConfigProxyProtocol `json:"proxy_protocol,omitempty"`
+ Algorithm ConfigAlgorithm `json:"algorithm,omitempty"`
+ Stickiness ConfigStickiness `json:"stickiness,omitempty"`
+ Check ConfigCheck `json:"check,omitempty"`
+ CheckInterval int `json:"check_interval,omitempty"`
+ CheckAttempts int `json:"check_attempts,omitempty"`
+ CheckPath string `json:"check_path,omitempty"`
+ CheckBody string `json:"check_body,omitempty"`
+ CheckPassive *bool `json:"check_passive,omitempty"`
+ CheckTimeout int `json:"check_timeout,omitempty"`
+ CipherSuite ConfigCipher `json:"cipher_suite,omitempty"`
+ SSLCert string `json:"ssl_cert,omitempty"`
+ SSLKey string `json:"ssl_key,omitempty"`
+ Nodes []NodeBalancerNodeCreateOptions `json:"nodes"`
+}
+
+// NodeBalancerConfigUpdateOptions are permitted by UpdateNodeBalancerConfig
+type NodeBalancerConfigUpdateOptions NodeBalancerConfigCreateOptions
+
+// GetCreateOptions converts a NodeBalancerConfig to NodeBalancerConfigCreateOptions for use in CreateNodeBalancerConfig
+func (i NodeBalancerConfig) GetCreateOptions() NodeBalancerConfigCreateOptions {
+ return NodeBalancerConfigCreateOptions{
+ Port: i.Port,
+ Protocol: i.Protocol,
+ ProxyProtocol: i.ProxyProtocol,
+ Algorithm: i.Algorithm,
+ Stickiness: i.Stickiness,
+ Check: i.Check,
+ CheckInterval: i.CheckInterval,
+ CheckAttempts: i.CheckAttempts,
+ CheckTimeout: i.CheckTimeout,
+ CheckPath: i.CheckPath,
+ CheckBody: i.CheckBody,
+ CheckPassive: copyBool(&i.CheckPassive),
+ CipherSuite: i.CipherSuite,
+ SSLCert: i.SSLCert,
+ SSLKey: i.SSLKey,
+ }
+}
+
+// GetUpdateOptions converts a NodeBalancerConfig to NodeBalancerConfigUpdateOptions for use in UpdateNodeBalancerConfig
+func (i NodeBalancerConfig) GetUpdateOptions() NodeBalancerConfigUpdateOptions {
+ return NodeBalancerConfigUpdateOptions{
+ Port: i.Port,
+ Protocol: i.Protocol,
+ ProxyProtocol: i.ProxyProtocol,
+ Algorithm: i.Algorithm,
+ Stickiness: i.Stickiness,
+ Check: i.Check,
+ CheckInterval: i.CheckInterval,
+ CheckAttempts: i.CheckAttempts,
+ CheckPath: i.CheckPath,
+ CheckBody: i.CheckBody,
+ CheckPassive: copyBool(&i.CheckPassive),
+ CheckTimeout: i.CheckTimeout,
+ CipherSuite: i.CipherSuite,
+ SSLCert: i.SSLCert,
+ SSLKey: i.SSLKey,
+ }
+}
+
+// GetRebuildOptions converts a NodeBalancerConfig to NodeBalancerConfigRebuildOptions for use in RebuildNodeBalancerConfig
+func (i NodeBalancerConfig) GetRebuildOptions() NodeBalancerConfigRebuildOptions {
+ return NodeBalancerConfigRebuildOptions{
+ Port: i.Port,
+ Protocol: i.Protocol,
+ ProxyProtocol: i.ProxyProtocol,
+ Algorithm: i.Algorithm,
+ Stickiness: i.Stickiness,
+ Check: i.Check,
+ CheckInterval: i.CheckInterval,
+ CheckAttempts: i.CheckAttempts,
+ CheckTimeout: i.CheckTimeout,
+ CheckPath: i.CheckPath,
+ CheckBody: i.CheckBody,
+ CheckPassive: copyBool(&i.CheckPassive),
+ CipherSuite: i.CipherSuite,
+ SSLCert: i.SSLCert,
+ SSLKey: i.SSLKey,
+ Nodes: make([]NodeBalancerNodeCreateOptions, 0),
+ }
+}
+
+// NodeBalancerConfigsPagedResponse represents a paginated NodeBalancerConfig API response
+type NodeBalancerConfigsPagedResponse struct {
+ *PageOptions
+ Data []NodeBalancerConfig `json:"data"`
+}
+
+// endpointWithID gets the endpoint URL for NodeBalancerConfig
+func (NodeBalancerConfigsPagedResponse) endpointWithID(c *Client, id int) string {
+ endpoint, err := c.NodeBalancerConfigs.endpointWithParams(id)
+ if err != nil {
+ panic(err)
+ }
+ return endpoint
+}
+
+// appendData appends NodeBalancerConfigs when processing paginated NodeBalancerConfig responses
+func (resp *NodeBalancerConfigsPagedResponse) appendData(r *NodeBalancerConfigsPagedResponse) {
+ resp.Data = append(resp.Data, r.Data...)
+}
+
+// ListNodeBalancerConfigs lists NodeBalancerConfigs
+func (c *Client) ListNodeBalancerConfigs(ctx context.Context, nodebalancerID int, opts *ListOptions) ([]NodeBalancerConfig, error) {
+ response := NodeBalancerConfigsPagedResponse{}
+ err := c.listHelperWithID(ctx, &response, nodebalancerID, opts)
+ if err != nil {
+ return nil, err
+ }
+ return response.Data, nil
+}
+
+// GetNodeBalancerConfig gets the NodeBalancerConfig with the provided configID belonging to the given NodeBalancer
+func (c *Client) GetNodeBalancerConfig(ctx context.Context, nodebalancerID int, configID int) (*NodeBalancerConfig, error) {
+	e, err := c.NodeBalancerConfigs.endpointWithParams(nodebalancerID)
+	if err != nil {
+		return nil, err
+	}
+	e = fmt.Sprintf("%s/%d", e, configID)
+	r, err := coupleAPIErrors(c.R(ctx).SetResult(&NodeBalancerConfig{}).Get(e))
+	if err != nil {
+		return nil, err
+	}
+	return r.Result().(*NodeBalancerConfig), nil
+}
+
+// CreateNodeBalancerConfig creates a NodeBalancerConfig
+func (c *Client) CreateNodeBalancerConfig(ctx context.Context, nodebalancerID int, nodebalancerConfig NodeBalancerConfigCreateOptions) (*NodeBalancerConfig, error) {
+ var body string
+ e, err := c.NodeBalancerConfigs.endpointWithParams(nodebalancerID)
+ if err != nil {
+ return nil, err
+ }
+
+ req := c.R(ctx).SetResult(&NodeBalancerConfig{})
+
+ if bodyData, err := json.Marshal(nodebalancerConfig); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetHeader("Content-Type", "application/json").
+ SetBody(body).
+ Post(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*NodeBalancerConfig), nil
+}
+
+// UpdateNodeBalancerConfig updates the NodeBalancerConfig with the specified id
+func (c *Client) UpdateNodeBalancerConfig(ctx context.Context, nodebalancerID int, configID int, updateOpts NodeBalancerConfigUpdateOptions) (*NodeBalancerConfig, error) {
+ var body string
+ e, err := c.NodeBalancerConfigs.endpointWithParams(nodebalancerID)
+ if err != nil {
+ return nil, err
+ }
+ e = fmt.Sprintf("%s/%d", e, configID)
+
+ req := c.R(ctx).SetResult(&NodeBalancerConfig{})
+
+ if bodyData, err := json.Marshal(updateOpts); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Put(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*NodeBalancerConfig), nil
+}
+
+// DeleteNodeBalancerConfig deletes the NodeBalancerConfig with the specified id
+func (c *Client) DeleteNodeBalancerConfig(ctx context.Context, nodebalancerID int, configID int) error {
+ e, err := c.NodeBalancerConfigs.endpointWithParams(nodebalancerID)
+ if err != nil {
+ return err
+ }
+ e = fmt.Sprintf("%s/%d", e, configID)
+
+ _, err = coupleAPIErrors(c.R(ctx).Delete(e))
+ return err
+}
+
+// RebuildNodeBalancerConfig updates the NodeBalancer with the specified id
+func (c *Client) RebuildNodeBalancerConfig(ctx context.Context, nodeBalancerID int, configID int, rebuildOpts NodeBalancerConfigRebuildOptions) (*NodeBalancerConfig, error) {
+ var body string
+ e, err := c.NodeBalancerConfigs.endpointWithParams(nodeBalancerID)
+ if err != nil {
+ return nil, err
+ }
+ e = fmt.Sprintf("%s/%d/rebuild", e, configID)
+
+ req := c.R(ctx).SetResult(&NodeBalancerConfig{})
+
+ if bodyData, err := json.Marshal(rebuildOpts); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Post(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*NodeBalancerConfig), nil
+}
diff --git a/vendor/github.com/linode/linodego/nodebalancer_stats.go b/vendor/github.com/linode/linodego/nodebalancer_stats.go
new file mode 100644
index 000000000..3eb200856
--- /dev/null
+++ b/vendor/github.com/linode/linodego/nodebalancer_stats.go
@@ -0,0 +1,36 @@
+package linodego
+
+import (
+ "context"
+)
+
+// NodeBalancerStats represents a nodebalancer stats object
+type NodeBalancerStats struct {
+ Title string `json:"title"`
+ Data NodeBalancerStatsData `json:"data"`
+}
+
+// NodeBalancerStatsData represents a nodebalancer stats data object
+type NodeBalancerStatsData struct {
+ Connections [][]float64 `json:"connections"`
+ Traffic StatsTraffic `json:"traffic"`
+}
+
+// StatsTraffic represents a Traffic stats object
+type StatsTraffic struct {
+ In [][]float64 `json:"in"`
+ Out [][]float64 `json:"out"`
+}
+
+// GetNodeBalancerStats gets the stats for the NodeBalancer with the provided ID (the linodeID parameter is a NodeBalancer ID)
+func (c *Client) GetNodeBalancerStats(ctx context.Context, linodeID int) (*NodeBalancerStats, error) {
+	e, err := c.NodeBalancerStats.endpointWithParams(linodeID)
+	if err != nil {
+		return nil, err
+	}
+	r, err := coupleAPIErrors(c.R(ctx).SetResult(&NodeBalancerStats{}).Get(e))
+	if err != nil {
+		return nil, err
+	}
+	return r.Result().(*NodeBalancerStats), nil
+}
diff --git a/vendor/github.com/linode/linodego/object_storage_bucket_certs.go b/vendor/github.com/linode/linodego/object_storage_bucket_certs.go
new file mode 100644
index 000000000..21979b92f
--- /dev/null
+++ b/vendor/github.com/linode/linodego/object_storage_bucket_certs.go
@@ -0,0 +1,61 @@
+package linodego
+
+import (
+	"context"
+	"encoding/json"
+)
+
+// ObjectStorageBucketCert represents the TLS/SSL certificate state of an Object Storage Bucket.
+type ObjectStorageBucketCert struct {
+	SSL bool `json:"ssl"`
+}
+
+// ObjectStorageBucketCertUploadOptions fields are those accepted by UploadObjectStorageBucketCert.
+type ObjectStorageBucketCertUploadOptions struct {
+	Certificate string `json:"certificate"`
+	PrivateKey  string `json:"private_key"`
+}
+
+// UploadObjectStorageBucketCert uploads a TLS/SSL Cert to be used with an Object Storage Bucket.
+func (c *Client) UploadObjectStorageBucketCert(ctx context.Context, clusterID, bucket string, uploadOpts ObjectStorageBucketCertUploadOptions) (*ObjectStorageBucketCert, error) {
+	e, err := c.ObjectStorageBucketCerts.endpointWithParams(clusterID, bucket)
+	if err != nil {
+		return nil, err
+	}
+
+	body, err := json.Marshal(uploadOpts)
+	if err != nil {
+		return nil, err
+	}
+
+	r, err := coupleAPIErrors(c.R(ctx).SetResult(&ObjectStorageBucketCert{}).SetBody(string(body)).Post(e))
+	if err != nil {
+		return nil, err
+	}
+	return r.Result().(*ObjectStorageBucketCert), nil
+}
+
+// GetObjectStorageBucketCert gets an ObjectStorageBucketCert
+func (c *Client) GetObjectStorageBucketCert(ctx context.Context, clusterID, bucket string) (*ObjectStorageBucketCert, error) {
+	e, err := c.ObjectStorageBucketCerts.endpointWithParams(clusterID, bucket)
+	if err != nil {
+		return nil, err
+	}
+
+	r, err := coupleAPIErrors(c.R(ctx).SetResult(&ObjectStorageBucketCert{}).Get(e))
+	if err != nil {
+		return nil, err
+	}
+	return r.Result().(*ObjectStorageBucketCert), nil
+}
+
+// DeleteObjectStorageBucketCert deletes an ObjectStorageBucketCert
+func (c *Client) DeleteObjectStorageBucketCert(ctx context.Context, clusterID, bucket string) error {
+	e, err := c.ObjectStorageBucketCerts.endpointWithParams(clusterID, bucket)
+	if err != nil {
+		return err
+	}
+
+	_, err = coupleAPIErrors(c.R(ctx).Delete(e))
+	return err
+}
diff --git a/vendor/github.com/linode/linodego/object_storage_buckets.go b/vendor/github.com/linode/linodego/object_storage_buckets.go
new file mode 100644
index 000000000..6c88d7085
--- /dev/null
+++ b/vendor/github.com/linode/linodego/object_storage_buckets.go
@@ -0,0 +1,198 @@
+package linodego
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "github.com/linode/linodego/internal/parseabletime"
+)
+
+// ObjectStorageBucket represents a ObjectStorage object
+type ObjectStorageBucket struct {
+ Label string `json:"label"`
+ Cluster string `json:"cluster"`
+
+ Created *time.Time `json:"-"`
+ Hostname string `json:"hostname"`
+}
+
+// ObjectStorageBucketAccess holds Object Storage access info
+type ObjectStorageBucketAccess struct {
+ ACL ObjectStorageACL `json:"acl"`
+ CorsEnabled bool `json:"cors_enabled"`
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (i *ObjectStorageBucket) UnmarshalJSON(b []byte) error {
+ type Mask ObjectStorageBucket
+
+ p := struct {
+ *Mask
+ Created *parseabletime.ParseableTime `json:"created"`
+ }{
+ Mask: (*Mask)(i),
+ }
+
+ if err := json.Unmarshal(b, &p); err != nil {
+ return err
+ }
+
+ i.Created = (*time.Time)(p.Created)
+
+ return nil
+}
+
+// ObjectStorageBucketCreateOptions fields are those accepted by CreateObjectStorageBucket
+type ObjectStorageBucketCreateOptions struct {
+ Cluster string `json:"cluster"`
+ Label string `json:"label"`
+
+ ACL ObjectStorageACL `json:"acl,omitempty"`
+ CorsEnabled *bool `json:"cors_enabled,omitempty"`
+}
+
+// ObjectStorageBucketUpdateAccessOptions fields are those accepted by UpdateObjectStorageBucketAccess
+type ObjectStorageBucketUpdateAccessOptions struct {
+ ACL ObjectStorageACL `json:"acl,omitempty"`
+ CorsEnabled *bool `json:"cors_enabled,omitempty"`
+}
+
+// ObjectStorageACL options start with ACL and include all known ACL types
+type ObjectStorageACL string
+
+// ObjectStorageACL options represent the access control level of a bucket.
+const (
+ ACLPrivate ObjectStorageACL = "private"
+ ACLPublicRead ObjectStorageACL = "public-read"
+ ACLAuthenticatedRead ObjectStorageACL = "authenticated-read"
+ ACLPublicReadWrite ObjectStorageACL = "public-read-write"
+)
+
+// ObjectStorageBucketsPagedResponse represents a paginated ObjectStorageBucket API response
+type ObjectStorageBucketsPagedResponse struct {
+ *PageOptions
+ Data []ObjectStorageBucket `json:"data"`
+}
+
+// endpoint gets the endpoint URL for ObjectStorageBucket
+func (ObjectStorageBucketsPagedResponse) endpoint(c *Client) string {
+ endpoint, err := c.ObjectStorageBuckets.Endpoint()
+ if err != nil {
+ panic(err)
+ }
+ return endpoint
+}
+
+// appendData appends ObjectStorageBuckets when processing paginated ObjectStorageBucket responses
+func (resp *ObjectStorageBucketsPagedResponse) appendData(r *ObjectStorageBucketsPagedResponse) {
+ resp.Data = append(resp.Data, r.Data...)
+}
+
+// ListObjectStorageBuckets lists ObjectStorageBuckets
+func (c *Client) ListObjectStorageBuckets(ctx context.Context, opts *ListOptions) ([]ObjectStorageBucket, error) {
+ response := ObjectStorageBucketsPagedResponse{}
+ err := c.listHelper(ctx, &response, opts)
+ if err != nil {
+ return nil, err
+ }
+ return response.Data, nil
+}
+
+// GetObjectStorageBucket gets the ObjectStorageBucket with the provided label
+func (c *Client) GetObjectStorageBucket(ctx context.Context, clusterID, label string) (*ObjectStorageBucket, error) {
+ e, err := c.ObjectStorageBuckets.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+ e = fmt.Sprintf("%s/%s/%s", e, clusterID, label)
+ r, err := coupleAPIErrors(c.R(ctx).SetResult(&ObjectStorageBucket{}).Get(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*ObjectStorageBucket), nil
+}
+
+// CreateObjectStorageBucket creates an ObjectStorageBucket
+func (c *Client) CreateObjectStorageBucket(ctx context.Context, createOpts ObjectStorageBucketCreateOptions) (*ObjectStorageBucket, error) {
+ var body string
+ e, err := c.ObjectStorageBuckets.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+
+ req := c.R(ctx).SetResult(&ObjectStorageBucket{})
+
+ if bodyData, err := json.Marshal(createOpts); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Post(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*ObjectStorageBucket), nil
+}
+
+// GetObjectStorageBucketAccess gets the current access config for a bucket
+func (c *Client) GetObjectStorageBucketAccess(ctx context.Context, clusterID, label string) (*ObjectStorageBucketAccess, error) {
+ e, err := c.ObjectStorageBuckets.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+
+ e = fmt.Sprintf("%s/%s/%s/access", e, clusterID, label)
+
+ req := c.R(ctx).SetResult(&ObjectStorageBucketAccess{})
+
+ r, err := coupleAPIErrors(
+ req.Get(e))
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Result().(*ObjectStorageBucketAccess), nil
+}
+
+// UpdateObjectStorageBucketAccess updates the access configuration for an ObjectStorageBucket
+func (c *Client) UpdateObjectStorageBucketAccess(ctx context.Context, clusterID, label string, access ObjectStorageBucketUpdateAccessOptions) error {
+ e, err := c.ObjectStorageBuckets.Endpoint()
+ if err != nil {
+ return err
+ }
+
+ e = fmt.Sprintf("%s/%s/%s/access", e, clusterID, label)
+
+ bodyData, err := json.Marshal(access)
+ if err != nil {
+ return err
+ }
+
+ body := string(bodyData)
+
+ _, err = coupleAPIErrors(c.R(ctx).
+ SetBody(body).
+ Post(e))
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// DeleteObjectStorageBucket deletes the ObjectStorageBucket with the specified label
+func (c *Client) DeleteObjectStorageBucket(ctx context.Context, clusterID, label string) error {
+ e, err := c.ObjectStorageBuckets.Endpoint()
+ if err != nil {
+ return err
+ }
+ e = fmt.Sprintf("%s/%s/%s", e, clusterID, label)
+
+ _, err = coupleAPIErrors(c.R(ctx).Delete(e))
+ return err
+}
diff --git a/vendor/github.com/linode/linodego/object_storage_clusters.go b/vendor/github.com/linode/linodego/object_storage_clusters.go
new file mode 100644
index 000000000..efc79acae
--- /dev/null
+++ b/vendor/github.com/linode/linodego/object_storage_clusters.go
@@ -0,0 +1,59 @@
+package linodego
+
+import (
+ "context"
+ "fmt"
+)
+
+// ObjectStorageCluster represents a linode object storage cluster object
+type ObjectStorageCluster struct {
+ ID string `json:"id"`
+ Domain string `json:"domain"`
+ Status string `json:"status"`
+ Region string `json:"region"`
+ StaticSiteDomain string `json:"static_site_domain"`
+}
+
+// ObjectStorageClustersPagedResponse represents a linode API response for listing
+type ObjectStorageClustersPagedResponse struct {
+ *PageOptions
+ Data []ObjectStorageCluster `json:"data"`
+}
+
+// endpoint gets the endpoint URL for ObjectStorageCluster
+func (ObjectStorageClustersPagedResponse) endpoint(c *Client) string {
+ endpoint, err := c.ObjectStorageClusters.Endpoint()
+ if err != nil {
+ panic(err)
+ }
+ return endpoint
+}
+
+// appendData appends ObjectStorageClusters when processing paginated ObjectStorageCluster responses
+func (resp *ObjectStorageClustersPagedResponse) appendData(r *ObjectStorageClustersPagedResponse) {
+ resp.Data = append(resp.Data, r.Data...)
+}
+
+// ListObjectStorageClusters lists ObjectStorageClusters
+func (c *Client) ListObjectStorageClusters(ctx context.Context, opts *ListOptions) ([]ObjectStorageCluster, error) {
+ response := ObjectStorageClustersPagedResponse{}
+ err := c.listHelper(ctx, &response, opts)
+ if err != nil {
+ return nil, err
+ }
+ return response.Data, nil
+}
+
+// GetObjectStorageCluster gets the ObjectStorageCluster with the provided ID
+func (c *Client) GetObjectStorageCluster(ctx context.Context, id string) (*ObjectStorageCluster, error) {
+	e, err := c.ObjectStorageClusters.Endpoint()
+	if err != nil {
+		return nil, err
+	}
+	e = fmt.Sprintf("%s/%s", e, id)
+	r, err := coupleAPIErrors(c.R(ctx).SetResult(&ObjectStorageCluster{}).Get(e))
+	if err != nil {
+		return nil, err
+	}
+	return r.Result().(*ObjectStorageCluster), nil
+}
diff --git a/vendor/github.com/linode/linodego/object_storage_keys.go b/vendor/github.com/linode/linodego/object_storage_keys.go
new file mode 100644
index 000000000..8e0a4d5af
--- /dev/null
+++ b/vendor/github.com/linode/linodego/object_storage_keys.go
@@ -0,0 +1,142 @@
+package linodego
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+)
+
+// ObjectStorageKey represents a linode object storage key object
+type ObjectStorageKey struct {
+ ID int `json:"id"`
+ Label string `json:"label"`
+ AccessKey string `json:"access_key"`
+ SecretKey string `json:"secret_key"`
+ Limited bool `json:"limited"`
+ BucketAccess *[]ObjectStorageKeyBucketAccess `json:"bucket_access"`
+}
+
+// ObjectStorageKeyBucketAccess represents a linode limited object storage key's bucket access
+type ObjectStorageKeyBucketAccess struct {
+ Cluster string `json:"cluster"`
+ BucketName string `json:"bucket_name"`
+ Permissions string `json:"permissions"`
+}
+
+// ObjectStorageKeyCreateOptions fields are those accepted by CreateObjectStorageKey
+type ObjectStorageKeyCreateOptions struct {
+ Label string `json:"label"`
+ BucketAccess *[]ObjectStorageKeyBucketAccess `json:"bucket_access"`
+}
+
+// ObjectStorageKeyUpdateOptions fields are those accepted by UpdateObjectStorageKey
+type ObjectStorageKeyUpdateOptions struct {
+ Label string `json:"label"`
+}
+
+// ObjectStorageKeysPagedResponse represents a linode API response for listing
+type ObjectStorageKeysPagedResponse struct {
+ *PageOptions
+ Data []ObjectStorageKey `json:"data"`
+}
+
+// endpoint gets the endpoint URL for Object Storage keys
+func (ObjectStorageKeysPagedResponse) endpoint(c *Client) string {
+ endpoint, err := c.ObjectStorageKeys.Endpoint()
+ if err != nil {
+ panic(err)
+ }
+ return endpoint
+}
+
+// appendData appends ObjectStorageKeys when processing paginated ObjectStorageKey responses
+func (resp *ObjectStorageKeysPagedResponse) appendData(r *ObjectStorageKeysPagedResponse) {
+	resp.Data = append(resp.Data, r.Data...)
+}
+
+// ListObjectStorageKeys lists ObjectStorageKeys
+func (c *Client) ListObjectStorageKeys(ctx context.Context, opts *ListOptions) ([]ObjectStorageKey, error) {
+ response := ObjectStorageKeysPagedResponse{}
+ err := c.listHelper(ctx, &response, opts)
+ if err != nil {
+ return nil, err
+ }
+ return response.Data, nil
+}
+
+// CreateObjectStorageKey creates a ObjectStorageKey
+func (c *Client) CreateObjectStorageKey(ctx context.Context, createOpts ObjectStorageKeyCreateOptions) (*ObjectStorageKey, error) {
+ var body string
+ e, err := c.ObjectStorageKeys.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+
+ req := c.R(ctx).SetResult(&ObjectStorageKey{})
+
+ if bodyData, err := json.Marshal(createOpts); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Post(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*ObjectStorageKey), nil
+}
+
+// GetObjectStorageKey gets the object storage key with the provided ID
+func (c *Client) GetObjectStorageKey(ctx context.Context, id int) (*ObjectStorageKey, error) {
+ e, err := c.ObjectStorageKeys.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+ e = fmt.Sprintf("%s/%d", e, id)
+ r, err := coupleAPIErrors(c.R(ctx).SetResult(&ObjectStorageKey{}).Get(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*ObjectStorageKey), nil
+}
+
+// UpdateObjectStorageKey updates the object storage key with the specified id
+func (c *Client) UpdateObjectStorageKey(ctx context.Context, id int, updateOpts ObjectStorageKeyUpdateOptions) (*ObjectStorageKey, error) {
+ var body string
+ e, err := c.ObjectStorageKeys.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+ e = fmt.Sprintf("%s/%d", e, id)
+
+ req := c.R(ctx).SetResult(&ObjectStorageKey{})
+
+ if bodyData, err := json.Marshal(updateOpts); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Put(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*ObjectStorageKey), nil
+}
+
+// DeleteObjectStorageKey deletes the ObjectStorageKey with the specified id
+func (c *Client) DeleteObjectStorageKey(ctx context.Context, id int) error {
+ e, err := c.ObjectStorageKeys.Endpoint()
+ if err != nil {
+ return err
+ }
+ e = fmt.Sprintf("%s/%d", e, id)
+
+ _, err = coupleAPIErrors(c.R(ctx).Delete(e))
+ return err
+}
diff --git a/vendor/github.com/linode/linodego/object_storage_object.go b/vendor/github.com/linode/linodego/object_storage_object.go
new file mode 100644
index 000000000..cff0fe260
--- /dev/null
+++ b/vendor/github.com/linode/linodego/object_storage_object.go
@@ -0,0 +1,101 @@
+package linodego
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/url"
+)
+
+// ObjectStorageObjectURLCreateOptions fields are those accepted by CreateObjectStorageObjectURL.
+type ObjectStorageObjectURLCreateOptions struct {
+	Name               string `json:"name"`
+	Method             string `json:"method"`
+	ContentType        string `json:"content_type,omitempty"`
+	ContentDisposition string `json:"content_disposition,omitempty"`
+	ExpiresIn          *int   `json:"expires_in,omitempty"`
+}
+
+// ObjectStorageObjectURL is the response body for a presigned Object Storage Object URL request.
+type ObjectStorageObjectURL struct {
+	URL    string `json:"url"`
+	Exists bool   `json:"exists"`
+}
+
+// ObjectStorageObjectACLConfig is the response body describing an Object's access control level.
+type ObjectStorageObjectACLConfig struct {
+	ACL    string `json:"acl"`
+	ACLXML string `json:"acl_xml"`
+}
+
+// ObjectStorageObjectACLConfigUpdateOptions fields are those accepted by UpdateObjectStorageObjectACLConfig.
+type ObjectStorageObjectACLConfigUpdateOptions struct {
+	Name string `json:"name"`
+	ACL  string `json:"acl"`
+}
+
+// CreateObjectStorageObjectURL creates a presigned URL to access a single Object in a bucket.
+func (c *Client) CreateObjectStorageObjectURL(ctx context.Context, clusterID, label string, options ObjectStorageObjectURLCreateOptions) (*ObjectStorageObjectURL, error) {
+	var body string
+	e, err := c.ObjectStorageBuckets.Endpoint()
+	if err != nil {
+		return nil, err
+	}
+
+	req := c.R(ctx).SetResult(&ObjectStorageObjectURL{})
+	e = fmt.Sprintf("%s/%s/%s/object-url", e, clusterID, label)
+
+	if bodyData, err := json.Marshal(options); err == nil {
+		body = string(bodyData)
+	} else {
+		return nil, NewError(err)
+	}
+
+	r, err := coupleAPIErrors(req.SetBody(body).Post(e))
+	if err != nil {
+		return nil, err
+	}
+	return r.Result().(*ObjectStorageObjectURL), nil
+}
+
+// GetObjectStorageObjectACLConfig gets the ACL config of an Object in a bucket.
+func (c *Client) GetObjectStorageObjectACLConfig(ctx context.Context, clusterID, label, object string) (*ObjectStorageObjectACLConfig, error) {
+	e, err := c.ObjectStorageBuckets.Endpoint()
+	if err != nil {
+		return nil, err
+	}
+
+	req := c.R(ctx).SetResult(&ObjectStorageObjectACLConfig{})
+	// Escape the object name so names containing '&', '=', spaces, etc. survive the query string.
+	e = fmt.Sprintf("%s/%s/%s/object-acl?name=%s", e, clusterID, label, url.QueryEscape(object))
+
+	r, err := coupleAPIErrors(req.Get(e))
+	if err != nil {
+		return nil, err
+	}
+	return r.Result().(*ObjectStorageObjectACLConfig), nil
+}
+
+// UpdateObjectStorageObjectACLConfig updates the ACL config of an Object in a bucket.
+func (c *Client) UpdateObjectStorageObjectACLConfig(ctx context.Context, clusterID, label string, options ObjectStorageObjectACLConfigUpdateOptions) (*ObjectStorageObjectACLConfig, error) {
+	var body string
+	e, err := c.ObjectStorageBuckets.Endpoint()
+	if err != nil {
+		return nil, err
+	}
+
+	req := c.R(ctx).SetResult(&ObjectStorageObjectACLConfig{})
+	e = fmt.Sprintf("%s/%s/%s/object-acl", e, clusterID, label)
+
+	if bodyData, err := json.Marshal(options); err == nil {
+		body = string(bodyData)
+	} else {
+		return nil, NewError(err)
+	}
+
+	r, err := coupleAPIErrors(req.SetBody(body).Put(e))
+	if err != nil {
+		return nil, err
+	}
+	return r.Result().(*ObjectStorageObjectACLConfig), nil
+}
diff --git a/vendor/github.com/linode/linodego/pagination.go b/vendor/github.com/linode/linodego/pagination.go
new file mode 100644
index 000000000..3710cdfa3
--- /dev/null
+++ b/vendor/github.com/linode/linodego/pagination.go
@@ -0,0 +1,500 @@
+package linodego
+
+/**
+ * Pagination and Filtering types and helpers
+ */
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "strconv"
+
+ "github.com/go-resty/resty/v2"
+)
+
+// PageOptions are the pagination parameters for List endpoints
+type PageOptions struct {
+ Page int `url:"page,omitempty" json:"page"`
+ Pages int `url:"pages,omitempty" json:"pages"`
+ Results int `url:"results,omitempty" json:"results"`
+}
+
+// ListOptions are the pagination and filtering (TODO) parameters for endpoints
+type ListOptions struct {
+ *PageOptions
+ PageSize int
+ Filter string
+}
+
+// NewListOptions simplified construction of ListOptions using only
+// the two writable properties, Page and Filter
+func NewListOptions(page int, filter string) *ListOptions {
+ return &ListOptions{PageOptions: &PageOptions{Page: page}, Filter: filter}
+}
+
+func applyListOptionsToRequest(opts *ListOptions, req *resty.Request) {
+ if opts != nil {
+ if opts.PageOptions != nil && opts.Page > 0 {
+ req.SetQueryParam("page", strconv.Itoa(opts.Page))
+ }
+
+ if opts.PageSize > 0 {
+ req.SetQueryParam("page_size", strconv.Itoa(opts.PageSize))
+ }
+
+ if len(opts.Filter) > 0 {
+ req.SetHeader("X-Filter", opts.Filter)
+ }
+ }
+}
+
+// listHelper abstracts fetching and pagination for GET endpoints that
+// do not require any IDs (top level endpoints).
+// When opts (or opts.Page) is nil, all pages will be fetched and
+// returned in a single (endpoint-specific)PagedResponse
+// opts.results and opts.pages will be updated from the API response
+// nolint
+func (c *Client) listHelper(ctx context.Context, i interface{}, opts *ListOptions) error {
+ var (
+ err error
+ pages int
+ results int
+ r *resty.Response
+ )
+
+ req := c.R(ctx)
+ applyListOptionsToRequest(opts, req)
+
+ switch v := i.(type) {
+ case *LinodeKernelsPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(LinodeKernelsPagedResponse{}).Get(v.endpoint(c))); err == nil {
+ pages = r.Result().(*LinodeKernelsPagedResponse).Pages
+ results = r.Result().(*LinodeKernelsPagedResponse).Results
+ v.appendData(r.Result().(*LinodeKernelsPagedResponse))
+ }
+ case *LinodeTypesPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(LinodeTypesPagedResponse{}).Get(v.endpoint(c))); err == nil {
+ pages = r.Result().(*LinodeTypesPagedResponse).Pages
+ results = r.Result().(*LinodeTypesPagedResponse).Results
+ v.appendData(r.Result().(*LinodeTypesPagedResponse))
+ }
+ case *ImagesPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(ImagesPagedResponse{}).Get(v.endpoint(c))); err == nil {
+ pages = r.Result().(*ImagesPagedResponse).Pages
+ results = r.Result().(*ImagesPagedResponse).Results
+ v.appendData(r.Result().(*ImagesPagedResponse))
+ }
+ case *StackscriptsPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(StackscriptsPagedResponse{}).Get(v.endpoint(c))); err == nil {
+ pages = r.Result().(*StackscriptsPagedResponse).Pages
+ results = r.Result().(*StackscriptsPagedResponse).Results
+ v.appendData(r.Result().(*StackscriptsPagedResponse))
+ }
+ case *InstancesPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(InstancesPagedResponse{}).Get(v.endpoint(c))); err == nil {
+ pages = r.Result().(*InstancesPagedResponse).Pages
+ results = r.Result().(*InstancesPagedResponse).Results
+ v.appendData(r.Result().(*InstancesPagedResponse))
+ }
+ case *RegionsPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(RegionsPagedResponse{}).Get(v.endpoint(c))); err == nil {
+ pages = r.Result().(*RegionsPagedResponse).Pages
+ results = r.Result().(*RegionsPagedResponse).Results
+ v.appendData(r.Result().(*RegionsPagedResponse))
+ }
+ case *VolumesPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(VolumesPagedResponse{}).Get(v.endpoint(c))); err == nil {
+ pages = r.Result().(*VolumesPagedResponse).Pages
+ results = r.Result().(*VolumesPagedResponse).Results
+ v.appendData(r.Result().(*VolumesPagedResponse))
+ }
+ case *DomainsPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(DomainsPagedResponse{}).Get(v.endpoint(c))); err == nil {
+ response, ok := r.Result().(*DomainsPagedResponse)
+ if !ok {
+ return fmt.Errorf("response is not a *DomainsPagedResponse")
+ }
+ pages = response.Pages
+ results = response.Results
+ v.appendData(response)
+ }
+ case *EventsPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(EventsPagedResponse{}).Get(v.endpoint(c))); err == nil {
+ pages = r.Result().(*EventsPagedResponse).Pages
+ results = r.Result().(*EventsPagedResponse).Results
+ v.appendData(r.Result().(*EventsPagedResponse))
+ }
+ case *FirewallsPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(FirewallsPagedResponse{}).Get(v.endpoint(c))); err == nil {
+ pages = r.Result().(*FirewallsPagedResponse).Pages
+ results = r.Result().(*FirewallsPagedResponse).Results
+ v.appendData(r.Result().(*FirewallsPagedResponse))
+ }
+ case *LKEClustersPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(LKEClustersPagedResponse{}).Get(v.endpoint(c))); err == nil {
+ pages = r.Result().(*LKEClustersPagedResponse).Pages
+ results = r.Result().(*LKEClustersPagedResponse).Results
+ v.appendData(r.Result().(*LKEClustersPagedResponse))
+ }
+ case *LKEVersionsPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(LKEVersionsPagedResponse{}).Get(v.endpoint(c))); err == nil {
+ pages = r.Result().(*LKEVersionsPagedResponse).Pages
+ results = r.Result().(*LKEVersionsPagedResponse).Results
+ v.appendData(r.Result().(*LKEVersionsPagedResponse))
+ }
+ case *LongviewSubscriptionsPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(LongviewSubscriptionsPagedResponse{}).Get(v.endpoint(c))); err == nil {
+ pages = r.Result().(*LongviewSubscriptionsPagedResponse).Pages
+ results = r.Result().(*LongviewSubscriptionsPagedResponse).Results
+ v.appendData(r.Result().(*LongviewSubscriptionsPagedResponse))
+ }
+ case *LongviewClientsPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(LongviewClientsPagedResponse{}).Get(v.endpoint(c))); err == nil {
+ pages = r.Result().(*LongviewClientsPagedResponse).Pages
+ results = r.Result().(*LongviewClientsPagedResponse).Results
+ v.appendData(r.Result().(*LongviewClientsPagedResponse))
+ }
+ case *IPAddressesPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(IPAddressesPagedResponse{}).Get(v.endpoint(c))); err == nil {
+ pages = r.Result().(*IPAddressesPagedResponse).Pages
+ results = r.Result().(*IPAddressesPagedResponse).Results
+ v.appendData(r.Result().(*IPAddressesPagedResponse))
+ }
+ case *IPv6PoolsPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(IPv6PoolsPagedResponse{}).Get(v.endpoint(c))); err == nil {
+ pages = r.Result().(*IPv6PoolsPagedResponse).Pages
+ results = r.Result().(*IPv6PoolsPagedResponse).Results
+ v.appendData(r.Result().(*IPv6PoolsPagedResponse))
+ }
+ case *IPv6RangesPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(IPv6RangesPagedResponse{}).Get(v.endpoint(c))); err == nil {
+ pages = r.Result().(*IPv6RangesPagedResponse).Pages
+ results = r.Result().(*IPv6RangesPagedResponse).Results
+ v.appendData(r.Result().(*IPv6RangesPagedResponse))
+ // @TODO consolidate this type with IPv6PoolsPagedResponse?
+ }
+ case *SSHKeysPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(SSHKeysPagedResponse{}).Get(v.endpoint(c))); err == nil {
+ response, ok := r.Result().(*SSHKeysPagedResponse)
+ if !ok {
+ return fmt.Errorf("response is not a *SSHKeysPagedResponse")
+ }
+ pages = response.Pages
+ results = response.Results
+ v.appendData(response)
+ }
+ case *TicketsPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(TicketsPagedResponse{}).Get(v.endpoint(c))); err == nil {
+ pages = r.Result().(*TicketsPagedResponse).Pages
+ results = r.Result().(*TicketsPagedResponse).Results
+ v.appendData(r.Result().(*TicketsPagedResponse))
+ }
+ case *InvoicesPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(InvoicesPagedResponse{}).Get(v.endpoint(c))); err == nil {
+ pages = r.Result().(*InvoicesPagedResponse).Pages
+ results = r.Result().(*InvoicesPagedResponse).Results
+ v.appendData(r.Result().(*InvoicesPagedResponse))
+ }
+ case *NotificationsPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(NotificationsPagedResponse{}).Get(v.endpoint(c))); err == nil {
+ pages = r.Result().(*NotificationsPagedResponse).Pages
+ results = r.Result().(*NotificationsPagedResponse).Results
+ v.appendData(r.Result().(*NotificationsPagedResponse))
+ }
+ case *OAuthClientsPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(OAuthClientsPagedResponse{}).Get(v.endpoint(c))); err == nil {
+ pages = r.Result().(*OAuthClientsPagedResponse).Pages
+ results = r.Result().(*OAuthClientsPagedResponse).Results
+ v.appendData(r.Result().(*OAuthClientsPagedResponse))
+ }
+ case *PaymentsPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(PaymentsPagedResponse{}).Get(v.endpoint(c))); err == nil {
+ pages = r.Result().(*PaymentsPagedResponse).Pages
+ results = r.Result().(*PaymentsPagedResponse).Results
+ v.appendData(r.Result().(*PaymentsPagedResponse))
+ }
+ case *NodeBalancersPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(NodeBalancersPagedResponse{}).Get(v.endpoint(c))); err == nil {
+ pages = r.Result().(*NodeBalancersPagedResponse).Pages
+ results = r.Result().(*NodeBalancersPagedResponse).Results
+ v.appendData(r.Result().(*NodeBalancersPagedResponse))
+ }
+ case *TagsPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(TagsPagedResponse{}).Get(v.endpoint(c))); err == nil {
+ pages = r.Result().(*TagsPagedResponse).Pages
+ results = r.Result().(*TagsPagedResponse).Results
+ v.appendData(r.Result().(*TagsPagedResponse))
+ }
+ case *TokensPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(TokensPagedResponse{}).Get(v.endpoint(c))); err == nil {
+ pages = r.Result().(*TokensPagedResponse).Pages
+ results = r.Result().(*TokensPagedResponse).Results
+ v.appendData(r.Result().(*TokensPagedResponse))
+ }
+ case *UsersPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(UsersPagedResponse{}).Get(v.endpoint(c))); err == nil {
+ pages = r.Result().(*UsersPagedResponse).Pages
+ results = r.Result().(*UsersPagedResponse).Results
+ v.appendData(r.Result().(*UsersPagedResponse))
+ }
+ case *ObjectStorageBucketsPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(ObjectStorageBucketsPagedResponse{}).Get(v.endpoint(c))); err == nil {
+ pages = r.Result().(*ObjectStorageBucketsPagedResponse).Pages
+ results = r.Result().(*ObjectStorageBucketsPagedResponse).Results
+ v.appendData(r.Result().(*ObjectStorageBucketsPagedResponse))
+ }
+ case *ObjectStorageClustersPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(ObjectStorageClustersPagedResponse{}).Get(v.endpoint(c))); err == nil {
+ pages = r.Result().(*ObjectStorageClustersPagedResponse).Pages
+ results = r.Result().(*ObjectStorageClustersPagedResponse).Results
+ v.appendData(r.Result().(*ObjectStorageClustersPagedResponse))
+ }
+ case *ObjectStorageKeysPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(ObjectStorageKeysPagedResponse{}).Get(v.endpoint(c))); err == nil {
+ pages = r.Result().(*ObjectStorageKeysPagedResponse).Pages
+ results = r.Result().(*ObjectStorageKeysPagedResponse).Results
+ v.appendData(r.Result().(*ObjectStorageKeysPagedResponse))
+ }
+ case *VLANsPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(VLANsPagedResponse{}).Get(v.endpoint(c))); err == nil {
+ pages = r.Result().(*VLANsPagedResponse).Pages
+ results = r.Result().(*VLANsPagedResponse).Results
+ v.appendData(r.Result().(*VLANsPagedResponse))
+ }
+ /**
+ case ProfileAppsPagedResponse:
+ case ProfileWhitelistPagedResponse:
+ case ManagedContactsPagedResponse:
+ case ManagedCredentialsPagedResponse:
+ case ManagedIssuesPagedResponse:
+ case ManagedLinodeSettingsPagedResponse:
+ case ManagedServicesPagedResponse:
+ **/
+ default:
+ log.Fatalf("listHelper interface{} %+v used", i)
+ }
+
+ if err != nil {
+ return err
+ }
+
+ if opts == nil {
+ for page := 2; page <= pages; page++ {
+ if err := c.listHelper(ctx, i, &ListOptions{PageOptions: &PageOptions{Page: page}}); err != nil {
+ return err
+ }
+ }
+ } else {
+ if opts.PageOptions == nil {
+ opts.PageOptions = &PageOptions{}
+ }
+
+ if opts.Page == 0 {
+ for page := 2; page <= pages; page++ {
+ opts.Page = page
+ if err := c.listHelper(ctx, i, opts); err != nil {
+ return err
+ }
+ }
+ }
+ opts.Results = results
+ opts.Pages = pages
+ }
+
+ return nil
+}
+
+// listHelperWithID abstracts fetching and pagination for GET endpoints that
+// require an ID (second level endpoints).
+// When opts (or opts.Page) is nil, all pages will be fetched and
+// returned in a single (endpoint-specific)PagedResponse
+// opts.results and opts.pages will be updated from the API response
+// nolint
+func (c *Client) listHelperWithID(ctx context.Context, i interface{}, idRaw interface{}, opts *ListOptions) error {
+ var (
+ err error
+ pages int
+ results int
+ r *resty.Response
+ )
+
+ req := c.R(ctx)
+ applyListOptionsToRequest(opts, req)
+
+ id, _ := idRaw.(int)
+
+ switch v := i.(type) {
+ case *DomainRecordsPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(DomainRecordsPagedResponse{}).Get(v.endpointWithID(c, id))); err == nil {
+ response, ok := r.Result().(*DomainRecordsPagedResponse)
+ if !ok {
+ return fmt.Errorf("response is not a *DomainRecordsPagedResponse")
+ }
+ pages = response.Pages
+ results = response.Results
+ v.appendData(response)
+ }
+ case *FirewallDevicesPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(FirewallDevicesPagedResponse{}).Get(v.endpointWithID(c, id))); err == nil {
+ pages = r.Result().(*FirewallDevicesPagedResponse).Pages
+ results = r.Result().(*FirewallDevicesPagedResponse).Results
+ v.appendData(r.Result().(*FirewallDevicesPagedResponse))
+ }
+ case *InstanceConfigsPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(InstanceConfigsPagedResponse{}).Get(v.endpointWithID(c, id))); err == nil {
+ pages = r.Result().(*InstanceConfigsPagedResponse).Pages
+ results = r.Result().(*InstanceConfigsPagedResponse).Results
+ v.appendData(r.Result().(*InstanceConfigsPagedResponse))
+ }
+ case *InstanceDisksPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(InstanceDisksPagedResponse{}).Get(v.endpointWithID(c, id))); err == nil {
+ pages = r.Result().(*InstanceDisksPagedResponse).Pages
+ results = r.Result().(*InstanceDisksPagedResponse).Results
+ v.appendData(r.Result().(*InstanceDisksPagedResponse))
+ }
+ case *InstanceVolumesPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(InstanceVolumesPagedResponse{}).Get(v.endpointWithID(c, id))); err == nil {
+ pages = r.Result().(*InstanceVolumesPagedResponse).Pages
+ results = r.Result().(*InstanceVolumesPagedResponse).Results
+ v.appendData(r.Result().(*InstanceVolumesPagedResponse))
+ }
+ case *InvoiceItemsPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(InvoiceItemsPagedResponse{}).Get(v.endpointWithID(c, id))); err == nil {
+ pages = r.Result().(*InvoiceItemsPagedResponse).Pages
+ results = r.Result().(*InvoiceItemsPagedResponse).Results
+ v.appendData(r.Result().(*InvoiceItemsPagedResponse))
+ }
+ case *LKEClusterAPIEndpointsPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(LKEClusterAPIEndpointsPagedResponse{}).Get(v.endpointWithID(c, id))); err == nil {
+ pages = r.Result().(*LKEClusterAPIEndpointsPagedResponse).Pages
+ results = r.Result().(*LKEClusterAPIEndpointsPagedResponse).Results
+ v.appendData(r.Result().(*LKEClusterAPIEndpointsPagedResponse))
+ }
+ case *LKEClusterPoolsPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(LKEClusterPoolsPagedResponse{}).Get(v.endpointWithID(c, id))); err == nil {
+ pages = r.Result().(*LKEClusterPoolsPagedResponse).Pages
+ results = r.Result().(*LKEClusterPoolsPagedResponse).Results
+ v.appendData(r.Result().(*LKEClusterPoolsPagedResponse))
+ }
+ case *NodeBalancerConfigsPagedResponse:
+ if r, err = coupleAPIErrors(req.SetResult(NodeBalancerConfigsPagedResponse{}).Get(v.endpointWithID(c, id))); err == nil {
+ pages = r.Result().(*NodeBalancerConfigsPagedResponse).Pages
+ results = r.Result().(*NodeBalancerConfigsPagedResponse).Results
+ v.appendData(r.Result().(*NodeBalancerConfigsPagedResponse))
+ }
+ case *TaggedObjectsPagedResponse:
+ idStr := idRaw.(string)
+
+ if r, err = coupleAPIErrors(req.SetResult(TaggedObjectsPagedResponse{}).Get(v.endpointWithID(c, idStr))); err == nil {
+ pages = r.Result().(*TaggedObjectsPagedResponse).Pages
+ results = r.Result().(*TaggedObjectsPagedResponse).Results
+ v.appendData(r.Result().(*TaggedObjectsPagedResponse))
+ }
+ /**
+ case TicketAttachmentsPagedResponse:
+ if r, err = req.SetResult(v).Get(v.endpoint(c)); r.Error() != nil {
+ return NewError(r)
+ } else if err == nil {
+ pages = r.Result().(*TicketAttachmentsPagedResponse).Pages
+ results = r.Result().(*TicketAttachmentsPagedResponse).Results
+ v.appendData(r.Result().(*TicketAttachmentsPagedResponse))
+ }
+ case TicketRepliesPagedResponse:
+ if r, err = req.SetResult(v).Get(v.endpoint(c)); r.Error() != nil {
+ return NewError(r)
+ } else if err == nil {
+ pages = r.Result().(*TicketRepliesPagedResponse).Pages
+ results = r.Result().(*TicketRepliesPagedResponse).Results
+ v.appendData(r.Result().(*TicketRepliesPagedResponse))
+ }
+ **/
+ default:
+ log.Fatalf("Unknown listHelperWithID interface{} %T used", i)
+ }
+
+ if err != nil {
+ return err
+ }
+
+ if opts == nil {
+ for page := 2; page <= pages; page++ {
+ if err := c.listHelperWithID(ctx, i, id, &ListOptions{PageOptions: &PageOptions{Page: page}}); err != nil {
+ return err
+ }
+ }
+ } else {
+ if opts.PageOptions == nil {
+ opts.PageOptions = &PageOptions{}
+ }
+ if opts.Page == 0 {
+ for page := 2; page <= pages; page++ {
+ opts.Page = page
+ if err := c.listHelperWithID(ctx, i, id, opts); err != nil {
+ return err
+ }
+ }
+ }
+ opts.Results = results
+ opts.Pages = pages
+ }
+
+ return nil
+}
+
+// listHelperWithTwoIDs abstracts fetching and pagination for GET endpoints that
+// require two IDs (third level endpoints).
+// When opts (or opts.Page) is nil, all pages will be fetched and
+// returned in a single (endpoint-specific) PagedResponse
+// opts.results and opts.pages will be updated from the API response
+// nolint
+func (c *Client) listHelperWithTwoIDs(ctx context.Context, i interface{}, firstID, secondID int, opts *ListOptions) error {
+	var (
+		err     error
+		pages   int
+		results int
+		r       *resty.Response
+	)
+
+	req := c.R(ctx)
+	applyListOptionsToRequest(opts, req)
+
+	switch v := i.(type) {
+	case *NodeBalancerNodesPagedResponse:
+		if r, err = coupleAPIErrors(req.SetResult(NodeBalancerNodesPagedResponse{}).Get(v.endpointWithTwoIDs(c, firstID, secondID))); err == nil {
+			pages = r.Result().(*NodeBalancerNodesPagedResponse).Pages
+			results = r.Result().(*NodeBalancerNodesPagedResponse).Results
+			v.appendData(r.Result().(*NodeBalancerNodesPagedResponse))
+		}
+	default:
+		log.Fatalf("Unknown listHelperWithTwoIDs interface{} %T used", i)
+	}
+
+	if err != nil {
+		return err
+	}
+
+	if opts == nil {
+		for page := 2; page <= pages; page++ {
+			// BUG FIX: previously recursed into c.listHelper, dropping firstID and
+			// secondID, which hit the wrong (top-level) endpoint for pages >= 2.
+			// Recurse into this same helper, as listHelperWithID does.
+			if err := c.listHelperWithTwoIDs(ctx, i, firstID, secondID, &ListOptions{PageOptions: &PageOptions{Page: page}}); err != nil {
+				return err
+			}
+		}
+	} else {
+		if opts.PageOptions == nil {
+			opts.PageOptions = &PageOptions{}
+		}
+		if opts.Page == 0 {
+			for page := 2; page <= pages; page++ {
+				opts.Page = page
+				if err := c.listHelperWithTwoIDs(ctx, i, firstID, secondID, opts); err != nil {
+					return err
+				}
+			}
+		}
+		opts.Results = results
+		opts.Pages = pages
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/linode/linodego/profile.go b/vendor/github.com/linode/linodego/profile.go
new file mode 100644
index 000000000..33b700950
--- /dev/null
+++ b/vendor/github.com/linode/linodego/profile.go
@@ -0,0 +1,116 @@
+package linodego
+
+/*
+ - copy profile_test.go and do the same
+ - When updating Profile structs,
+ - use pointers where ever null'able would have a different meaning if the wrapper
+ supplied "" or 0 instead
+ - Add "NameOfResource" to client.go, resources.go, pagination.go
+*/
+
+import (
+ "context"
+ "encoding/json"
+)
+
+// LishAuthMethod constants start with AuthMethod and include Linode API Lish Authentication Methods
+type LishAuthMethod string
+
+// LishAuthMethod constants are the methods of authentication allowed when connecting via Lish
+const (
+ AuthMethodPasswordKeys LishAuthMethod = "password_keys"
+ AuthMethodKeysOnly LishAuthMethod = "keys_only"
+ AuthMethodDisabled LishAuthMethod = "disabled"
+)
+
+// ProfileReferrals represent a User's status in the Referral Program
+type ProfileReferrals struct {
+ Total int `json:"total"`
+ Completed int `json:"completed"`
+ Pending int `json:"pending"`
+ Credit float64 `json:"credit"`
+ Code string `json:"code"`
+ URL string `json:"url"`
+}
+
+// Profile represents a Profile object
+type Profile struct {
+ UID int `json:"uid"`
+ Username string `json:"username"`
+ Email string `json:"email"`
+ Timezone string `json:"timezone"`
+ EmailNotifications bool `json:"email_notifications"`
+ IPWhitelistEnabled bool `json:"ip_whitelist_enabled"`
+ TwoFactorAuth bool `json:"two_factor_auth"`
+ Restricted bool `json:"restricted"`
+ LishAuthMethod LishAuthMethod `json:"lish_auth_method"`
+ Referrals ProfileReferrals `json:"referrals"`
+ AuthorizedKeys []string `json:"authorized_keys"`
+}
+
+// ProfileUpdateOptions fields are those accepted by UpdateProfile
+type ProfileUpdateOptions struct {
+ Email string `json:"email,omitempty"`
+ Timezone string `json:"timezone,omitempty"`
+ EmailNotifications *bool `json:"email_notifications,omitempty"`
+ IPWhitelistEnabled *bool `json:"ip_whitelist_enabled,omitempty"`
+ LishAuthMethod LishAuthMethod `json:"lish_auth_method,omitempty"`
+ AuthorizedKeys *[]string `json:"authorized_keys,omitempty"`
+ TwoFactorAuth *bool `json:"two_factor_auth,omitempty"`
+ Restricted *bool `json:"restricted,omitempty"`
+}
+
+// GetUpdateOptions converts a Profile to ProfileUpdateOptions for use in UpdateProfile
+func (i Profile) GetUpdateOptions() (o ProfileUpdateOptions) {
+ o.Email = i.Email
+ o.Timezone = i.Timezone
+ o.EmailNotifications = copyBool(&i.EmailNotifications)
+ o.IPWhitelistEnabled = copyBool(&i.IPWhitelistEnabled)
+ o.LishAuthMethod = i.LishAuthMethod
+ authorizedKeys := make([]string, len(i.AuthorizedKeys))
+ copy(authorizedKeys, i.AuthorizedKeys)
+ o.AuthorizedKeys = &authorizedKeys
+ o.TwoFactorAuth = copyBool(&i.TwoFactorAuth)
+ o.Restricted = copyBool(&i.Restricted)
+
+ return
+}
+
+// GetProfile returns the Profile of the authenticated user
+func (c *Client) GetProfile(ctx context.Context) (*Profile, error) {
+ e, err := c.Profile.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+
+ r, err := coupleAPIErrors(c.R(ctx).SetResult(&Profile{}).Get(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*Profile), nil
+}
+
+// UpdateProfile updates the Profile with the specified id
+func (c *Client) UpdateProfile(ctx context.Context, updateOpts ProfileUpdateOptions) (*Profile, error) {
+ var body string
+ e, err := c.Profile.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+
+ req := c.R(ctx).SetResult(&Profile{})
+
+ if bodyData, err := json.Marshal(updateOpts); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Put(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*Profile), nil
+}
diff --git a/vendor/github.com/linode/linodego/profile_sshkeys.go b/vendor/github.com/linode/linodego/profile_sshkeys.go
new file mode 100644
index 000000000..4e8291141
--- /dev/null
+++ b/vendor/github.com/linode/linodego/profile_sshkeys.go
@@ -0,0 +1,169 @@
+package linodego
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "github.com/linode/linodego/internal/parseabletime"
+)
+
+// SSHKey represents a SSHKey object
+type SSHKey struct {
+ ID int `json:"id"`
+ Label string `json:"label"`
+ SSHKey string `json:"ssh_key"`
+ Created *time.Time `json:"-"`
+}
+
+// SSHKeyCreateOptions fields are those accepted by CreateSSHKey
+type SSHKeyCreateOptions struct {
+ Label string `json:"label"`
+ SSHKey string `json:"ssh_key"`
+}
+
+// SSHKeyUpdateOptions fields are those accepted by UpdateSSHKey
+type SSHKeyUpdateOptions struct {
+ Label string `json:"label"`
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (i *SSHKey) UnmarshalJSON(b []byte) error {
+ type Mask SSHKey
+
+ p := struct {
+ *Mask
+ Created *parseabletime.ParseableTime `json:"created"`
+ }{
+ Mask: (*Mask)(i),
+ }
+
+ if err := json.Unmarshal(b, &p); err != nil {
+ return err
+ }
+
+ i.Created = (*time.Time)(p.Created)
+
+ return nil
+}
+
+// GetCreateOptions converts a SSHKey to SSHKeyCreateOptions for use in CreateSSHKey
+func (i SSHKey) GetCreateOptions() (o SSHKeyCreateOptions) {
+ o.Label = i.Label
+ o.SSHKey = i.SSHKey
+ return
+}
+
+// GetUpdateOptions converts a SSHKey to SSHKeyUpdateOptions for use in UpdateSSHKey
+func (i SSHKey) GetUpdateOptions() (o SSHKeyUpdateOptions) {
+ o.Label = i.Label
+ return
+}
+
+// SSHKeysPagedResponse represents a paginated SSHKey API response
+type SSHKeysPagedResponse struct {
+ *PageOptions
+ Data []SSHKey `json:"data"`
+}
+
+// endpoint gets the endpoint URL for SSHKey
+func (SSHKeysPagedResponse) endpoint(c *Client) string {
+ endpoint, err := c.SSHKeys.Endpoint()
+ if err != nil {
+ panic(err)
+ }
+ return endpoint
+}
+
+// appendData appends SSHKeys when processing paginated SSHKey responses
+func (resp *SSHKeysPagedResponse) appendData(r *SSHKeysPagedResponse) {
+ resp.Data = append(resp.Data, r.Data...)
+}
+
+// ListSSHKeys lists SSHKeys
+func (c *Client) ListSSHKeys(ctx context.Context, opts *ListOptions) ([]SSHKey, error) {
+ response := SSHKeysPagedResponse{}
+ err := c.listHelper(ctx, &response, opts)
+ if err != nil {
+ return nil, err
+ }
+ return response.Data, nil
+}
+
+// GetSSHKey gets the sshkey with the provided ID
+func (c *Client) GetSSHKey(ctx context.Context, id int) (*SSHKey, error) {
+ e, err := c.SSHKeys.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+ e = fmt.Sprintf("%s/%d", e, id)
+ r, err := coupleAPIErrors(c.R(ctx).SetResult(&SSHKey{}).Get(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*SSHKey), nil
+}
+
+// CreateSSHKey creates a SSHKey
+func (c *Client) CreateSSHKey(ctx context.Context, createOpts SSHKeyCreateOptions) (*SSHKey, error) {
+ var body string
+ e, err := c.SSHKeys.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+
+ req := c.R(ctx).SetResult(&SSHKey{})
+
+ if bodyData, err := json.Marshal(createOpts); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Post(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*SSHKey), nil
+}
+
+// UpdateSSHKey updates the SSHKey with the specified id
+func (c *Client) UpdateSSHKey(ctx context.Context, id int, updateOpts SSHKeyUpdateOptions) (*SSHKey, error) {
+ var body string
+ e, err := c.SSHKeys.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+ e = fmt.Sprintf("%s/%d", e, id)
+
+ req := c.R(ctx).SetResult(&SSHKey{})
+
+ if bodyData, err := json.Marshal(updateOpts); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Put(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*SSHKey), nil
+}
+
+// DeleteSSHKey deletes the SSHKey with the specified id
+func (c *Client) DeleteSSHKey(ctx context.Context, id int) error {
+ e, err := c.SSHKeys.Endpoint()
+ if err != nil {
+ return err
+ }
+ e = fmt.Sprintf("%s/%d", e, id)
+
+ _, err = coupleAPIErrors(c.R(ctx).Delete(e))
+ return err
+}
diff --git a/vendor/github.com/linode/linodego/profile_tokens.go b/vendor/github.com/linode/linodego/profile_tokens.go
new file mode 100644
index 000000000..d097f0f3f
--- /dev/null
+++ b/vendor/github.com/linode/linodego/profile_tokens.go
@@ -0,0 +1,206 @@
+package linodego
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "github.com/linode/linodego/internal/parseabletime"
+)
+
+// Token represents a Token object
+type Token struct {
+ // This token's unique ID, which can be used to revoke it.
+ ID int `json:"id"`
+
+ // The scopes this token was created with. These define what parts of the Account the token can be used to access. Many command-line tools, such as the Linode CLI, require tokens with access to *. Tokens with more restrictive scopes are generally more secure.
+ // Valid values are "*" or a comma separated list of scopes https://developers.linode.com/api/v4/#o-auth
+ Scopes string `json:"scopes"`
+
+ // This token's label. This is for display purposes only, but can be used to more easily track what you're using each token for. (1-100 Characters)
+ Label string `json:"label"`
+
+ // The token used to access the API. When the token is created, the full token is returned here. Otherwise, only the first 16 characters are returned.
+ Token string `json:"token"`
+
+ // The date and time this token was created.
+ Created *time.Time `json:"-"`
+
+ // When this token will expire. Personal Access Tokens cannot be renewed, so after this time the token will be completely unusable and a new token will need to be generated. Tokens may be created with "null" as their expiry and will never expire unless revoked.
+ Expiry *time.Time `json:"-"`
+}
+
+// TokenCreateOptions fields are those accepted by CreateToken
+type TokenCreateOptions struct {
+ // The scopes this token was created with. These define what parts of the Account the token can be used to access. Many command-line tools, such as the Linode CLI, require tokens with access to *. Tokens with more restrictive scopes are generally more secure.
+ Scopes string `json:"scopes"`
+
+ // This token's label. This is for display purposes only, but can be used to more easily track what you're using each token for. (1-100 Characters)
+ Label string `json:"label"`
+
+ // When this token will expire. Personal Access Tokens cannot be renewed, so after this time the token will be completely unusable and a new token will need to be generated. Tokens may be created with "null" as their expiry and will never expire unless revoked.
+ Expiry *time.Time `json:"expiry"`
+}
+
+// TokenUpdateOptions fields are those accepted by UpdateToken
+type TokenUpdateOptions struct {
+ // This token's label. This is for display purposes only, but can be used to more easily track what you're using each token for. (1-100 Characters)
+ Label string `json:"label"`
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (i *Token) UnmarshalJSON(b []byte) error {
+ type Mask Token
+
+ p := struct {
+ *Mask
+ Created *parseabletime.ParseableTime `json:"created"`
+ Expiry *parseabletime.ParseableTime `json:"expiry"`
+ }{
+ Mask: (*Mask)(i),
+ }
+
+ if err := json.Unmarshal(b, &p); err != nil {
+ return err
+ }
+
+ i.Created = (*time.Time)(p.Created)
+ i.Expiry = (*time.Time)(p.Expiry)
+
+ return nil
+}
+
+// GetCreateOptions converts a Token to TokenCreateOptions for use in CreateToken
+func (i Token) GetCreateOptions() (o TokenCreateOptions) {
+ o.Label = i.Label
+ o.Expiry = copyTime(i.Expiry)
+ o.Scopes = i.Scopes
+ return
+}
+
+// GetUpdateOptions converts a Token to TokenUpdateOptions for use in UpdateToken
+func (i Token) GetUpdateOptions() (o TokenUpdateOptions) {
+ o.Label = i.Label
+ return
+}
+
+// TokensPagedResponse represents a paginated Token API response
+type TokensPagedResponse struct {
+ *PageOptions
+ Data []Token `json:"data"`
+}
+
+// endpoint gets the endpoint URL for Token
+func (TokensPagedResponse) endpoint(c *Client) string {
+ endpoint, err := c.Tokens.Endpoint()
+ if err != nil {
+ panic(err)
+ }
+ return endpoint
+}
+
+// appendData appends Tokens when processing paginated Token responses
+func (resp *TokensPagedResponse) appendData(r *TokensPagedResponse) {
+ resp.Data = append(resp.Data, r.Data...)
+}
+
+// ListTokens lists Tokens
+func (c *Client) ListTokens(ctx context.Context, opts *ListOptions) ([]Token, error) {
+ response := TokensPagedResponse{}
+ err := c.listHelper(ctx, &response, opts)
+ if err != nil {
+ return nil, err
+ }
+ return response.Data, nil
+}
+
+// GetToken gets the token with the provided ID
+func (c *Client) GetToken(ctx context.Context, id int) (*Token, error) {
+ e, err := c.Tokens.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+ e = fmt.Sprintf("%s/%d", e, id)
+ r, err := coupleAPIErrors(c.R(ctx).SetResult(&Token{}).Get(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*Token), nil
+}
+
+// CreateToken creates a Token
+func (c *Client) CreateToken(ctx context.Context, createOpts TokenCreateOptions) (*Token, error) {
+ var body string
+ e, err := c.Tokens.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+
+ req := c.R(ctx).SetResult(&Token{})
+
+ // Format the Time as a string to meet the ISO8601 requirement
+ createOptsFixed := struct {
+ Label string `json:"label"`
+ Scopes string `json:"scopes"`
+ Expiry *string `json:"expiry"`
+ }{}
+ createOptsFixed.Label = createOpts.Label
+ createOptsFixed.Scopes = createOpts.Scopes
+ if createOpts.Expiry != nil {
+ iso8601Expiry := createOpts.Expiry.UTC().Format("2006-01-02T15:04:05")
+ createOptsFixed.Expiry = &iso8601Expiry
+ }
+
+ if bodyData, err := json.Marshal(createOptsFixed); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Post(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*Token), nil
+}
+
+// UpdateToken updates the Token with the specified id
+func (c *Client) UpdateToken(ctx context.Context, id int, updateOpts TokenUpdateOptions) (*Token, error) {
+ var body string
+ e, err := c.Tokens.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+ e = fmt.Sprintf("%s/%d", e, id)
+
+ req := c.R(ctx).SetResult(&Token{})
+
+ if bodyData, err := json.Marshal(updateOpts); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Put(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*Token), nil
+}
+
+// DeleteToken deletes the Token with the specified id
+func (c *Client) DeleteToken(ctx context.Context, id int) error {
+ e, err := c.Tokens.Endpoint()
+ if err != nil {
+ return err
+ }
+ e = fmt.Sprintf("%s/%d", e, id)
+
+ _, err = coupleAPIErrors(c.R(ctx).Delete(e))
+ return err
+}
diff --git a/vendor/github.com/linode/linodego/regions.go b/vendor/github.com/linode/linodego/regions.go
new file mode 100644
index 000000000..9d037a7f2
--- /dev/null
+++ b/vendor/github.com/linode/linodego/regions.go
@@ -0,0 +1,56 @@
+package linodego
+
+import (
+	"context"
+	"fmt"
+)
+
+// Region represents a linode region object
+type Region struct {
+	ID      string `json:"id"`
+	Country string `json:"country"`
+}
+
+// RegionsPagedResponse represents a linode API response for listing
+type RegionsPagedResponse struct {
+	*PageOptions
+	Data []Region `json:"data"`
+}
+
+// endpoint gets the endpoint URL for Region; panics if the endpoint cannot be resolved
+func (RegionsPagedResponse) endpoint(c *Client) string {
+	endpoint, err := c.Regions.Endpoint()
+	if err != nil {
+		panic(err)
+	}
+	return endpoint
+}
+
+// appendData appends Regions when processing paginated Region responses
+func (resp *RegionsPagedResponse) appendData(r *RegionsPagedResponse) {
+	resp.Data = append(resp.Data, r.Data...)
+}
+
+// ListRegions lists Regions
+func (c *Client) ListRegions(ctx context.Context, opts *ListOptions) ([]Region, error) {
+	response := RegionsPagedResponse{}
+	err := c.listHelper(ctx, &response, opts)
+	if err != nil {
+		return nil, err
+	}
+	return response.Data, nil
+}
+
+// GetRegion gets the Region with the provided ID
+func (c *Client) GetRegion(ctx context.Context, id string) (*Region, error) {
+	e, err := c.Regions.Endpoint()
+	if err != nil {
+		return nil, err
+	}
+	e = fmt.Sprintf("%s/%s", e, id)
+	r, err := coupleAPIErrors(c.R(ctx).SetResult(&Region{}).Get(e))
+	if err != nil {
+		return nil, err
+	}
+	return r.Result().(*Region), nil
+}
diff --git a/vendor/github.com/linode/linodego/resources.go b/vendor/github.com/linode/linodego/resources.go
new file mode 100644
index 000000000..b0bfccf0f
--- /dev/null
+++ b/vendor/github.com/linode/linodego/resources.go
@@ -0,0 +1,196 @@
+package linodego
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"text/template"
+
+	"github.com/go-resty/resty/v2"
+)
+
+const (
+	accountName                  = "account"
+	accountSettingsName          = "accountsettings"
+	domainRecordsName            = "records"
+	domainsName                  = "domains"
+	eventsName                   = "events"
+	firewallsName                = "firewalls"
+	firewallDevicesName          = "firewalldevices"
+	firewallRulesName            = "firewallrules"
+	imagesName                   = "images"
+	instanceConfigsName          = "configs"
+	instanceDisksName            = "disks"
+	instanceIPsName              = "ips"
+	instanceSnapshotsName        = "snapshots"
+	instanceStatsName            = "instancestats"
+	instanceVolumesName          = "instancevolumes"
+	instancesName                = "instances"
+	invoiceItemsName             = "invoiceitems"
+	invoicesName                 = "invoices"
+	ipaddressesName              = "ipaddresses"
+	ipv6poolsName                = "ipv6pools"
+	ipv6rangesName               = "ipv6ranges"
+	kernelsName                  = "kernels"
+	lkeClusterAPIEndpointsName   = "lkeclusterapiendpoints"
+	lkeClustersName              = "lkeclusters"
+	lkeClusterPoolsName          = "lkeclusterpools"
+	lkeVersionsName              = "lkeversions"
+	longviewName                 = "longview"
+	longviewclientsName          = "longviewclients"
+	longviewsubscriptionsName    = "longviewsubscriptions"
+	managedName                  = "managed"
+	nodebalancerconfigsName      = "nodebalancerconfigs"
+	nodebalancernodesName        = "nodebalancernodes"
+	nodebalancerStatsName        = "nodebalancerstats"
+	nodebalancersName            = "nodebalancers"
+	notificationsName            = "notifications"
+	oauthClientsName             = "oauthClients"
+	objectStorageBucketsName     = "objectstoragebuckets"
+	objectStorageBucketCertsName = "objectstoragebucketcerts"
+	objectStorageClustersName    = "objectstorageclusters"
+	objectStorageKeysName        = "objectstoragekeys"
+	paymentsName                 = "payments"
+	profileName                  = "profile"
+	regionsName                  = "regions"
+	sshkeysName                  = "sshkeys"
+	stackscriptsName             = "stackscripts"
+	tagsName                     = "tags"
+	ticketsName                  = "tickets"
+	tokensName                   = "tokens"
+	typesName                    = "types"
+	userGrantsName               = "usergrants"
+	usersName                    = "users"
+	volumesName                  = "volumes"
+	vlansName                    = "vlans"
+
+	accountEndpoint                  = "account"
+	accountSettingsEndpoint          = "account/settings"
+	domainRecordsEndpoint            = "domains/{{ .ID }}/records"
+	domainsEndpoint                  = "domains"
+	eventsEndpoint                   = "account/events"
+	firewallsEndpoint                = "networking/firewalls"
+	firewallDevicesEndpoint          = "networking/firewalls/{{ .ID }}/devices"
+	firewallRulesEndpoint            = "networking/firewalls/{{ .ID }}/rules"
+	imagesEndpoint                   = "images"
+	instanceConfigsEndpoint          = "linode/instances/{{ .ID }}/configs"
+	instanceDisksEndpoint            = "linode/instances/{{ .ID }}/disks"
+	instanceIPsEndpoint              = "linode/instances/{{ .ID }}/ips"
+	instanceSnapshotsEndpoint        = "linode/instances/{{ .ID }}/backups"
+	instanceStatsEndpoint            = "linode/instances/{{ .ID }}/stats"
+	instanceVolumesEndpoint          = "linode/instances/{{ .ID }}/volumes"
+	instancesEndpoint                = "linode/instances"
+	invoiceItemsEndpoint             = "account/invoices/{{ .ID }}/items"
+	invoicesEndpoint                 = "account/invoices"
+	ipaddressesEndpoint              = "networking/ips"
+	ipv6poolsEndpoint                = "networking/ipv6/pools"
+	ipv6rangesEndpoint               = "networking/ipv6/ranges"
+	kernelsEndpoint                  = "linode/kernels"
+	lkeClustersEndpoint              = "lke/clusters"
+	lkeClusterAPIEndpointsEndpoint   = "lke/clusters/{{ .ID }}/api-endpoints"
+	lkeClusterPoolsEndpoint          = "lke/clusters/{{ .ID }}/pools"
+	lkeVersionsEndpoint              = "lke/versions"
+	longviewEndpoint                 = "longview"
+	longviewclientsEndpoint          = "longview/clients"
+	longviewsubscriptionsEndpoint    = "longview/subscriptions"
+	managedEndpoint                  = "managed"
+	// @TODO we can't use these nodebalancer endpoints unless we include these templated fields
+	// The API seems inconsistent about including parent IDs in objects, (compare instance configs to nb configs)
+	// Parent IDs would be immutable for updates and are ignored in create requests ..
+	// Should we include these fields in CreateOpts and UpdateOpts?
+	nodebalancerconfigsEndpoint      = "nodebalancers/{{ .ID }}/configs"
+	nodebalancernodesEndpoint        = "nodebalancers/{{ .ID }}/configs/{{ .SecondID }}/nodes"
+	nodebalancerStatsEndpoint        = "nodebalancers/{{ .ID }}/stats"
+	nodebalancersEndpoint            = "nodebalancers"
+	notificationsEndpoint            = "account/notifications"
+	oauthClientsEndpoint             = "account/oauth-clients"
+	objectStorageBucketsEndpoint     = "object-storage/buckets"
+	objectStorageBucketCertsEndpoint = "object-storage/buckets/{{ .ID }}/{{ .SecondID }}/ssl"
+	objectStorageClustersEndpoint    = "object-storage/clusters"
+	objectStorageKeysEndpoint        = "object-storage/keys"
+	paymentsEndpoint                 = "account/payments"
+	profileEndpoint                  = "profile"
+	regionsEndpoint                  = "regions"
+	sshkeysEndpoint                  = "profile/sshkeys"
+	stackscriptsEndpoint             = "linode/stackscripts"
+	tagsEndpoint                     = "tags"
+	ticketsEndpoint                  = "support/tickets"
+	tokensEndpoint                   = "profile/tokens"
+	typesEndpoint                    = "linode/types"
+	userGrantsEndpoint               = "account/users/{{ .ID }}/grants"
+	usersEndpoint                    = "account/users"
+	vlansEndpoint                    = "networking/vlans"
+	volumesEndpoint                  = "volumes"
+)
+
+// Resource represents a linode API resource
+type Resource struct {
+	name             string
+	endpoint         string
+	isTemplate       bool
+	endpointTemplate *template.Template
+	R                func(ctx context.Context) *resty.Request
+	PR               func(ctx context.Context) *resty.Request
+}
+
+// NewResource is the factory to create a new Resource struct. If it has a template string the useTemplate bool must be set.
+func NewResource(client *Client, name string, endpoint string, useTemplate bool, singleType interface{}, pagedType interface{}) *Resource {
+	var tmpl *template.Template
+
+	if useTemplate {
+		tmpl = template.Must(template.New(name).Parse(endpoint))
+	}
+
+	r := func(ctx context.Context) *resty.Request {
+		return client.R(ctx).SetResult(singleType)
+	}
+
+	pr := func(ctx context.Context) *resty.Request {
+		return client.R(ctx).SetResult(pagedType)
+	}
+
+	return &Resource{name, endpoint, useTemplate, tmpl, r, pr}
+}
+// render resolves the endpoint template against one ID (or an ID and SecondID) parameter
+func (r Resource) render(data ...interface{}) (string, error) {
+	if data == nil {
+		return "", NewError("Cannot template endpoint with data")
+	}
+	out := ""
+	buf := bytes.NewBufferString(out)
+
+	var substitutions interface{}
+
+	switch len(data) {
+	case 1:
+		substitutions = struct{ ID interface{} }{data[0]}
+	case 2:
+		substitutions = struct {
+			ID       interface{}
+			SecondID interface{}
+		}{data[0], data[1]}
+	default:
+		return "", NewError("Too many arguments to render template (expected 1 or 2)")
+	}
+
+	if err := r.endpointTemplate.Execute(buf, substitutions); err != nil {
+		return "", NewError(err)
+	}
+	return buf.String(), nil
+}
+
+// endpointWithParams will return the rendered endpoint string for the resource with provided parameters
+func (r Resource) endpointWithParams(params ...interface{}) (string, error) {
+	if !r.isTemplate {
+		return r.endpoint, nil
+	}
+	return r.render(params...)
+}
+
+// Endpoint will return the non-templated endpoint string for resource
+func (r Resource) Endpoint() (string, error) {
+	if r.isTemplate {
+		return "", NewError(fmt.Sprintf("Tried to get endpoint for %s without providing data for template", r.name))
+	}
+	return r.endpoint, nil
+}
diff --git a/vendor/github.com/linode/linodego/retries.go b/vendor/github.com/linode/linodego/retries.go
new file mode 100644
index 000000000..2a48bb818
--- /dev/null
+++ b/vendor/github.com/linode/linodego/retries.go
@@ -0,0 +1,78 @@
+package linodego
+
+import (
+	"log"
+	"net/http"
+	"strconv"
+	"time"
+
+	"github.com/go-resty/resty/v2"
+)
+
+const retryAfterHeaderName = "Retry-After"
+
+// RetryConditional is a retry predicate: it reports whether the request should be retried.
+type RetryConditional resty.RetryConditionFunc
+
+// RetryAfter computes how long to wait before the next retry attempt.
+type RetryAfter resty.RetryAfterFunc
+
+// configureRetries configures resty to retry requests, blocking until enough time
+// has passed as determined by the Retry-After response header.
+// If the Retry-After header is not set, we fall back to value of SetPollDelay.
+func configureRetries(c *Client) {
+	c.resty.
+		SetRetryCount(1000).
+		AddRetryCondition(checkRetryConditionals(c)).
+		SetRetryAfter(respectRetryAfter)
+}
+
+func checkRetryConditionals(c *Client) func(*resty.Response, error) bool {
+	return func(r *resty.Response, err error) bool {
+		for _, retryConditional := range c.retryConditionals {
+			retry := retryConditional(r, err)
+			if retry {
+				log.Printf("[INFO] Received error %s - Retrying", r.Error())
+				return true
+			}
+		}
+		return false
+	}
+}
+
+// linodeBusyRetryCondition retries specifically on "Linode busy." errors (HTTP 400).
+// The retry wait time is configured in SetPollDelay
+func linodeBusyRetryCondition(r *resty.Response, _ error) bool {
+	apiError, ok := r.Error().(*APIError)
+	linodeBusy := ok && apiError.Error() == "Linode busy."
+	retry := r.StatusCode() == http.StatusBadRequest && linodeBusy
+	return retry
+}
+
+func tooManyRequestsRetryCondition(r *resty.Response, _ error) bool {
+	return r.StatusCode() == http.StatusTooManyRequests
+}
+
+func serviceUnavailableRetryCondition(r *resty.Response, _ error) bool {
+	return r.StatusCode() == http.StatusServiceUnavailable
+}
+
+func requestTimeoutRetryCondition(r *resty.Response, _ error) bool {
+	return r.StatusCode() == http.StatusRequestTimeout
+}
+
+func respectRetryAfter(client *resty.Client, resp *resty.Response) (time.Duration, error) {
+	retryAfterStr := resp.Header().Get(retryAfterHeaderName)
+	if retryAfterStr == "" {
+		return 0, nil
+	}
+
+	retryAfter, err := strconv.Atoi(retryAfterStr)
+	if err != nil {
+		return 0, err
+	}
+
+	duration := time.Duration(retryAfter) * time.Second
+	log.Printf("[INFO] Respecting Retry-After Header of %d (%s) (max %s)", retryAfter, duration, client.RetryMaxWaitTime)
+	return duration, nil
+}
diff --git a/vendor/github.com/linode/linodego/stackscripts.go b/vendor/github.com/linode/linodego/stackscripts.go
new file mode 100644
index 000000000..06591575f
--- /dev/null
+++ b/vendor/github.com/linode/linodego/stackscripts.go
@@ -0,0 +1,220 @@
+package linodego
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"time"
+
+	"github.com/linode/linodego/internal/parseabletime"
+)
+
+// Stackscript represents a Linode StackScript
+type Stackscript struct {
+	ID                int               `json:"id"`
+	Username          string            `json:"username"`
+	Label             string            `json:"label"`
+	Description       string            `json:"description"`
+	Ordinal           int               `json:"ordinal"`
+	LogoURL           string            `json:"logo_url"`
+	Images            []string          `json:"images"`
+	DeploymentsTotal  int               `json:"deployments_total"`
+	DeploymentsActive int               `json:"deployments_active"`
+	IsPublic          bool              `json:"is_public"`
+	Mine              bool              `json:"mine"`
+	Created           *time.Time        `json:"-"`
+	Updated           *time.Time        `json:"-"`
+	RevNote           string            `json:"rev_note"`
+	Script            string            `json:"script"`
+	UserDefinedFields *[]StackscriptUDF `json:"user_defined_fields"`
+	UserGravatarID    string            `json:"user_gravatar_id"`
+}
+
+// StackscriptUDF define a single variable that is accepted by a Stackscript
+type StackscriptUDF struct {
+	// A human-readable label for the field that will serve as the input prompt for entering the value during deployment.
+	Label string `json:"label"`
+
+	// The name of the field.
+	Name string `json:"name"`
+
+	// An example value for the field.
+	Example string `json:"example"`
+
+	// A list of acceptable single values for the field.
+	OneOf string `json:"oneOf,omitempty"`
+
+	// A list of acceptable values for the field in any quantity, combination or order.
+	ManyOf string `json:"manyOf,omitempty"`
+
+	// The default value. If not specified, this value will be used.
+	Default string `json:"default,omitempty"`
+}
+
+// StackscriptCreateOptions fields are those accepted by CreateStackscript
+type StackscriptCreateOptions struct {
+	Label       string   `json:"label"`
+	Description string   `json:"description"`
+	Images      []string `json:"images"`
+	IsPublic    bool     `json:"is_public"`
+	RevNote     string   `json:"rev_note"`
+	Script      string   `json:"script"`
+}
+
+// StackscriptUpdateOptions fields are those accepted by UpdateStackscript
+type StackscriptUpdateOptions StackscriptCreateOptions
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (i *Stackscript) UnmarshalJSON(b []byte) error {
+	type Mask Stackscript
+
+	p := struct {
+		*Mask
+		Created *parseabletime.ParseableTime `json:"created"`
+		Updated *parseabletime.ParseableTime `json:"updated"`
+	}{
+		Mask: (*Mask)(i),
+	}
+
+	if err := json.Unmarshal(b, &p); err != nil {
+		return err
+	}
+
+	i.Created = (*time.Time)(p.Created)
+	i.Updated = (*time.Time)(p.Updated)
+
+	return nil
+}
+
+// GetCreateOptions converts a Stackscript to StackscriptCreateOptions for use in CreateStackscript
+func (i Stackscript) GetCreateOptions() StackscriptCreateOptions {
+	return StackscriptCreateOptions{
+		Label:       i.Label,
+		Description: i.Description,
+		Images:      i.Images,
+		IsPublic:    i.IsPublic,
+		RevNote:     i.RevNote,
+		Script:      i.Script,
+	}
+}
+
+// GetUpdateOptions converts a Stackscript to StackscriptUpdateOptions for use in UpdateStackscript
+func (i Stackscript) GetUpdateOptions() StackscriptUpdateOptions {
+	return StackscriptUpdateOptions{
+		Label:       i.Label,
+		Description: i.Description,
+		Images:      i.Images,
+		IsPublic:    i.IsPublic,
+		RevNote:     i.RevNote,
+		Script:      i.Script,
+	}
+}
+
+// StackscriptsPagedResponse represents a paginated Stackscript API response
+type StackscriptsPagedResponse struct {
+	*PageOptions
+	Data []Stackscript `json:"data"`
+}
+
+// endpoint gets the endpoint URL for Stackscript; panics if the endpoint cannot be resolved
+func (StackscriptsPagedResponse) endpoint(c *Client) string {
+	endpoint, err := c.StackScripts.Endpoint()
+	if err != nil {
+		panic(err)
+	}
+	return endpoint
+}
+
+// appendData appends Stackscripts when processing paginated Stackscript responses
+func (resp *StackscriptsPagedResponse) appendData(r *StackscriptsPagedResponse) {
+	resp.Data = append(resp.Data, r.Data...)
+}
+
+// ListStackscripts lists Stackscripts
+func (c *Client) ListStackscripts(ctx context.Context, opts *ListOptions) ([]Stackscript, error) {
+	response := StackscriptsPagedResponse{}
+	err := c.listHelper(ctx, &response, opts)
+	if err != nil {
+		return nil, err
+	}
+	return response.Data, nil
+}
+
+// GetStackscript gets the Stackscript with the provided ID
+func (c *Client) GetStackscript(ctx context.Context, id int) (*Stackscript, error) {
+	e, err := c.StackScripts.Endpoint()
+	if err != nil {
+		return nil, err
+	}
+	e = fmt.Sprintf("%s/%d", e, id)
+	r, err := coupleAPIErrors(c.R(ctx).
+		SetResult(&Stackscript{}).
+		Get(e))
+	if err != nil {
+		return nil, err
+	}
+	return r.Result().(*Stackscript), nil
+}
+
+// CreateStackscript creates a StackScript
+func (c *Client) CreateStackscript(ctx context.Context, createOpts StackscriptCreateOptions) (*Stackscript, error) {
+	var body string
+	e, err := c.StackScripts.Endpoint()
+	if err != nil {
+		return nil, err
+	}
+
+	req := c.R(ctx).SetResult(&Stackscript{})
+
+	if bodyData, err := json.Marshal(createOpts); err == nil {
+		body = string(bodyData)
+	} else {
+		return nil, NewError(err)
+	}
+
+	r, err := coupleAPIErrors(req.
+		SetBody(body).
+		Post(e))
+	if err != nil {
+		return nil, err
+	}
+	return r.Result().(*Stackscript), nil
+}
+
+// UpdateStackscript updates the StackScript with the specified id
+func (c *Client) UpdateStackscript(ctx context.Context, id int, updateOpts StackscriptUpdateOptions) (*Stackscript, error) {
+	var body string
+	e, err := c.StackScripts.Endpoint()
+	if err != nil {
+		return nil, err
+	}
+	e = fmt.Sprintf("%s/%d", e, id)
+
+	req := c.R(ctx).SetResult(&Stackscript{})
+
+	if bodyData, err := json.Marshal(updateOpts); err == nil {
+		body = string(bodyData)
+	} else {
+		return nil, NewError(err)
+	}
+
+	r, err := coupleAPIErrors(req.
+		SetBody(body).
+		Put(e))
+	if err != nil {
+		return nil, err
+	}
+	return r.Result().(*Stackscript), nil
+}
+
+// DeleteStackscript deletes the StackScript with the specified id
+func (c *Client) DeleteStackscript(ctx context.Context, id int) error {
+	e, err := c.StackScripts.Endpoint()
+	if err != nil {
+		return err
+	}
+	e = fmt.Sprintf("%s/%d", e, id)
+
+	_, err = coupleAPIErrors(c.R(ctx).Delete(e))
+	return err
+}
diff --git a/vendor/github.com/linode/linodego/support.go b/vendor/github.com/linode/linodego/support.go
new file mode 100644
index 000000000..6c72a4adf
--- /dev/null
+++ b/vendor/github.com/linode/linodego/support.go
@@ -0,0 +1,88 @@
+package linodego
+
+import (
+	"context"
+	"fmt"
+	"time"
+)
+
+// Ticket represents a support ticket object
+type Ticket struct {
+	ID          int           `json:"id"`
+	Attachments []string      `json:"attachments"`
+	Closed      *time.Time    `json:"-"`
+	Description string        `json:"description"`
+	Entity      *TicketEntity `json:"entity"`
+	GravatarID  string        `json:"gravatar_id"`
+	Opened      *time.Time    `json:"-"`
+	OpenedBy    string        `json:"opened_by"`
+	Status      TicketStatus  `json:"status"`
+	Summary     string        `json:"summary"`
+	Updated     *time.Time    `json:"-"`
+	UpdatedBy   string        `json:"updated_by"`
+}
+
+// TicketEntity refers a ticket to a specific entity
+type TicketEntity struct {
+	ID    int    `json:"id"`
+	Label string `json:"label"`
+	Type  string `json:"type"`
+	URL   string `json:"url"`
+}
+
+// TicketStatus constants start with Ticket and include Linode API Ticket Status values
+type TicketStatus string
+
+// TicketStatus constants reflect the current status of a Ticket
+const (
+	TicketNew    TicketStatus = "new"
+	TicketClosed TicketStatus = "closed"
+	TicketOpen   TicketStatus = "open"
+)
+
+// TicketsPagedResponse represents a paginated ticket API response
+type TicketsPagedResponse struct {
+	*PageOptions
+	Data []Ticket `json:"data"`
+}
+// endpoint gets the endpoint URL for Ticket; panics if the endpoint cannot be resolved
+func (TicketsPagedResponse) endpoint(c *Client) string {
+	endpoint, err := c.Tickets.Endpoint()
+	if err != nil {
+		panic(err)
+	}
+	return endpoint
+}
+// appendData appends Tickets when processing paginated Ticket responses
+func (resp *TicketsPagedResponse) appendData(r *TicketsPagedResponse) {
+	resp.Data = append(resp.Data, r.Data...)
+}
+
+// ListTickets returns a collection of Support Tickets on the Account. Support Tickets
+// can be both tickets opened with Linode for support, as well as tickets generated by
+// Linode regarding the Account. This collection includes all Support Tickets generated
+// on the Account, with open tickets returned first.
+func (c *Client) ListTickets(ctx context.Context, opts *ListOptions) ([]Ticket, error) {
+	response := TicketsPagedResponse{}
+	err := c.listHelper(ctx, &response, opts)
+	if err != nil {
+		return nil, err
+	}
+	return response.Data, nil
+}
+
+// GetTicket gets a Support Ticket on the Account with the specified ID
+func (c *Client) GetTicket(ctx context.Context, id int) (*Ticket, error) {
+	e, err := c.Tickets.Endpoint()
+	if err != nil {
+		return nil, err
+	}
+	e = fmt.Sprintf("%s/%d", e, id)
+	r, err := coupleAPIErrors(c.R(ctx).
+		SetResult(&Ticket{}).
+		Get(e))
+	if err != nil {
+		return nil, err
+	}
+	return r.Result().(*Ticket), nil
+}
diff --git a/vendor/github.com/linode/linodego/tags.go b/vendor/github.com/linode/linodego/tags.go
new file mode 100644
index 000000000..671a36184
--- /dev/null
+++ b/vendor/github.com/linode/linodego/tags.go
@@ -0,0 +1,235 @@
+package linodego
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+)
+
+// Tag represents a Tag object
+type Tag struct {
+	Label string `json:"label"`
+}
+
+// TaggedObject represents a Tagged Object object
+type TaggedObject struct {
+	Type    string          `json:"type"`
+	RawData json.RawMessage `json:"data"`
+	Data    interface{}     `json:"-"`
+}
+
+// SortedObjects holds TaggedObject data sorted into slices by concrete type
+type SortedObjects struct {
+	Instances     []Instance
+	LKEClusters   []LKECluster
+	Domains       []Domain
+	Volumes       []Volume
+	NodeBalancers []NodeBalancer
+	/*
+		StackScripts []Stackscript
+	*/
+}
+
+// TaggedObjectList are a list of TaggedObjects, as returning by ListTaggedObjects
+type TaggedObjectList []TaggedObject
+
+// TagCreateOptions fields are those accepted by CreateTag
+type TagCreateOptions struct {
+	Label   string `json:"label"`
+	Linodes []int  `json:"linodes,omitempty"`
+	// @TODO is this implemented?
+	LKEClusters   []int `json:"lke_clusters,omitempty"`
+	Domains       []int `json:"domains,omitempty"`
+	Volumes       []int `json:"volumes,omitempty"`
+	NodeBalancers []int `json:"nodebalancers,omitempty"`
+}
+
+// GetCreateOptions converts a Tag to TagCreateOptions for use in CreateTag
+func (i Tag) GetCreateOptions() (o TagCreateOptions) {
+	o.Label = i.Label
+	return
+}
+
+// TaggedObjectsPagedResponse represents a paginated Tag API response
+type TaggedObjectsPagedResponse struct {
+	*PageOptions
+	Data []TaggedObject `json:"data"`
+}
+
+// TagsPagedResponse represents a paginated Tag API response
+type TagsPagedResponse struct {
+	*PageOptions
+	Data []Tag `json:"data"`
+}
+
+// endpoint gets the endpoint URL for Tag
+func (TagsPagedResponse) endpoint(c *Client) string {
+	endpoint, err := c.Tags.Endpoint()
+	if err != nil {
+		panic(err)
+	}
+	return endpoint
+}
+
+// endpointWithID gets the endpoint URL for the TaggedObjects of the Tag with the provided label
+func (TaggedObjectsPagedResponse) endpointWithID(c *Client, id string) string {
+	endpoint, err := c.Tags.Endpoint()
+	if err != nil {
+		panic(err)
+	}
+	endpoint = fmt.Sprintf("%s/%s", endpoint, id)
+	return endpoint
+}
+
+// appendData appends Tags when processing paginated Tag responses
+func (resp *TagsPagedResponse) appendData(r *TagsPagedResponse) {
+	resp.Data = append(resp.Data, r.Data...)
+}
+
+// appendData appends TaggedObjects when processing paginated TaggedObjects responses
+func (resp *TaggedObjectsPagedResponse) appendData(r *TaggedObjectsPagedResponse) {
+	resp.Data = append(resp.Data, r.Data...)
+}
+
+// ListTags lists Tags
+func (c *Client) ListTags(ctx context.Context, opts *ListOptions) ([]Tag, error) {
+	response := TagsPagedResponse{}
+	err := c.listHelper(ctx, &response, opts)
+	if err != nil {
+		return nil, err
+	}
+	return response.Data, nil
+}
+
+// fixData stores an object of the type defined by Type in Data using RawData
+func (i *TaggedObject) fixData() (*TaggedObject, error) {
+	switch i.Type {
+	case "linode":
+		obj := Instance{}
+		if err := json.Unmarshal(i.RawData, &obj); err != nil {
+			return nil, err
+		}
+		i.Data = obj
+	case "lke_cluster":
+		obj := LKECluster{}
+		if err := json.Unmarshal(i.RawData, &obj); err != nil {
+			return nil, err
+		}
+		i.Data = obj
+	case "nodebalancer":
+		obj := NodeBalancer{}
+		if err := json.Unmarshal(i.RawData, &obj); err != nil {
+			return nil, err
+		}
+		i.Data = obj
+	case "domain":
+		obj := Domain{}
+		if err := json.Unmarshal(i.RawData, &obj); err != nil {
+			return nil, err
+		}
+		i.Data = obj
+	case "volume":
+		obj := Volume{}
+		if err := json.Unmarshal(i.RawData, &obj); err != nil {
+			return nil, err
+		}
+		i.Data = obj
+	}
+
+	return i, nil
+}
+
+// ListTaggedObjects lists Tagged Objects
+func (c *Client) ListTaggedObjects(ctx context.Context, label string, opts *ListOptions) (TaggedObjectList, error) {
+	response := TaggedObjectsPagedResponse{}
+	err := c.listHelperWithID(ctx, &response, label, opts)
+	if err != nil {
+		return nil, err
+	}
+
+	for i := range response.Data {
+		if _, err := response.Data[i].fixData(); err != nil {
+			return nil, err
+		}
+	}
+	return response.Data, nil
+}
+
+// SortedObjects converts a list of TaggedObjects into a Sorted Objects struct, for easier access
+func (t TaggedObjectList) SortedObjects() (SortedObjects, error) {
+	so := SortedObjects{}
+
+	for _, o := range t {
+		switch o.Type {
+		case "linode":
+			if instance, ok := o.Data.(Instance); ok {
+				so.Instances = append(so.Instances, instance)
+			} else {
+				return so, errors.New("expected an Instance when Type was \"linode\"")
+			}
+		case "lke_cluster":
+			if lkeCluster, ok := o.Data.(LKECluster); ok {
+				so.LKEClusters = append(so.LKEClusters, lkeCluster)
+			} else {
+				return so, errors.New("expected an LKECluster when Type was \"lke_cluster\"")
+			}
+		case "domain":
+			if domain, ok := o.Data.(Domain); ok {
+				so.Domains = append(so.Domains, domain)
+			} else {
+				return so, errors.New("expected a Domain when Type was \"domain\"")
+			}
+		case "volume":
+			if volume, ok := o.Data.(Volume); ok {
+				so.Volumes = append(so.Volumes, volume)
+			} else {
+				return so, errors.New("expected an Volume when Type was \"volume\"")
+			}
+		case "nodebalancer":
+			if nodebalancer, ok := o.Data.(NodeBalancer); ok {
+				so.NodeBalancers = append(so.NodeBalancers, nodebalancer)
+			} else {
+				return so, errors.New("expected an NodeBalancer when Type was \"nodebalancer\"")
+			}
+		}
+	}
+	return so, nil
+}
+
+// CreateTag creates a Tag
+func (c *Client) CreateTag(ctx context.Context, createOpts TagCreateOptions) (*Tag, error) {
+	var body string
+	e, err := c.Tags.Endpoint()
+	if err != nil {
+		return nil, err
+	}
+
+	req := c.R(ctx).SetResult(&Tag{})
+
+	if bodyData, err := json.Marshal(createOpts); err == nil {
+		body = string(bodyData)
+	} else {
+		return nil, NewError(err)
+	}
+
+	r, err := coupleAPIErrors(req.
+		SetBody(body).
+		Post(e))
+	if err != nil {
+		return nil, err
+	}
+	return r.Result().(*Tag), nil
+}
+
+// DeleteTag deletes the Tag with the specified id
+func (c *Client) DeleteTag(ctx context.Context, label string) error {
+	e, err := c.Tags.Endpoint()
+	if err != nil {
+		return err
+	}
+	e = fmt.Sprintf("%s/%s", e, label)
+
+	_, err = coupleAPIErrors(c.R(ctx).Delete(e))
+	return err
+}
diff --git a/vendor/github.com/linode/linodego/types.go b/vendor/github.com/linode/linodego/types.go
new file mode 100644
index 000000000..d1ffbafef
--- /dev/null
+++ b/vendor/github.com/linode/linodego/types.go
@@ -0,0 +1,90 @@
+package linodego
+
+import (
+	"context"
+	"fmt"
+)
+
+// LinodeType represents a linode type object
+type LinodeType struct {
+	ID         string          `json:"id"`
+	Disk       int             `json:"disk"`
+	Class      LinodeTypeClass `json:"class"` // enum: nanode, standard, highmem, dedicated
+	Price      *LinodePrice    `json:"price"`
+	Label      string          `json:"label"`
+	Addons     *LinodeAddons   `json:"addons"`
+	NetworkOut int             `json:"network_out"`
+	Memory     int             `json:"memory"`
+	Transfer   int             `json:"transfer"`
+	VCPUs      int             `json:"vcpus"`
+}
+
+// LinodePrice represents a linode type price object
+type LinodePrice struct {
+	Hourly  float32 `json:"hourly"`
+	Monthly float32 `json:"monthly"`
+}
+
+// LinodeBackupsAddon represents a linode backups addon object
+type LinodeBackupsAddon struct {
+	Price *LinodePrice `json:"price"`
+}
+
+// LinodeAddons represent the linode addons object
+type LinodeAddons struct {
+	Backups *LinodeBackupsAddon `json:"backups"`
+}
+
+// LinodeTypeClass constants start with Class and include Linode API Instance Type Classes
+type LinodeTypeClass string
+
+// LinodeTypeClass constants are the Instance Type Classes that an Instance Type can be assigned
+const (
+	ClassNanode    LinodeTypeClass = "nanode"
+	ClassStandard  LinodeTypeClass = "standard"
+	ClassHighmem   LinodeTypeClass = "highmem"
+	ClassDedicated LinodeTypeClass = "dedicated"
+)
+
+// LinodeTypesPagedResponse represents a linode types API response for listing
+type LinodeTypesPagedResponse struct {
+	*PageOptions
+	Data []LinodeType `json:"data"`
+}
+// endpoint gets the endpoint URL for LinodeType; panics if the endpoint cannot be resolved
+func (LinodeTypesPagedResponse) endpoint(c *Client) string {
+	endpoint, err := c.Types.Endpoint()
+	if err != nil {
+		panic(err)
+	}
+	return endpoint
+}
+// appendData appends LinodeTypes when processing paginated LinodeType responses
+func (resp *LinodeTypesPagedResponse) appendData(r *LinodeTypesPagedResponse) {
+	resp.Data = append(resp.Data, r.Data...)
+}
+
+// ListTypes lists linode types
+func (c *Client) ListTypes(ctx context.Context, opts *ListOptions) ([]LinodeType, error) {
+	response := LinodeTypesPagedResponse{}
+	err := c.listHelper(ctx, &response, opts)
+	if err != nil {
+		return nil, err
+	}
+	return response.Data, nil
+}
+
+// GetType gets the type with the provided ID
+func (c *Client) GetType(ctx context.Context, typeID string) (*LinodeType, error) {
+	e, err := c.Types.Endpoint()
+	if err != nil {
+		return nil, err
+	}
+	e = fmt.Sprintf("%s/%s", e, typeID)
+
+	r, err := coupleAPIErrors(c.Types.R(ctx).Get(e))
+	if err != nil {
+		return nil, err
+	}
+	return r.Result().(*LinodeType), nil
+}
diff --git a/vendor/github.com/linode/linodego/version.go b/vendor/github.com/linode/linodego/version.go
new file mode 100644
index 000000000..cad2662fb
--- /dev/null
+++ b/vendor/github.com/linode/linodego/version.go
@@ -0,0 +1,34 @@
+package linodego
+
+import (
+	"fmt"
+	"runtime/debug"
+)
+
+const packagePath = "github.com/linode/linodego"
+
+var (
+	// Version is replaced at init from the module build info when available
+	Version = "dev"
+
+	// DefaultUserAgent is the default User-Agent sent in HTTP request headers
+	DefaultUserAgent string
+)
+// init attempts to source the version from the build info injected
+// at runtime and sets the DefaultUserAgent. When a replace directive
+// is in effect, the replacement module's version takes precedence.
+func init() {
+	buildInfo, ok := debug.ReadBuildInfo()
+	if ok {
+		for _, dep := range buildInfo.Deps {
+			if dep.Path == packagePath {
+				// BUG FIX: the replace version was previously clobbered by an
+				// unconditional assignment of dep.Version after this branch.
+				if dep.Replace != nil {
+					Version = dep.Replace.Version
+				} else {
+					Version = dep.Version
+				}
+				break
+			}
+		}
+	}
+	DefaultUserAgent = fmt.Sprintf("linodego/%s https://github.com/linode/linodego", Version)
+}
diff --git a/vendor/github.com/linode/linodego/vlans.go b/vendor/github.com/linode/linodego/vlans.go
new file mode 100644
index 000000000..e84c6b214
--- /dev/null
+++ b/vendor/github.com/linode/linodego/vlans.go
@@ -0,0 +1,65 @@
+package linodego
+
+import (
+	"context"
+	"encoding/json"
+	"time"
+
+	"github.com/linode/linodego/internal/parseabletime"
+)
+// VLAN represents a Linode VLAN object
+type VLAN struct {
+	Label   string     `json:"label"`
+	Linodes []int      `json:"linodes"`
+	Region  string     `json:"region"`
+	Created *time.Time `json:"-"`
+}
+
+// UnmarshalJSON for VLAN responses
+func (v *VLAN) UnmarshalJSON(b []byte) error {
+	type Mask VLAN
+
+	p := struct {
+		*Mask
+		Created *parseabletime.ParseableTime `json:"created"`
+	}{
+		Mask: (*Mask)(v),
+	}
+
+	if err := json.Unmarshal(b, &p); err != nil {
+		return err
+	}
+
+	v.Created = (*time.Time)(p.Created)
+	return nil
+}
+
+// VLANsPagedResponse represents a Linode API response for listing of VLANs
+type VLANsPagedResponse struct {
+	*PageOptions
+	Data []VLAN `json:"data"`
+}
+// endpoint gets the endpoint URL for VLAN; panics if the endpoint cannot be resolved
+func (VLANsPagedResponse) endpoint(c *Client) string {
+	endpoint, err := c.VLANs.Endpoint()
+	if err != nil {
+		panic(err)
+	}
+	return endpoint
+}
+// appendData appends VLANs when processing paginated VLAN responses
+func (resp *VLANsPagedResponse) appendData(r *VLANsPagedResponse) {
+	resp.Data = append(resp.Data, r.Data...)
+}
+
+// ListVLANs returns a paginated list of VLANs
+func (c *Client) ListVLANs(ctx context.Context, opts *ListOptions) ([]VLAN, error) {
+	response := VLANsPagedResponse{}
+
+	err := c.listHelper(ctx, &response, opts)
+	if err != nil {
+		return nil, err
+	}
+
+	return response.Data, nil
+}
diff --git a/vendor/github.com/linode/linodego/volumes.go b/vendor/github.com/linode/linodego/volumes.go
new file mode 100644
index 000000000..429e79f84
--- /dev/null
+++ b/vendor/github.com/linode/linodego/volumes.go
@@ -0,0 +1,297 @@
+package linodego
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "github.com/linode/linodego/internal/parseabletime"
+)
+
+// VolumeStatus indicates the status of the Volume
+type VolumeStatus string
+
+const (
+ // VolumeCreating indicates the Volume is being created and is not yet available for use
+ VolumeCreating VolumeStatus = "creating"
+
+ // VolumeActive indicates the Volume is online and available for use
+ VolumeActive VolumeStatus = "active"
+
+ // VolumeResizing indicates the Volume is in the process of upgrading its current capacity
+ VolumeResizing VolumeStatus = "resizing"
+
+ // VolumeContactSupport indicates there is a problem with the Volume. A support ticket must be opened to resolve the issue
+ VolumeContactSupport VolumeStatus = "contact_support"
+)
+
+// Volume represents a linode volume object
+type Volume struct {
+ ID int `json:"id"`
+ Label string `json:"label"`
+ Status VolumeStatus `json:"status"`
+ Region string `json:"region"`
+ Size int `json:"size"`
+ LinodeID *int `json:"linode_id"`
+ FilesystemPath string `json:"filesystem_path"`
+ Tags []string `json:"tags"`
+ Created *time.Time `json:"-"`
+ Updated *time.Time `json:"-"`
+}
+
+// VolumeCreateOptions fields are those accepted by CreateVolume
+type VolumeCreateOptions struct {
+ Label string `json:"label,omitempty"`
+ Region string `json:"region,omitempty"`
+ LinodeID int `json:"linode_id,omitempty"`
+ ConfigID int `json:"config_id,omitempty"`
+ // The Volume's size, in GiB. Minimum size is 10GiB, maximum size is 10240GiB. A "0" value will result in the default size.
+ Size int `json:"size,omitempty"`
+ // An array of tags applied to this object. Tags are for organizational purposes only.
+ Tags []string `json:"tags"`
+ PersistAcrossBoots *bool `json:"persist_across_boots,omitempty"`
+}
+
+// VolumeUpdateOptions fields are those accepted by UpdateVolume
+type VolumeUpdateOptions struct {
+ Label string `json:"label,omitempty"`
+ Tags *[]string `json:"tags,omitempty"`
+}
+
+// VolumeAttachOptions fields are those accepted by AttachVolume
+type VolumeAttachOptions struct {
+ LinodeID int `json:"linode_id"`
+ ConfigID int `json:"config_id,omitempty"`
+ PersistAcrossBoots *bool `json:"persist_across_boots,omitempty"`
+}
+
+// VolumesPagedResponse represents a linode API response for listing of volumes
+type VolumesPagedResponse struct {
+ *PageOptions
+ Data []Volume `json:"data"`
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (v *Volume) UnmarshalJSON(b []byte) error {
+ type Mask Volume
+
+ p := struct {
+ *Mask
+ Created *parseabletime.ParseableTime `json:"created"`
+ Updated *parseabletime.ParseableTime `json:"updated"`
+ }{
+ Mask: (*Mask)(v),
+ }
+
+ if err := json.Unmarshal(b, &p); err != nil {
+ return err
+ }
+
+ v.Created = (*time.Time)(p.Created)
+ v.Updated = (*time.Time)(p.Updated)
+
+ return nil
+}
+
+// GetUpdateOptions converts a Volume to VolumeUpdateOptions for use in UpdateVolume
+func (v Volume) GetUpdateOptions() (updateOpts VolumeUpdateOptions) {
+ updateOpts.Label = v.Label
+ updateOpts.Tags = &v.Tags
+ return
+}
+
+// GetCreateOptions converts a Volume to VolumeCreateOptions for use in CreateVolume
+func (v Volume) GetCreateOptions() (createOpts VolumeCreateOptions) {
+ createOpts.Label = v.Label
+ createOpts.Tags = v.Tags
+ createOpts.Region = v.Region
+ createOpts.Size = v.Size
+ if v.LinodeID != nil && *v.LinodeID > 0 {
+ createOpts.LinodeID = *v.LinodeID
+ }
+ return
+}
+
+// endpoint gets the endpoint URL for Volume
+func (VolumesPagedResponse) endpoint(c *Client) string {
+ endpoint, err := c.Volumes.Endpoint()
+ if err != nil {
+ panic(err)
+ }
+ return endpoint
+}
+
+// appendData appends Volumes when processing paginated Volume responses
+func (resp *VolumesPagedResponse) appendData(r *VolumesPagedResponse) {
+ resp.Data = append(resp.Data, r.Data...)
+}
+
+// ListVolumes lists Volumes
+func (c *Client) ListVolumes(ctx context.Context, opts *ListOptions) ([]Volume, error) {
+ response := VolumesPagedResponse{}
+ err := c.listHelper(ctx, &response, opts)
+ if err != nil {
+ return nil, err
+ }
+ return response.Data, nil
+}
+
+// GetVolume gets the template with the provided ID
+func (c *Client) GetVolume(ctx context.Context, id int) (*Volume, error) {
+ e, err := c.Volumes.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+ e = fmt.Sprintf("%s/%d", e, id)
+ r, err := coupleAPIErrors(c.R(ctx).SetResult(&Volume{}).Get(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*Volume), nil
+}
+
+// AttachVolume attaches a volume to a Linode instance
+func (c *Client) AttachVolume(ctx context.Context, id int, options *VolumeAttachOptions) (*Volume, error) {
+ body := ""
+ if bodyData, err := json.Marshal(options); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ e, err := c.Volumes.Endpoint()
+ if err != nil {
+ return nil, NewError(err)
+ }
+
+ e = fmt.Sprintf("%s/%d/attach", e, id)
+ resp, err := coupleAPIErrors(c.R(ctx).
+ SetResult(&Volume{}).
+ SetBody(body).
+ Post(e))
+ if err != nil {
+ return nil, err
+ }
+
+ return resp.Result().(*Volume), nil
+}
+
+// CreateVolume creates a Linode Volume
+func (c *Client) CreateVolume(ctx context.Context, createOpts VolumeCreateOptions) (*Volume, error) {
+ body := ""
+ if bodyData, err := json.Marshal(createOpts); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ e, err := c.Volumes.Endpoint()
+ if err != nil {
+ return nil, NewError(err)
+ }
+
+ resp, err := coupleAPIErrors(c.R(ctx).
+ SetResult(&Volume{}).
+ SetBody(body).
+ Post(e))
+ if err != nil {
+ return nil, err
+ }
+
+ return resp.Result().(*Volume), nil
+}
+
+// UpdateVolume updates the Volume with the specified id
+func (c *Client) UpdateVolume(ctx context.Context, id int, volume VolumeUpdateOptions) (*Volume, error) {
+ var body string
+ e, err := c.Volumes.Endpoint()
+ if err != nil {
+ return nil, err
+ }
+ e = fmt.Sprintf("%s/%d", e, id)
+
+ req := c.R(ctx).SetResult(&Volume{})
+
+ if bodyData, err := json.Marshal(volume); err == nil {
+ body = string(bodyData)
+ } else {
+ return nil, NewError(err)
+ }
+
+ r, err := coupleAPIErrors(req.
+ SetBody(body).
+ Put(e))
+ if err != nil {
+ return nil, err
+ }
+ return r.Result().(*Volume), nil
+}
+
+// CloneVolume clones a Linode volume
+func (c *Client) CloneVolume(ctx context.Context, id int, label string) (*Volume, error) {
+ body := fmt.Sprintf("{\"label\":\"%s\"}", label)
+
+ e, err := c.Volumes.Endpoint()
+ if err != nil {
+ return nil, NewError(err)
+ }
+ e = fmt.Sprintf("%s/%d/clone", e, id)
+
+ resp, err := coupleAPIErrors(c.R(ctx).
+ SetResult(&Volume{}).
+ SetBody(body).
+ Post(e))
+ if err != nil {
+ return nil, err
+ }
+
+ return resp.Result().(*Volume), nil
+}
+
+// DetachVolume detaches a Linode volume
+func (c *Client) DetachVolume(ctx context.Context, id int) error {
+ body := ""
+
+ e, err := c.Volumes.Endpoint()
+ if err != nil {
+ return NewError(err)
+ }
+
+ e = fmt.Sprintf("%s/%d/detach", e, id)
+
+ _, err = coupleAPIErrors(c.R(ctx).
+ SetBody(body).
+ Post(e))
+
+ return err
+}
+
+// ResizeVolume resizes an instance to new Linode type
+func (c *Client) ResizeVolume(ctx context.Context, id int, size int) error {
+ body := fmt.Sprintf("{\"size\": %d}", size)
+
+ e, err := c.Volumes.Endpoint()
+ if err != nil {
+ return NewError(err)
+ }
+ e = fmt.Sprintf("%s/%d/resize", e, id)
+
+ _, err = coupleAPIErrors(c.R(ctx).
+ SetBody(body).
+ Post(e))
+
+ return err
+}
+
+// DeleteVolume deletes the Volume with the specified id
+func (c *Client) DeleteVolume(ctx context.Context, id int) error {
+ e, err := c.Volumes.Endpoint()
+ if err != nil {
+ return err
+ }
+ e = fmt.Sprintf("%s/%d", e, id)
+
+ _, err = coupleAPIErrors(c.R(ctx).Delete(e))
+ return err
+}
diff --git a/vendor/github.com/linode/linodego/waitfor.go b/vendor/github.com/linode/linodego/waitfor.go
new file mode 100644
index 000000000..b7450558c
--- /dev/null
+++ b/vendor/github.com/linode/linodego/waitfor.go
@@ -0,0 +1,429 @@
+package linodego
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "log"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// WaitForInstanceStatus waits for the Linode instance to reach the desired state
+// before returning. It will timeout with an error after timeoutSeconds.
+func (client Client) WaitForInstanceStatus(ctx context.Context, instanceID int, status InstanceStatus, timeoutSeconds int) (*Instance, error) {
+ ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
+ defer cancel()
+
+ ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ instance, err := client.GetInstance(ctx, instanceID)
+ if err != nil {
+ return instance, err
+ }
+ complete := (instance.Status == status)
+
+ if complete {
+ return instance, nil
+ }
+ case <-ctx.Done():
+ return nil, fmt.Errorf("Error waiting for Instance %d status %s: %s", instanceID, status, ctx.Err())
+ }
+ }
+}
+
+// WaitForInstanceDiskStatus waits for the Linode instance disk to reach the desired state
+// before returning. It will timeout with an error after timeoutSeconds.
+func (client Client) WaitForInstanceDiskStatus(ctx context.Context, instanceID int, diskID int, status DiskStatus, timeoutSeconds int) (*InstanceDisk, error) {
+ ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
+ defer cancel()
+
+ ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ // GetInstanceDisk will 404 on newly created disks. use List instead.
+ // disk, err := client.GetInstanceDisk(ctx, instanceID, diskID)
+ disks, err := client.ListInstanceDisks(ctx, instanceID, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, disk := range disks {
+ disk := disk
+ if disk.ID == diskID {
+ complete := (disk.Status == status)
+ if complete {
+ return &disk, nil
+ }
+
+ break
+ }
+ }
+ case <-ctx.Done():
+ return nil, fmt.Errorf("Error waiting for Instance %d Disk %d status %s: %s", instanceID, diskID, status, ctx.Err())
+ }
+ }
+}
+
+// WaitForVolumeStatus waits for the Volume to reach the desired state
+// before returning. It will timeout with an error after timeoutSeconds.
+func (client Client) WaitForVolumeStatus(ctx context.Context, volumeID int, status VolumeStatus, timeoutSeconds int) (*Volume, error) {
+ ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
+ defer cancel()
+
+ ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ volume, err := client.GetVolume(ctx, volumeID)
+ if err != nil {
+ return volume, err
+ }
+ complete := (volume.Status == status)
+
+ if complete {
+ return volume, nil
+ }
+ case <-ctx.Done():
+ return nil, fmt.Errorf("Error waiting for Volume %d status %s: %s", volumeID, status, ctx.Err())
+ }
+ }
+}
+
+// WaitForSnapshotStatus waits for the Snapshot to reach the desired state
+// before returning. It will timeout with an error after timeoutSeconds.
+func (client Client) WaitForSnapshotStatus(ctx context.Context, instanceID int, snapshotID int, status InstanceSnapshotStatus, timeoutSeconds int) (*InstanceSnapshot, error) {
+ ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
+ defer cancel()
+
+ ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ snapshot, err := client.GetInstanceSnapshot(ctx, instanceID, snapshotID)
+ if err != nil {
+ return snapshot, err
+ }
+ complete := (snapshot.Status == status)
+
+ if complete {
+ return snapshot, nil
+ }
+ case <-ctx.Done():
+ return nil, fmt.Errorf("Error waiting for Instance %d Snapshot %d status %s: %s", instanceID, snapshotID, status, ctx.Err())
+ }
+ }
+}
+
+// WaitForVolumeLinodeID waits for the Volume to match the desired LinodeID
+// before returning. An active Instance will not immediately attach or detach a volume, so
+// the LinodeID must be polled to determine volume readiness from the API.
+// WaitForVolumeLinodeID will timeout with an error after timeoutSeconds.
+func (client Client) WaitForVolumeLinodeID(ctx context.Context, volumeID int, linodeID *int, timeoutSeconds int) (*Volume, error) {
+ ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
+ defer cancel()
+
+ ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ volume, err := client.GetVolume(ctx, volumeID)
+ if err != nil {
+ return volume, err
+ }
+
+ switch {
+ case linodeID == nil && volume.LinodeID == nil:
+ return volume, nil
+ case linodeID == nil || volume.LinodeID == nil:
+ // continue waiting
+ case *volume.LinodeID == *linodeID:
+ return volume, nil
+ }
+ case <-ctx.Done():
+ return nil, fmt.Errorf("Error waiting for Volume %d to have Instance %v: %s", volumeID, linodeID, ctx.Err())
+ }
+ }
+}
+
+// WaitForLKEClusterStatus waits for the LKECluster to reach the desired state
+// before returning. It will timeout with an error after timeoutSeconds.
+func (client Client) WaitForLKEClusterStatus(ctx context.Context, clusterID int, status LKEClusterStatus, timeoutSeconds int) (*LKECluster, error) {
+ ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
+ defer cancel()
+
+ ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ cluster, err := client.GetLKECluster(ctx, clusterID)
+ if err != nil {
+ return cluster, err
+ }
+ complete := (cluster.Status == status)
+
+ if complete {
+ return cluster, nil
+ }
+ case <-ctx.Done():
+ return nil, fmt.Errorf("Error waiting for Cluster %d status %s: %s", clusterID, status, ctx.Err())
+ }
+ }
+}
+
+// LKEClusterPollOptions configures polls against LKE Clusters.
+type LKEClusterPollOptions struct {
+ // TimeoutSeconds is the number of Seconds to wait for the poll to succeed
+ // before exiting.
+ TimeoutSeconds int
+
+ // TransportWrapper allows adding a transport middleware function that will
+ // wrap the LKE Cluster client's underlying http.RoundTripper.
+ TransportWrapper func(http.RoundTripper) http.RoundTripper
+}
+
+type ClusterConditionOptions struct {
+ LKEClusterKubeconfig *LKEClusterKubeconfig
+ TransportWrapper func(http.RoundTripper) http.RoundTripper
+}
+
+// ClusterConditionFunc represents a function that tests a condition against an LKE cluster,
+// returns true if the condition has been reached, false if it has not yet been reached.
+type ClusterConditionFunc func(context.Context, ClusterConditionOptions) (bool, error)
+
+// WaitForLKEClusterConditions waits for the given LKE conditions to be true
+func (client Client) WaitForLKEClusterConditions(
+ ctx context.Context,
+ clusterID int,
+ options LKEClusterPollOptions,
+ conditions ...ClusterConditionFunc,
+) error {
+ ctx, cancel := context.WithCancel(ctx)
+ if options.TimeoutSeconds != 0 {
+ ctx, cancel = context.WithTimeout(ctx, time.Duration(options.TimeoutSeconds)*time.Second)
+ }
+ defer cancel()
+
+ lkeKubeConfig, err := client.GetLKEClusterKubeconfig(ctx, clusterID)
+ if err != nil {
+ return fmt.Errorf("failed to get Kubeconfig for LKE cluster %d: %s", clusterID, err)
+ }
+
+ ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
+ defer ticker.Stop()
+
+ conditionOptions := ClusterConditionOptions{LKEClusterKubeconfig: lkeKubeConfig, TransportWrapper: options.TransportWrapper}
+
+ for _, condition := range conditions {
+ ConditionSucceeded:
+ for {
+ select {
+ case <-ticker.C:
+ result, err := condition(ctx, conditionOptions)
+ if err != nil {
+ return err
+ }
+
+ if result {
+ break ConditionSucceeded
+ }
+
+ case <-ctx.Done():
+ return fmt.Errorf("Error waiting for cluster %d conditions: %s", clusterID, ctx.Err())
+ }
+ }
+ }
+ return nil
+}
+
+// WaitForEventFinished waits for an entity action to reach the 'finished' state
+// before returning. It will timeout with an error after timeoutSeconds.
+// If the event indicates a failure both the failed event and the error will be returned.
+// nolint
+func (client Client) WaitForEventFinished(ctx context.Context, id interface{}, entityType EntityType, action EventAction, minStart time.Time, timeoutSeconds int) (*Event, error) {
+ titledEntityType := strings.Title(string(entityType))
+ filterStruct := map[string]interface{}{
+ // Nor is action
+ "action": action,
+
+ "created": map[string]interface{}{
+ // The API uses UTC time, so we need to ensure the time is converted
+ "+gte": minStart.UTC().Format("2006-01-02T15:04:05"),
+ },
+
+ // With potentially 1000+ events coming back, we should filter on something
+ // Warning: This optimization has the potential to break if users are clearing
+ // events before we see them.
+ "seen": false,
+
+ // Float the latest events to page 1
+ "+order_by": "created",
+ "+order": "desc",
+ }
+
+ // Optimistically restrict results to page 1. We should remove this when more
+ // precise filtering options exist.
+ pages := 1
+
+ // The API has limited filtering support for Event ID and Event Type
+ // Optimize the list, if possible
+ switch entityType {
+ case EntityDisk, EntityLinode, EntityDomain, EntityNodebalancer:
+ // All of the filter supported types have int ids
+ filterableEntityID, err := strconv.Atoi(fmt.Sprintf("%v", id))
+ if err != nil {
+ return nil, fmt.Errorf("Error parsing Entity ID %q for optimized WaitForEventFinished EventType %q: %s", id, entityType, err)
+ }
+ filterStruct["entity.id"] = filterableEntityID
+ filterStruct["entity.type"] = entityType
+
+ // TODO: are we comfortable with pages = 0 with the event type and id filter?
+ }
+
+ ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
+ defer cancel()
+
+ if deadline, ok := ctx.Deadline(); ok {
+ duration := time.Until(deadline)
+ log.Printf("[INFO] Waiting %d seconds for %s events since %v for %s %v", int(duration.Seconds()), action, minStart, titledEntityType, id)
+ }
+
+ ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
+
+ // avoid repeating log messages
+ nextLog := ""
+ lastLog := ""
+ lastEventID := 0
+
+ defer ticker.Stop()
+ for {
+ select {
+ case <-ticker.C:
+ if lastEventID > 0 {
+ filterStruct["id"] = map[string]interface{}{
+ "+gte": lastEventID,
+ }
+ }
+
+ filter, err := json.Marshal(filterStruct)
+ if err != nil {
+ return nil, err
+ }
+ listOptions := NewListOptions(pages, string(filter))
+
+ events, err := client.ListEvents(ctx, listOptions)
+ if err != nil {
+ return nil, err
+ }
+
+ // If there are events for this instance + action, inspect them
+ for _, event := range events {
+ event := event
+
+ if event.Entity == nil || event.Entity.Type != entityType {
+ // log.Println("type mismatch", event.Entity.Type, entityType)
+ continue
+ }
+
+ var entID string
+
+ switch id := event.Entity.ID.(type) {
+ case float64, float32:
+ entID = fmt.Sprintf("%.f", id)
+ case int:
+ entID = strconv.Itoa(id)
+ default:
+ entID = fmt.Sprintf("%v", id)
+ }
+
+ var findID string
+ switch id := id.(type) {
+ case float64, float32:
+ findID = fmt.Sprintf("%.f", id)
+ case int:
+ findID = strconv.Itoa(id)
+ default:
+ findID = fmt.Sprintf("%v", id)
+ }
+
+ if entID != findID {
+ // log.Println("id mismatch", entID, findID)
+ continue
+ }
+
+ // @TODO(displague) This event.Created check shouldn't be needed, but it appears
+ // that the ListEvents method is not populating it correctly
+ if event.Created == nil {
+ log.Printf("[WARN] event.Created is nil when API returned: %#+v", event.Created)
+ }
+
+ // This is the event we are looking for. Save our place.
+ if lastEventID == 0 {
+ lastEventID = event.ID
+ }
+
+ switch event.Status {
+ case EventFailed:
+ return &event, fmt.Errorf("%s %v action %s failed", titledEntityType, id, action)
+ case EventFinished:
+ log.Printf("[INFO] %s %v action %s is finished", titledEntityType, id, action)
+ return &event, nil
+ }
+ // TODO(displague) can we bump the ticker to TimeRemaining/2 (>=1) when non-nil?
+ nextLog = fmt.Sprintf("[INFO] %s %v action %s is %s", titledEntityType, id, action, event.Status)
+ }
+
+ // de-dupe logging statements
+ if nextLog != lastLog {
+ log.Print(nextLog)
+ lastLog = nextLog
+ }
+ case <-ctx.Done():
+ return nil, fmt.Errorf("Error waiting for Event Status '%s' of %s %v action '%s': %s", EventFinished, titledEntityType, id, action, ctx.Err())
+ }
+ }
+}
+
+// WaitForImageStatus waits for the Image to reach the desired state
+// before returning. It will timeout with an error after timeoutSeconds.
+func (client Client) WaitForImageStatus(ctx context.Context, imageID string, status ImageStatus, timeoutSeconds int) (*Image, error) {
+ ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
+ defer cancel()
+
+ ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ image, err := client.GetImage(ctx, imageID)
+ if err != nil {
+ return image, err
+ }
+ complete := image.Status == status
+
+ if complete {
+ return image, nil
+ }
+ case <-ctx.Done():
+ return nil, fmt.Errorf("failed to wait for Image %s status %s: %s", imageID, status, ctx.Err())
+ }
+ }
+}
diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go
index b9e936344..41215d7fc 100644
--- a/vendor/github.com/mattn/go-colorable/colorable_windows.go
+++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go
@@ -10,6 +10,7 @@ import (
"os"
"strconv"
"strings"
+ "sync"
"syscall"
"unsafe"
@@ -27,6 +28,7 @@ const (
backgroundRed = 0x40
backgroundIntensity = 0x80
backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity)
+ commonLvbUnderscore = 0x8000
cENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4
)
@@ -93,6 +95,7 @@ type Writer struct {
oldattr word
oldpos coord
rest bytes.Buffer
+ mutex sync.Mutex
}
// NewColorable returns new instance of Writer which handles escape sequence from File.
@@ -432,6 +435,8 @@ func atoiWithDefault(s string, def int) (int, error) {
// Write writes data on console
func (w *Writer) Write(data []byte) (n int, err error) {
+ w.mutex.Lock()
+ defer w.mutex.Unlock()
var csbi consoleScreenBufferInfo
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
@@ -683,14 +688,19 @@ loop:
switch {
case n == 0 || n == 100:
attr = w.oldattr
- case 1 <= n && n <= 5:
+ case n == 4:
+ attr |= commonLvbUnderscore
+ case (1 <= n && n <= 3) || n == 5:
attr |= foregroundIntensity
- case n == 7:
- attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
- case n == 22 || n == 25:
- attr |= foregroundIntensity
- case n == 27:
- attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
+ case n == 7 || n == 27:
+ attr =
+ (attr &^ (foregroundMask | backgroundMask)) |
+ ((attr & foregroundMask) << 4) |
+ ((attr & backgroundMask) >> 4)
+ case n == 22:
+ attr &^= foregroundIntensity
+ case n == 24:
+ attr &^= commonLvbUnderscore
case 30 <= n && n <= 37:
attr &= backgroundMask
if (n-30)&1 != 0 {
@@ -709,7 +719,7 @@ loop:
n256setup()
}
attr &= backgroundMask
- attr |= n256foreAttr[n256]
+ attr |= n256foreAttr[n256%len(n256foreAttr)]
i += 2
}
} else if len(token) == 5 && token[i+1] == "2" {
@@ -751,7 +761,7 @@ loop:
n256setup()
}
attr &= foregroundMask
- attr |= n256backAttr[n256]
+ attr |= n256backAttr[n256%len(n256backAttr)]
i += 2
}
} else if len(token) == 5 && token[i+1] == "2" {
diff --git a/vendor/github.com/miekg/dns/.travis.yml b/vendor/github.com/miekg/dns/.travis.yml
deleted file mode 100644
index 7d9b17275..000000000
--- a/vendor/github.com/miekg/dns/.travis.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-language: go
-sudo: false
-
-go:
- - 1.14.x
- - 1.15.x
- - tip
-
-env:
- - GO111MODULE=on
-
-script:
- - go generate ./... && test `git ls-files --modified | wc -l` = 0
- - go test -race -v -bench=. -coverprofile=coverage.txt -covermode=atomic ./...
-
-after_success:
- - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/miekg/dns/Makefile.release b/vendor/github.com/miekg/dns/Makefile.release
index 8fb748e8a..a0ce9b712 100644
--- a/vendor/github.com/miekg/dns/Makefile.release
+++ b/vendor/github.com/miekg/dns/Makefile.release
@@ -1,7 +1,7 @@
# Makefile for releasing.
#
# The release is controlled from version.go. The version found there is
-# used to tag the git repo, we're not building any artifects so there is nothing
+# used to tag the git repo, we're not building any artifacts so there is nothing
# to upload to github.
#
# * Up the version in version.go
diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md
index fc8394e26..d5b78ef41 100644
--- a/vendor/github.com/miekg/dns/README.md
+++ b/vendor/github.com/miekg/dns/README.md
@@ -69,6 +69,11 @@ A not-so-up-to-date-list-that-may-be-actually-current:
* https://zonedb.org/
* https://router7.org/
* https://github.com/fortio/dnsping
+* https://github.com/Luzilla/dnsbl_exporter
+* https://github.com/bodgit/tsig
+* https://github.com/v2fly/v2ray-core (test only)
+* https://kuma.io/
+
Send pull request if you want to be listed here.
@@ -165,6 +170,9 @@ Example programs can be found in the `github.com/miekg/exdns` repository.
* 7873 - Domain Name System (DNS) Cookies
* 8080 - EdDSA for DNSSEC
* 8499 - DNS Terminology
+* 8659 - DNS Certification Authority Authorization (CAA) Resource Record
+* 8914 - Extended DNS Errors
+* 8976 - Message Digest for DNS Zones (ZONEMD RR)
## Loosely Based Upon
diff --git a/vendor/github.com/miekg/dns/acceptfunc.go b/vendor/github.com/miekg/dns/acceptfunc.go
index 825617fe2..3f29a48c4 100644
--- a/vendor/github.com/miekg/dns/acceptfunc.go
+++ b/vendor/github.com/miekg/dns/acceptfunc.go
@@ -25,6 +25,7 @@ var DefaultMsgAcceptFunc MsgAcceptFunc = defaultMsgAcceptFunc
// MsgAcceptAction represents the action to be taken.
type MsgAcceptAction int
+// Allowed returned values from a MsgAcceptFunc.
const (
MsgAccept MsgAcceptAction = iota // Accept the message
MsgReject // Reject the message with a RcodeFormatError
diff --git a/vendor/github.com/miekg/dns/client.go b/vendor/github.com/miekg/dns/client.go
index e7ff786a2..f907698b5 100644
--- a/vendor/github.com/miekg/dns/client.go
+++ b/vendor/github.com/miekg/dns/client.go
@@ -23,6 +23,7 @@ type Conn struct {
net.Conn // a net.Conn holding the connection
UDPSize uint16 // minimum receive buffer for UDP messages
TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
+ TsigProvider TsigProvider // An implementation of the TsigProvider interface. If defined it replaces TsigSecret and is used for all TSIG operations.
tsigRequestMAC string
}
@@ -40,6 +41,7 @@ type Client struct {
ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
+ TsigProvider TsigProvider // An implementation of the TsigProvider interface. If defined it replaces TsigSecret and is used for all TSIG operations.
SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass
group singleflight
}
@@ -124,7 +126,6 @@ func (c *Client) Dial(address string) (conn *Conn, err error) {
// of 512 bytes
// To specify a local address or a timeout, the caller has to set the `Client.Dialer`
// attribute appropriately
-
func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, err error) {
co, err := c.Dial(address)
@@ -176,7 +177,7 @@ func (c *Client) exchange(m *Msg, co *Conn) (r *Msg, rtt time.Duration, err erro
co.UDPSize = c.UDPSize
}
- co.TsigSecret = c.TsigSecret
+ co.TsigSecret, co.TsigProvider = c.TsigSecret, c.TsigProvider
t := time.Now()
// write with the appropriate write timeout
co.SetWriteDeadline(t.Add(c.getTimeoutForRequest(c.writeTimeout())))
@@ -223,11 +224,15 @@ func (co *Conn) ReadMsg() (*Msg, error) {
return m, err
}
if t := m.IsTsig(); t != nil {
- if _, ok := co.TsigSecret[t.Hdr.Name]; !ok {
- return m, ErrSecret
+ if co.TsigProvider != nil {
+ err = tsigVerifyProvider(p, co.TsigProvider, co.tsigRequestMAC, false)
+ } else {
+ if _, ok := co.TsigSecret[t.Hdr.Name]; !ok {
+ return m, ErrSecret
+ }
+ // Need to work on the original message p, as that was used to calculate the tsig.
+ err = TsigVerify(p, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)
}
- // Need to work on the original message p, as that was used to calculate the tsig.
- err = TsigVerify(p, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)
}
return m, err
}
@@ -305,10 +310,14 @@ func (co *Conn) WriteMsg(m *Msg) (err error) {
var out []byte
if t := m.IsTsig(); t != nil {
mac := ""
- if _, ok := co.TsigSecret[t.Hdr.Name]; !ok {
- return ErrSecret
+ if co.TsigProvider != nil {
+ out, mac, err = tsigGenerateProvider(m, co.TsigProvider, co.tsigRequestMAC, false)
+ } else {
+ if _, ok := co.TsigSecret[t.Hdr.Name]; !ok {
+ return ErrSecret
+ }
+ out, mac, err = TsigGenerate(m, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)
}
- out, mac, err = TsigGenerate(m, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)
// Set for the next read, although only used in zone transfers
co.tsigRequestMAC = mac
} else {
@@ -331,11 +340,10 @@ func (co *Conn) Write(p []byte) (int, error) {
return co.Conn.Write(p)
}
- l := make([]byte, 2)
- binary.BigEndian.PutUint16(l, uint16(len(p)))
-
- n, err := (&net.Buffers{l, p}).WriteTo(co.Conn)
- return int(n), err
+ msg := make([]byte, 2+len(p))
+ binary.BigEndian.PutUint16(msg, uint16(len(p)))
+ copy(msg[2:], p)
+ return co.Conn.Write(msg)
}
// Return the appropriate timeout for a specific request
@@ -371,7 +379,7 @@ func Dial(network, address string) (conn *Conn, err error) {
func ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, err error) {
client := Client{Net: "udp"}
r, _, err = client.ExchangeContext(ctx, m, a)
- // ignorint rtt to leave the original ExchangeContext API unchanged, but
+ // ignoring rtt to leave the original ExchangeContext API unchanged, but
// this function will go away
return r, err
}
diff --git a/vendor/github.com/miekg/dns/defaults.go b/vendor/github.com/miekg/dns/defaults.go
index d874e3008..d47b0b1f2 100644
--- a/vendor/github.com/miekg/dns/defaults.go
+++ b/vendor/github.com/miekg/dns/defaults.go
@@ -349,10 +349,7 @@ func ReverseAddr(addr string) (arpa string, err error) {
// Add it, in reverse, to the buffer
for i := len(ip) - 1; i >= 0; i-- {
v := ip[i]
- buf = append(buf, hexDigit[v&0xF])
- buf = append(buf, '.')
- buf = append(buf, hexDigit[v>>4])
- buf = append(buf, '.')
+ buf = append(buf, hexDigit[v&0xF], '.', hexDigit[v>>4], '.')
}
// Append "ip6.arpa." and return (buf already has the final .)
buf = append(buf, "ip6.arpa."...)
diff --git a/vendor/github.com/miekg/dns/dns.go b/vendor/github.com/miekg/dns/dns.go
index ad83a27ec..a88484b06 100644
--- a/vendor/github.com/miekg/dns/dns.go
+++ b/vendor/github.com/miekg/dns/dns.go
@@ -1,6 +1,9 @@
package dns
-import "strconv"
+import (
+ "encoding/hex"
+ "strconv"
+)
const (
year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits.
@@ -111,7 +114,7 @@ func (h *RR_Header) parse(c *zlexer, origin string) *ParseError {
// ToRFC3597 converts a known RR to the unknown RR representation from RFC 3597.
func (rr *RFC3597) ToRFC3597(r RR) error {
- buf := make([]byte, Len(r)*2)
+ buf := make([]byte, Len(r))
headerEnd, off, err := packRR(r, buf, 0, compressionMap{}, false)
if err != nil {
return err
@@ -126,9 +129,30 @@ func (rr *RFC3597) ToRFC3597(r RR) error {
}
_, err = rr.unpack(buf, headerEnd)
+ return err
+}
+
+// fromRFC3597 converts an unknown RR representation from RFC 3597 to the known RR type.
+func (rr *RFC3597) fromRFC3597(r RR) error {
+ hdr := r.Header()
+ *hdr = rr.Hdr
+
+ // Can't overflow uint16 as the length of Rdata is validated in (*RFC3597).parse.
+ // We can only get here when rr was constructed with that method.
+ hdr.Rdlength = uint16(hex.DecodedLen(len(rr.Rdata)))
+
+ if noRdata(*hdr) {
+ // Dynamic update.
+ return nil
+ }
+
+ // rr.pack requires an extra allocation and a copy so we just decode Rdata
+ // manually, it's simpler anyway.
+ msg, err := hex.DecodeString(rr.Rdata)
if err != nil {
return err
}
- return nil
+ _, err = r.unpack(msg, 0)
+ return err
}
diff --git a/vendor/github.com/miekg/dns/dnssec.go b/vendor/github.com/miekg/dns/dnssec.go
index 900f6e059..8539aae6c 100644
--- a/vendor/github.com/miekg/dns/dnssec.go
+++ b/vendor/github.com/miekg/dns/dnssec.go
@@ -4,12 +4,13 @@ import (
"bytes"
"crypto"
"crypto/ecdsa"
+ "crypto/ed25519"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
- _ "crypto/sha1"
- _ "crypto/sha256"
- _ "crypto/sha512"
+ _ "crypto/sha1" // need its init function
+ _ "crypto/sha256" // need its init function
+ _ "crypto/sha512" // need its init function
"encoding/asn1"
"encoding/binary"
"encoding/hex"
@@ -17,8 +18,6 @@ import (
"sort"
"strings"
"time"
-
- "golang.org/x/crypto/ed25519"
)
// DNSSEC encryption algorithm codes.
@@ -373,6 +372,8 @@ func sign(k crypto.Signer, hashed []byte, hash crypto.Hash, alg uint8) ([]byte,
// Verify validates an RRSet with the signature and key. This is only the
// cryptographic test, the signature validity period must be checked separately.
// This function copies the rdata of some RRs (to lowercase domain names) for the validation to work.
+// It also checks that the Zone Key bit (RFC 4034 2.1.1) is set on the DNSKEY
+// and that the Protocol field is set to 3 (RFC 4034 2.1.2).
func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
// First the easy checks
if !IsRRset(rrset) {
@@ -393,6 +394,12 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
if k.Protocol != 3 {
return ErrKey
}
+ // RFC 4034 2.1.1 If bit 7 has value 0, then the DNSKEY record holds some
+ // other type of DNS public key and MUST NOT be used to verify RRSIGs that
+ // cover RRsets.
+ if k.Flags&ZONE == 0 {
+ return ErrKey
+ }
// IsRRset checked that we have at least one RR and that the RRs in
// the set have consistent type, class, and name. Also check that type and
@@ -500,7 +507,7 @@ func (rr *RRSIG) ValidityPeriod(t time.Time) bool {
return ti <= utc && utc <= te
}
-// Return the signatures base64 encodedig sigdata as a byte slice.
+// Return the signatures base64 encoding sigdata as a byte slice.
func (rr *RRSIG) sigBuf() []byte {
sigbuf, err := fromBase64([]byte(rr.Signature))
if err != nil {
diff --git a/vendor/github.com/miekg/dns/dnssec_keygen.go b/vendor/github.com/miekg/dns/dnssec_keygen.go
index 2ab7b6d73..b8124b561 100644
--- a/vendor/github.com/miekg/dns/dnssec_keygen.go
+++ b/vendor/github.com/miekg/dns/dnssec_keygen.go
@@ -3,12 +3,11 @@ package dns
import (
"crypto"
"crypto/ecdsa"
+ "crypto/ed25519"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"math/big"
-
- "golang.org/x/crypto/ed25519"
)
// Generate generates a DNSKEY of the given bit size.
diff --git a/vendor/github.com/miekg/dns/dnssec_keyscan.go b/vendor/github.com/miekg/dns/dnssec_keyscan.go
index 6cbc28483..f79658169 100644
--- a/vendor/github.com/miekg/dns/dnssec_keyscan.go
+++ b/vendor/github.com/miekg/dns/dnssec_keyscan.go
@@ -4,13 +4,12 @@ import (
"bufio"
"crypto"
"crypto/ecdsa"
+ "crypto/ed25519"
"crypto/rsa"
"io"
"math/big"
"strconv"
"strings"
-
- "golang.org/x/crypto/ed25519"
)
// NewPrivateKey returns a PrivateKey by parsing the string s.
diff --git a/vendor/github.com/miekg/dns/dnssec_privkey.go b/vendor/github.com/miekg/dns/dnssec_privkey.go
index 072e445da..f16077296 100644
--- a/vendor/github.com/miekg/dns/dnssec_privkey.go
+++ b/vendor/github.com/miekg/dns/dnssec_privkey.go
@@ -3,11 +3,10 @@ package dns
import (
"crypto"
"crypto/ecdsa"
+ "crypto/ed25519"
"crypto/rsa"
"math/big"
"strconv"
-
- "golang.org/x/crypto/ed25519"
)
const format = "Private-key-format: v1.3\n"
diff --git a/vendor/github.com/miekg/dns/doc.go b/vendor/github.com/miekg/dns/doc.go
index 6861de774..5c83f82e4 100644
--- a/vendor/github.com/miekg/dns/doc.go
+++ b/vendor/github.com/miekg/dns/doc.go
@@ -159,7 +159,7 @@ shows the options you have and what functions to call.
TRANSACTION SIGNATURE
An TSIG or transaction signature adds a HMAC TSIG record to each message sent.
-The supported algorithms include: HmacMD5, HmacSHA1, HmacSHA256 and HmacSHA512.
+The supported algorithms include: HmacSHA1, HmacSHA256 and HmacSHA512.
Basic use pattern when querying with a TSIG name "axfr." (note that these key names
must be fully qualified - as they are domain names) and the base64 secret
@@ -174,7 +174,7 @@ changes to the RRset after calling SetTsig() the signature will be incorrect.
c.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
m := new(dns.Msg)
m.SetQuestion("miek.nl.", dns.TypeMX)
- m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
+ m.SetTsig("axfr.", dns.HmacSHA256, 300, time.Now().Unix())
...
// When sending the TSIG RR is calculated and filled in before sending
@@ -187,13 +187,37 @@ request an AXFR for miek.nl. with TSIG key named "axfr." and secret
m := new(dns.Msg)
t.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
m.SetAxfr("miek.nl.")
- m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
+ m.SetTsig("axfr.", dns.HmacSHA256, 300, time.Now().Unix())
c, err := t.In(m, "176.58.119.54:53")
for r := range c { ... }
You can now read the records from the transfer as they come in. Each envelope
is checked with TSIG. If something is not correct an error is returned.
+A custom TSIG implementation can be used. This requires additional code to
+perform any session establishment and signature generation/verification. The
+client must be configured with an implementation of the TsigProvider interface:
+
+ type Provider struct{}
+
+ func (*Provider) Generate(msg []byte, tsig *dns.TSIG) ([]byte, error) {
+ // Use tsig.Hdr.Name and tsig.Algorithm in your code to
+ // generate the MAC using msg as the payload.
+ }
+
+ func (*Provider) Verify(msg []byte, tsig *dns.TSIG) error {
+ // Use tsig.Hdr.Name and tsig.Algorithm in your code to verify
+ // that msg matches the value in tsig.MAC.
+ }
+
+ c := new(dns.Client)
+ c.TsigProvider = new(Provider)
+ m := new(dns.Msg)
+ m.SetQuestion("miek.nl.", dns.TypeMX)
+ m.SetTsig(keyname, dns.HmacSHA256, 300, time.Now().Unix())
+ ...
+ // TSIG RR is calculated by calling your Generate method
+
Basic use pattern validating and replying to a message that has TSIG set.
server := &dns.Server{Addr: ":53", Net: "udp"}
@@ -207,7 +231,7 @@ Basic use pattern validating and replying to a message that has TSIG set.
if r.IsTsig() != nil {
if w.TsigStatus() == nil {
// *Msg r has an TSIG record and it was validated
- m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
+ m.SetTsig("axfr.", dns.HmacSHA256, 300, time.Now().Unix())
} else {
// *Msg r has an TSIG records and it was not validated
}
diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go
index 04808d578..c9181783d 100644
--- a/vendor/github.com/miekg/dns/edns.go
+++ b/vendor/github.com/miekg/dns/edns.go
@@ -22,11 +22,47 @@ const (
EDNS0COOKIE = 0xa // EDNS0 Cookie
EDNS0TCPKEEPALIVE = 0xb // EDNS0 tcp keep alive (See RFC 7828)
EDNS0PADDING = 0xc // EDNS0 padding (See RFC 7830)
+ EDNS0EDE = 0xf // EDNS0 extended DNS errors (See RFC 8914)
EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (See RFC 6891)
EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (See RFC 6891)
_DO = 1 << 15 // DNSSEC OK
)
+// makeDataOpt is used to unpack the EDNS0 option(s) from a message.
+func makeDataOpt(code uint16) EDNS0 {
+ // All the EDNS0.* constants above need to be in this switch.
+ switch code {
+ case EDNS0LLQ:
+ return new(EDNS0_LLQ)
+ case EDNS0UL:
+ return new(EDNS0_UL)
+ case EDNS0NSID:
+ return new(EDNS0_NSID)
+ case EDNS0DAU:
+ return new(EDNS0_DAU)
+ case EDNS0DHU:
+ return new(EDNS0_DHU)
+ case EDNS0N3U:
+ return new(EDNS0_N3U)
+ case EDNS0SUBNET:
+ return new(EDNS0_SUBNET)
+ case EDNS0EXPIRE:
+ return new(EDNS0_EXPIRE)
+ case EDNS0COOKIE:
+ return new(EDNS0_COOKIE)
+ case EDNS0TCPKEEPALIVE:
+ return new(EDNS0_TCP_KEEPALIVE)
+ case EDNS0PADDING:
+ return new(EDNS0_PADDING)
+ case EDNS0EDE:
+ return new(EDNS0_EDE)
+ default:
+ e := new(EDNS0_LOCAL)
+ e.Code = code
+ return e
+ }
+}
+
// OPT is the EDNS0 RR appended to messages to convey extra (meta) information.
// See RFC 6891.
type OPT struct {
@@ -73,6 +109,8 @@ func (rr *OPT) String() string {
s += "\n; LOCAL OPT: " + o.String()
case *EDNS0_PADDING:
s += "\n; PADDING: " + o.String()
+ case *EDNS0_EDE:
+ s += "\n; EDE: " + o.String()
}
}
return s
@@ -88,11 +126,11 @@ func (rr *OPT) len(off int, compression map[string]struct{}) int {
return l
}
-func (rr *OPT) parse(c *zlexer, origin string) *ParseError {
- panic("dns: internal error: parse should never be called on OPT")
+func (*OPT) parse(c *zlexer, origin string) *ParseError {
+ return &ParseError{err: "OPT records do not have a presentation format"}
}
-func (r1 *OPT) isDuplicate(r2 RR) bool { return false }
+func (rr *OPT) isDuplicate(r2 RR) bool { return false }
// return the old value -> delete SetVersion?
@@ -148,6 +186,16 @@ func (rr *OPT) SetDo(do ...bool) {
}
}
+// Z returns the Z part of the OPT RR as a uint16 with only the 15 least significant bits used.
+func (rr *OPT) Z() uint16 {
+ return uint16(rr.Hdr.Ttl & 0x7FFF)
+}
+
+// SetZ sets the Z part of the OPT RR, note only the 15 least significant bits of z are used.
+func (rr *OPT) SetZ(z uint16) {
+ rr.Hdr.Ttl = rr.Hdr.Ttl&^0x7FFF | uint32(z&0x7FFF)
+}
+
// EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to it.
type EDNS0 interface {
// Option returns the option code for the option.
@@ -452,7 +500,7 @@ func (e *EDNS0_LLQ) copy() EDNS0 {
return &EDNS0_LLQ{e.Code, e.Version, e.Opcode, e.Error, e.Id, e.LeaseLife}
}
-// EDNS0_DUA implements the EDNS0 "DNSSEC Algorithm Understood" option. See RFC 6975.
+// EDNS0_DAU implements the EDNS0 "DNSSEC Algorithm Understood" option. See RFC 6975.
type EDNS0_DAU struct {
Code uint16 // Always EDNS0DAU
AlgCode []uint8
@@ -525,7 +573,7 @@ func (e *EDNS0_N3U) String() string {
}
func (e *EDNS0_N3U) copy() EDNS0 { return &EDNS0_N3U{e.Code, e.AlgCode} }
-// EDNS0_EXPIRE implementes the EDNS0 option as described in RFC 7314.
+// EDNS0_EXPIRE implements the EDNS0 option as described in RFC 7314.
type EDNS0_EXPIRE struct {
Code uint16 // Always EDNS0EXPIRE
Expire uint32
@@ -673,3 +721,101 @@ func (e *EDNS0_PADDING) copy() EDNS0 {
copy(b, e.Padding)
return &EDNS0_PADDING{b}
}
+
+// Extended DNS Error Codes (RFC 8914).
+const (
+ ExtendedErrorCodeOther uint16 = iota
+ ExtendedErrorCodeUnsupportedDNSKEYAlgorithm
+ ExtendedErrorCodeUnsupportedDSDigestType
+ ExtendedErrorCodeStaleAnswer
+ ExtendedErrorCodeForgedAnswer
+ ExtendedErrorCodeDNSSECIndeterminate
+ ExtendedErrorCodeDNSBogus
+ ExtendedErrorCodeSignatureExpired
+ ExtendedErrorCodeSignatureNotYetValid
+ ExtendedErrorCodeDNSKEYMissing
+ ExtendedErrorCodeRRSIGsMissing
+ ExtendedErrorCodeNoZoneKeyBitSet
+ ExtendedErrorCodeNSECMissing
+ ExtendedErrorCodeCachedError
+ ExtendedErrorCodeNotReady
+ ExtendedErrorCodeBlocked
+ ExtendedErrorCodeCensored
+ ExtendedErrorCodeFiltered
+ ExtendedErrorCodeProhibited
+ ExtendedErrorCodeStaleNXDOMAINAnswer
+ ExtendedErrorCodeNotAuthoritative
+ ExtendedErrorCodeNotSupported
+ ExtendedErrorCodeNoReachableAuthority
+ ExtendedErrorCodeNetworkError
+ ExtendedErrorCodeInvalidData
+)
+
+// ExtendedErrorCodeToString maps extended error info codes to a human readable
+// description.
+var ExtendedErrorCodeToString = map[uint16]string{
+ ExtendedErrorCodeOther: "Other",
+ ExtendedErrorCodeUnsupportedDNSKEYAlgorithm: "Unsupported DNSKEY Algorithm",
+ ExtendedErrorCodeUnsupportedDSDigestType: "Unsupported DS Digest Type",
+ ExtendedErrorCodeStaleAnswer: "Stale Answer",
+ ExtendedErrorCodeForgedAnswer: "Forged Answer",
+ ExtendedErrorCodeDNSSECIndeterminate: "DNSSEC Indeterminate",
+ ExtendedErrorCodeDNSBogus: "DNSSEC Bogus",
+ ExtendedErrorCodeSignatureExpired: "Signature Expired",
+ ExtendedErrorCodeSignatureNotYetValid: "Signature Not Yet Valid",
+ ExtendedErrorCodeDNSKEYMissing: "DNSKEY Missing",
+ ExtendedErrorCodeRRSIGsMissing: "RRSIGs Missing",
+ ExtendedErrorCodeNoZoneKeyBitSet: "No Zone Key Bit Set",
+ ExtendedErrorCodeNSECMissing: "NSEC Missing",
+ ExtendedErrorCodeCachedError: "Cached Error",
+ ExtendedErrorCodeNotReady: "Not Ready",
+ ExtendedErrorCodeBlocked: "Blocked",
+ ExtendedErrorCodeCensored: "Censored",
+ ExtendedErrorCodeFiltered: "Filtered",
+ ExtendedErrorCodeProhibited: "Prohibited",
+ ExtendedErrorCodeStaleNXDOMAINAnswer: "Stale NXDOMAIN Answer",
+ ExtendedErrorCodeNotAuthoritative: "Not Authoritative",
+ ExtendedErrorCodeNotSupported: "Not Supported",
+ ExtendedErrorCodeNoReachableAuthority: "No Reachable Authority",
+ ExtendedErrorCodeNetworkError: "Network Error",
+ ExtendedErrorCodeInvalidData: "Invalid Data",
+}
+
+// StringToExtendedErrorCode is a map from human readable descriptions to
+// extended error info codes.
+var StringToExtendedErrorCode = reverseInt16(ExtendedErrorCodeToString)
+
+// EDNS0_EDE option is used to return additional information about the cause of
+// DNS errors.
+type EDNS0_EDE struct {
+ InfoCode uint16
+ ExtraText string
+}
+
+// Option implements the EDNS0 interface.
+func (e *EDNS0_EDE) Option() uint16 { return EDNS0EDE }
+func (e *EDNS0_EDE) copy() EDNS0 { return &EDNS0_EDE{e.InfoCode, e.ExtraText} }
+
+func (e *EDNS0_EDE) String() string {
+ info := strconv.FormatUint(uint64(e.InfoCode), 10)
+ if s, ok := ExtendedErrorCodeToString[e.InfoCode]; ok {
+ info += fmt.Sprintf(" (%s)", s)
+ }
+ return fmt.Sprintf("%s: (%s)", info, e.ExtraText)
+}
+
+func (e *EDNS0_EDE) pack() ([]byte, error) {
+ b := make([]byte, 2+len(e.ExtraText))
+ binary.BigEndian.PutUint16(b[0:], e.InfoCode)
+ copy(b[2:], []byte(e.ExtraText))
+ return b, nil
+}
+
+func (e *EDNS0_EDE) unpack(b []byte) error {
+ if len(b) < 2 {
+ return ErrBuf
+ }
+ e.InfoCode = binary.BigEndian.Uint16(b[0:])
+ e.ExtraText = string(b[2:])
+ return nil
+}
diff --git a/vendor/github.com/miekg/dns/generate.go b/vendor/github.com/miekg/dns/generate.go
index f713074a1..ac8df34dd 100644
--- a/vendor/github.com/miekg/dns/generate.go
+++ b/vendor/github.com/miekg/dns/generate.go
@@ -75,10 +75,10 @@ func (zp *ZoneParser) generate(l lex) (RR, bool) {
r := &generateReader{
s: s,
- cur: int(start),
- start: int(start),
- end: int(end),
- step: int(step),
+ cur: start,
+ start: start,
+ end: end,
+ step: step,
file: zp.file,
lex: &l,
@@ -94,10 +94,10 @@ type generateReader struct {
s string
si int
- cur int
- start int
- end int
- step int
+ cur int64
+ start int64
+ end int64
+ step int64
mod bytes.Buffer
@@ -173,7 +173,7 @@ func (r *generateReader) ReadByte() (byte, error) {
return '$', nil
}
- var offset int
+ var offset int64
// Search for { and }
if r.s[si+1] == '{' {
@@ -188,7 +188,7 @@ func (r *generateReader) ReadByte() (byte, error) {
if errMsg != "" {
return 0, r.parseError(errMsg, si+3+sep)
}
- if r.start+offset < 0 || int64(r.end) + int64(offset) > 1<<31-1 {
+ if r.start+offset < 0 || r.end+offset > 1<<31-1 {
return 0, r.parseError("bad offset in $GENERATE", si+3+sep)
}
@@ -208,7 +208,7 @@ func (r *generateReader) ReadByte() (byte, error) {
}
// Convert a $GENERATE modifier 0,0,d to something Printf can deal with.
-func modToPrintf(s string) (string, int, string) {
+func modToPrintf(s string) (string, int64, string) {
// Modifier is { offset [ ,width [ ,base ] ] } - provide default
// values for optional width and type, if necessary.
var offStr, widthStr, base string
@@ -240,8 +240,8 @@ func modToPrintf(s string) (string, int, string) {
}
if width == 0 {
- return "%" + base, int(offset), ""
+ return "%" + base, offset, ""
}
- return "%0" + widthStr + base, int(offset), ""
+ return "%0" + widthStr + base, offset, ""
}
diff --git a/vendor/github.com/miekg/dns/go.mod b/vendor/github.com/miekg/dns/go.mod
index 6003d0573..aff65114d 100644
--- a/vendor/github.com/miekg/dns/go.mod
+++ b/vendor/github.com/miekg/dns/go.mod
@@ -1,11 +1,9 @@
module github.com/miekg/dns
-go 1.12
+go 1.14
require (
- golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550
- golang.org/x/net v0.0.0-20190923162816-aa69164e4478
- golang.org/x/sync v0.0.0-20190423024810-112230192c58
- golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe
- golang.org/x/tools v0.0.0-20191216052735-49a3e744a425 // indirect
+ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110
+ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
+ golang.org/x/sys v0.0.0-20210303074136-134d130e1a04
)
diff --git a/vendor/github.com/miekg/dns/go.sum b/vendor/github.com/miekg/dns/go.sum
index 96bda3a94..3359ebea4 100644
--- a/vendor/github.com/miekg/dns/go.sum
+++ b/vendor/github.com/miekg/dns/go.sum
@@ -1,39 +1,10 @@
-golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4 h1:Vk3wNqEZwyGyei9yq5ekj7frek2u7HUfffJ1/opblzc=
-golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472 h1:Gv7RPwsi3eZ2Fgewe3CBsuOebPwO27PoXzRpJPsvSSM=
-golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392 h1:ACG4HJsFiNMf47Y4PeRoebLNy/2lXT9EtprMuTFWt1M=
-golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3 h1:dgd4x4kJt7G4k4m93AYLzM8Ni6h2qLTfh9n9vXJT3/0=
-golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM=
-golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g=
-golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180928133829-e4b3c5e90611 h1:O33LKL7WyJgjN9CvxfTIomjIClbd/Kq86/iipowHQU0=
-golang.org/x/sys v0.0.0-20180928133829-e4b3c5e90611/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190904154756-749cb33beabd h1:DBH9mDw0zluJT/R+nGuV3jWFWLFaHyYZWD4tOT+cjn0=
-golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe h1:6fAMxZRR6sl1Uq8U61gxU+kPTs2tR8uOySCbBP7BN/M=
-golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210303074136-134d130e1a04 h1:cEhElsAv9LUt9ZUUocxzWe05oFLVd+AA2nstydTeI8g=
+golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191216052735-49a3e744a425 h1:VvQyQJN0tSuecqgcIxMWnnfG5kSmgy9KZR9sW3W5QeA=
-golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/vendor/github.com/miekg/dns/labels.go b/vendor/github.com/miekg/dns/labels.go
index df1675dfd..f9faacfeb 100644
--- a/vendor/github.com/miekg/dns/labels.go
+++ b/vendor/github.com/miekg/dns/labels.go
@@ -10,7 +10,7 @@ package dns
// escaped dots (\.) for instance.
// s must be a syntactically valid domain name, see IsDomainName.
func SplitDomainName(s string) (labels []string) {
- if len(s) == 0 {
+ if s == "" {
return nil
}
fqdnEnd := 0 // offset of the final '.' or the length of the name
diff --git a/vendor/github.com/miekg/dns/listen_go_not111.go b/vendor/github.com/miekg/dns/listen_no_reuseport.go
similarity index 100%
rename from vendor/github.com/miekg/dns/listen_go_not111.go
rename to vendor/github.com/miekg/dns/listen_no_reuseport.go
diff --git a/vendor/github.com/miekg/dns/listen_go111.go b/vendor/github.com/miekg/dns/listen_reuseport.go
similarity index 100%
rename from vendor/github.com/miekg/dns/listen_go111.go
rename to vendor/github.com/miekg/dns/listen_reuseport.go
diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/miekg/dns/msg.go
index 7001f6da7..ead4b6931 100644
--- a/vendor/github.com/miekg/dns/msg.go
+++ b/vendor/github.com/miekg/dns/msg.go
@@ -624,11 +624,18 @@ func UnpackRRWithHeader(h RR_Header, msg []byte, off int) (rr RR, off1 int, err
rr = &RFC3597{Hdr: h}
}
- if noRdata(h) {
- return rr, off, nil
+ if off < 0 || off > len(msg) {
+ return &h, off, &Error{err: "bad off"}
}
end := off + int(h.Rdlength)
+ if end < off || end > len(msg) {
+ return &h, end, &Error{err: "bad rdlength"}
+ }
+
+ if noRdata(h) {
+ return rr, off, nil
+ }
off, err = rr.unpack(msg, off)
if err != nil {
@@ -735,7 +742,7 @@ func (dns *Msg) packBufferWithCompressionMap(buf []byte, compression compression
}
// Set extended rcode unconditionally if we have an opt, this will allow
- // reseting the extended rcode bits if they need to.
+ // resetting the extended rcode bits if they need to.
if opt := dns.IsEdns0(); opt != nil {
opt.SetExtendedRcode(uint16(dns.Rcode))
} else if dns.Rcode > 0xF {
diff --git a/vendor/github.com/miekg/dns/msg_helpers.go b/vendor/github.com/miekg/dns/msg_helpers.go
index 47625ed09..5904927ca 100644
--- a/vendor/github.com/miekg/dns/msg_helpers.go
+++ b/vendor/github.com/miekg/dns/msg_helpers.go
@@ -438,35 +438,6 @@ Option:
return edns, off, nil
}
-func makeDataOpt(code uint16) EDNS0 {
- switch code {
- case EDNS0NSID:
- return new(EDNS0_NSID)
- case EDNS0SUBNET:
- return new(EDNS0_SUBNET)
- case EDNS0COOKIE:
- return new(EDNS0_COOKIE)
- case EDNS0EXPIRE:
- return new(EDNS0_EXPIRE)
- case EDNS0UL:
- return new(EDNS0_UL)
- case EDNS0LLQ:
- return new(EDNS0_LLQ)
- case EDNS0DAU:
- return new(EDNS0_DAU)
- case EDNS0DHU:
- return new(EDNS0_DHU)
- case EDNS0N3U:
- return new(EDNS0_N3U)
- case EDNS0PADDING:
- return new(EDNS0_PADDING)
- default:
- e := new(EDNS0_LOCAL)
- e.Code = code
- return e
- }
-}
-
func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) {
for _, el := range options {
b, err := el.pack()
diff --git a/vendor/github.com/miekg/dns/msg_truncate.go b/vendor/github.com/miekg/dns/msg_truncate.go
index 156c5a0e8..2ddc9a7da 100644
--- a/vendor/github.com/miekg/dns/msg_truncate.go
+++ b/vendor/github.com/miekg/dns/msg_truncate.go
@@ -8,6 +8,11 @@ package dns
// record adding as many records as possible without exceeding the
// requested buffer size.
//
+// If the message fits within the requested size without compression,
+// Truncate will set the message's Compress attribute to false. It is
+// the caller's responsibility to set it back to true if they wish to
+// compress the payload regardless of size.
+//
// The TC bit will be set if any records were excluded from the message.
// If the TC bit is already set on the message it will be retained.
// TC indicates that the client should retry over TCP.
diff --git a/vendor/github.com/miekg/dns/privaterr.go b/vendor/github.com/miekg/dns/privaterr.go
index cda6cae31..d256b652e 100644
--- a/vendor/github.com/miekg/dns/privaterr.go
+++ b/vendor/github.com/miekg/dns/privaterr.go
@@ -6,7 +6,7 @@ import "strings"
// RFC 6895. This allows one to experiment with new RR types, without requesting an
// official type code. Also see dns.PrivateHandle and dns.PrivateHandleRemove.
type PrivateRdata interface {
- // String returns the text presentaton of the Rdata of the Private RR.
+ // String returns the text presentation of the Rdata of the Private RR.
String() string
// Parse parses the Rdata of the private RR.
Parse([]string) error
@@ -90,7 +90,7 @@ Fetch:
return nil
}
-func (r1 *PrivateRR) isDuplicate(r2 RR) bool { return false }
+func (r *PrivateRR) isDuplicate(r2 RR) bool { return false }
// PrivateHandle registers a private resource record type. It requires
// string and numeric representation of private RR type and generator function as argument.
diff --git a/vendor/github.com/miekg/dns/scan.go b/vendor/github.com/miekg/dns/scan.go
index aa2840efb..57be98827 100644
--- a/vendor/github.com/miekg/dns/scan.go
+++ b/vendor/github.com/miekg/dns/scan.go
@@ -150,6 +150,9 @@ func ReadRR(r io.Reader, file string) (RR, error) {
// The text "; this is comment" is returned from Comment. Comments inside
// the RR are returned concatenated along with the RR. Comments on a line
// by themselves are discarded.
+//
+// Callers should not assume all returned data in an Resource Record is
+// syntactically correct, e.g. illegal base64 in RRSIGs will be returned as-is.
type ZoneParser struct {
c *zlexer
@@ -577,10 +580,23 @@ func (zp *ZoneParser) Next() (RR, bool) {
st = zExpectRdata
case zExpectRdata:
- var rr RR
- if newFn, ok := TypeToRR[h.Rrtype]; ok && canParseAsRR(h.Rrtype) {
+ var (
+ rr RR
+ parseAsRFC3597 bool
+ )
+ if newFn, ok := TypeToRR[h.Rrtype]; ok {
rr = newFn()
*rr.Header() = *h
+
+ // We may be parsing a known RR type using the RFC3597 format.
+ // If so, we handle that here in a generic way.
+ //
+ // This is also true for PrivateRR types which will have the
+ // RFC3597 parsing done for them and the Unpack method called
+ // to populate the RR instead of simply deferring to Parse.
+ if zp.c.Peek().token == "\\#" {
+ parseAsRFC3597 = true
+ }
} else {
rr = &RFC3597{Hdr: *h}
}
@@ -600,13 +616,18 @@ func (zp *ZoneParser) Next() (RR, bool) {
return zp.setParseError("unexpected newline", l)
}
- if err := rr.parse(zp.c, zp.origin); err != nil {
+ parseAsRR := rr
+ if parseAsRFC3597 {
+ parseAsRR = &RFC3597{Hdr: *h}
+ }
+
+ if err := parseAsRR.parse(zp.c, zp.origin); err != nil {
// err is a concrete *ParseError without the file field set.
// The setParseError call below will construct a new
// *ParseError with file set to zp.file.
- // If err.lex is nil than we have encounter an unknown RR type
- // in that case we substitute our current lex token.
+ // err.lex may be nil in which case we substitute our current
+ // lex token.
if err.lex == (lex{}) {
return zp.setParseError(err.err, l)
}
@@ -614,6 +635,13 @@ func (zp *ZoneParser) Next() (RR, bool) {
return zp.setParseError(err.err, err.lex)
}
+ if parseAsRFC3597 {
+ err := parseAsRR.(*RFC3597).fromRFC3597(rr)
+ if err != nil {
+ return zp.setParseError(err.Error(), l)
+ }
+ }
+
return rr, true
}
}
@@ -623,18 +651,6 @@ func (zp *ZoneParser) Next() (RR, bool) {
return nil, false
}
-// canParseAsRR returns true if the record type can be parsed as a
-// concrete RR. It blacklists certain record types that must be parsed
-// according to RFC 3597 because they lack a presentation format.
-func canParseAsRR(rrtype uint16) bool {
- switch rrtype {
- case TypeANY, TypeNULL, TypeOPT, TypeTSIG:
- return false
- default:
- return true
- }
-}
-
type zlexer struct {
br io.ByteReader
@@ -1220,7 +1236,7 @@ func stringToCm(token string) (e, m uint8, ok bool) {
// 'nn.1' must be treated as 'nn-meters and 10cm, not 1cm.
cmeters *= 10
}
- if len(s[0]) == 0 {
+ if s[0] == "" {
// This will allow omitting the 'meter' part, like .01 (meaning 0.01m = 1cm).
break
}
@@ -1290,7 +1306,7 @@ func appendOrigin(name, origin string) string {
// LOC record helper function
func locCheckNorth(token string, latitude uint32) (uint32, bool) {
- if latitude > 90 * 1000 * 60 * 60 {
+ if latitude > 90*1000*60*60 {
return latitude, false
}
switch token {
@@ -1304,7 +1320,7 @@ func locCheckNorth(token string, latitude uint32) (uint32, bool) {
// LOC record helper function
func locCheckEast(token string, longitude uint32) (uint32, bool) {
- if longitude > 180 * 1000 * 60 * 60 {
+ if longitude > 180*1000*60*60 {
return longitude, false
}
switch token {
@@ -1339,7 +1355,7 @@ func stringToNodeID(l lex) (uint64, *ParseError) {
if len(l.token) < 19 {
return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
}
- // There must be three colons at fixes postitions, if not its a parse error
+ // There must be three colons at fixes positions, if not its a parse error
if l.token[4] != ':' && l.token[9] != ':' && l.token[14] != ':' {
return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
}
diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go
index 69f10052f..e398484da 100644
--- a/vendor/github.com/miekg/dns/scan_rr.go
+++ b/vendor/github.com/miekg/dns/scan_rr.go
@@ -609,7 +609,7 @@ func (rr *LOC) parse(c *zlexer, o string) *ParseError {
c.Next() // zBlank
l, _ = c.Next()
- if i, err := strconv.ParseFloat(l.token, 32); err != nil || l.err || i < 0 || i >= 60 {
+ if i, err := strconv.ParseFloat(l.token, 64); err != nil || l.err || i < 0 || i >= 60 {
return &ParseError{"", "bad LOC Latitude seconds", l}
} else {
rr.Latitude += uint32(1000 * i)
@@ -645,7 +645,7 @@ East:
}
c.Next() // zBlank
l, _ = c.Next()
- if i, err := strconv.ParseFloat(l.token, 32); err != nil || l.err || i < 0 || i >= 60 {
+ if i, err := strconv.ParseFloat(l.token, 64); err != nil || l.err || i < 0 || i >= 60 {
return &ParseError{"", "bad LOC Longitude seconds", l}
} else {
rr.Longitude += uint32(1000 * i)
@@ -662,7 +662,7 @@ East:
Altitude:
c.Next() // zBlank
l, _ = c.Next()
- if len(l.token) == 0 || l.err {
+ if l.token == "" || l.err {
return &ParseError{"", "bad LOC Altitude", l}
}
if l.token[len(l.token)-1] == 'M' || l.token[len(l.token)-1] == 'm' {
@@ -722,7 +722,7 @@ func (rr *HIP) parse(c *zlexer, o string) *ParseError {
c.Next() // zBlank
l, _ = c.Next() // zString
- if len(l.token) == 0 || l.err {
+ if l.token == "" || l.err {
return &ParseError{"", "bad HIP Hit", l}
}
rr.Hit = l.token // This can not contain spaces, see RFC 5205 Section 6.
@@ -730,11 +730,15 @@ func (rr *HIP) parse(c *zlexer, o string) *ParseError {
c.Next() // zBlank
l, _ = c.Next() // zString
- if len(l.token) == 0 || l.err {
+ if l.token == "" || l.err {
return &ParseError{"", "bad HIP PublicKey", l}
}
rr.PublicKey = l.token // This cannot contain spaces
- rr.PublicKeyLength = uint16(base64.StdEncoding.DecodedLen(len(rr.PublicKey)))
+ decodedPK, decodedPKerr := base64.StdEncoding.DecodeString(rr.PublicKey)
+ if decodedPKerr != nil {
+ return &ParseError{"", "bad HIP PublicKey", l}
+ }
+ rr.PublicKeyLength = uint16(len(decodedPK))
// RendezvousServers (if any)
l, _ = c.Next()
@@ -846,6 +850,38 @@ func (rr *CSYNC) parse(c *zlexer, o string) *ParseError {
return nil
}
+func (rr *ZONEMD) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ i, e := strconv.ParseUint(l.token, 10, 32)
+ if e != nil || l.err {
+ return &ParseError{"", "bad ZONEMD Serial", l}
+ }
+ rr.Serial = uint32(i)
+
+ c.Next() // zBlank
+ l, _ = c.Next()
+ i, e1 := strconv.ParseUint(l.token, 10, 8)
+ if e1 != nil || l.err {
+ return &ParseError{"", "bad ZONEMD Scheme", l}
+ }
+ rr.Scheme = uint8(i)
+
+ c.Next() // zBlank
+ l, _ = c.Next()
+ i, err := strconv.ParseUint(l.token, 10, 8)
+ if err != nil || l.err {
+ return &ParseError{"", "bad ZONEMD Hash Algorithm", l}
+ }
+ rr.Hash = uint8(i)
+
+ s, e2 := endingToString(c, "bad ZONEMD Digest")
+ if e2 != nil {
+ return e2
+ }
+ rr.Digest = s
+ return nil
+}
+
func (rr *SIG) parse(c *zlexer, o string) *ParseError { return rr.RRSIG.parse(c, o) }
func (rr *RRSIG) parse(c *zlexer, o string) *ParseError {
@@ -997,7 +1033,7 @@ func (rr *NSEC3) parse(c *zlexer, o string) *ParseError {
rr.Iterations = uint16(i)
c.Next()
l, _ = c.Next()
- if len(l.token) == 0 || l.err {
+ if l.token == "" || l.err {
return &ParseError{"", "bad NSEC3 Salt", l}
}
if l.token != "-" {
@@ -1007,7 +1043,7 @@ func (rr *NSEC3) parse(c *zlexer, o string) *ParseError {
c.Next()
l, _ = c.Next()
- if len(l.token) == 0 || l.err {
+ if l.token == "" || l.err {
return &ParseError{"", "bad NSEC3 NextDomain", l}
}
rr.HashLength = 20 // Fix for NSEC3 (sha1 160 bits)
@@ -1387,7 +1423,7 @@ func (rr *RFC3597) parse(c *zlexer, o string) *ParseError {
c.Next() // zBlank
l, _ = c.Next()
- rdlength, e := strconv.Atoi(l.token)
+ rdlength, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return &ParseError{"", "bad RFC3597 Rdata ", l}
}
@@ -1396,7 +1432,7 @@ func (rr *RFC3597) parse(c *zlexer, o string) *ParseError {
if e1 != nil {
return e1
}
- if rdlength*2 != len(s) {
+ if int(rdlength)*2 != len(s) {
return &ParseError{"", "bad RFC3597 Rdata", l}
}
rr.Rdata = s
diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go
index 30dfd41de..b2a63bda4 100644
--- a/vendor/github.com/miekg/dns/server.go
+++ b/vendor/github.com/miekg/dns/server.go
@@ -321,6 +321,7 @@ func (srv *Server) ListenAndServe() error {
}
u := l.(*net.UDPConn)
if e := setUDPSocketOptions(u); e != nil {
+ u.Close()
return e
}
srv.PacketConn = l
@@ -752,11 +753,10 @@ func (w *response) Write(m []byte) (int, error) {
return 0, &Error{err: "message too large"}
}
- l := make([]byte, 2)
- binary.BigEndian.PutUint16(l, uint16(len(m)))
-
- n, err := (&net.Buffers{l, m}).WriteTo(w.tcp)
- return int(n), err
+ msg := make([]byte, 2+len(m))
+ binary.BigEndian.PutUint16(msg, uint16(len(m)))
+ copy(msg[2:], m)
+ return w.tcp.Write(msg)
default:
panic("dns: internal error: udp and tcp both nil")
}
diff --git a/vendor/github.com/miekg/dns/sig0.go b/vendor/github.com/miekg/dns/sig0.go
index 9ef13ccf3..e781c9bb6 100644
--- a/vendor/github.com/miekg/dns/sig0.go
+++ b/vendor/github.com/miekg/dns/sig0.go
@@ -17,7 +17,7 @@ func (rr *SIG) Sign(k crypto.Signer, m *Msg) ([]byte, error) {
if k == nil {
return nil, ErrPrivKey
}
- if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
+ if rr.KeyTag == 0 || rr.SignerName == "" || rr.Algorithm == 0 {
return nil, ErrKey
}
@@ -78,7 +78,7 @@ func (rr *SIG) Verify(k *KEY, buf []byte) error {
if k == nil {
return ErrKey
}
- if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
+ if rr.KeyTag == 0 || rr.SignerName == "" || rr.Algorithm == 0 {
return ErrKey
}
diff --git a/vendor/github.com/miekg/dns/svcb.go b/vendor/github.com/miekg/dns/svcb.go
index f44dc67d7..3344253c2 100644
--- a/vendor/github.com/miekg/dns/svcb.go
+++ b/vendor/github.com/miekg/dns/svcb.go
@@ -10,6 +10,7 @@ import (
"strings"
)
+// SVCBKey is the type of the keys used in the SVCB RR.
type SVCBKey uint16
// Keys defined in draft-ietf-dnsop-svcb-https-01 Section 12.3.2.
@@ -204,7 +205,7 @@ type SVCB struct {
Hdr RR_Header
Priority uint16
Target string `dns:"domain-name"`
- Value []SVCBKeyValue `dns:"pairs"` // Value must be empty if Priority is non-zero.
+ Value []SVCBKeyValue `dns:"pairs"` // Value must be empty if Priority is zero.
}
// HTTPS RR. Everything valid for SVCB applies to HTTPS as well.
@@ -321,7 +322,7 @@ func (s *SVCBAlpn) pack() ([]byte, error) {
// Liberally estimate the size of an alpn as 10 octets
b := make([]byte, 0, 10*len(s.Alpn))
for _, e := range s.Alpn {
- if len(e) == 0 {
+ if e == "" {
return nil, errors.New("dns: svcbalpn: empty alpn-id")
}
if len(e) > 255 {
@@ -390,7 +391,7 @@ func (*SVCBNoDefaultAlpn) unpack(b []byte) error {
}
func (*SVCBNoDefaultAlpn) parse(b string) error {
- if len(b) != 0 {
+ if b != "" {
return errors.New("dns: svcbnodefaultalpn: no_default_alpn must have no value")
}
return nil
@@ -511,8 +512,13 @@ func (s *SVCBIPv4Hint) parse(b string) error {
}
func (s *SVCBIPv4Hint) copy() SVCBKeyValue {
+ hint := make([]net.IP, len(s.Hint))
+ for i, ip := range s.Hint {
+ hint[i] = copyIP(ip)
+ }
+
return &SVCBIPv4Hint{
- append([]net.IP(nil), s.Hint...),
+ Hint: hint,
}
}
@@ -629,8 +635,13 @@ func (s *SVCBIPv6Hint) parse(b string) error {
}
func (s *SVCBIPv6Hint) copy() SVCBKeyValue {
+ hint := make([]net.IP, len(s.Hint))
+ for i, ip := range s.Hint {
+ hint[i] = copyIP(ip)
+ }
+
return &SVCBIPv6Hint{
- append([]net.IP(nil), s.Hint...),
+ Hint: hint,
}
}
diff --git a/vendor/github.com/miekg/dns/tsig.go b/vendor/github.com/miekg/dns/tsig.go
index 59904dd6a..b49562d84 100644
--- a/vendor/github.com/miekg/dns/tsig.go
+++ b/vendor/github.com/miekg/dns/tsig.go
@@ -24,6 +24,56 @@ const (
HmacMD5 = "hmac-md5.sig-alg.reg.int." // Deprecated: HmacMD5 is no longer supported.
)
+// TsigProvider provides the API to plug-in a custom TSIG implementation.
+type TsigProvider interface {
+ // Generate is passed the DNS message to be signed and the partial TSIG RR. It returns the signature and nil, otherwise an error.
+ Generate(msg []byte, t *TSIG) ([]byte, error)
+ // Verify is passed the DNS message to be verified and the TSIG RR. If the signature is valid it will return nil, otherwise an error.
+ Verify(msg []byte, t *TSIG) error
+}
+
+type tsigHMACProvider string
+
+func (key tsigHMACProvider) Generate(msg []byte, t *TSIG) ([]byte, error) {
+ // If we barf here, the caller is to blame
+ rawsecret, err := fromBase64([]byte(key))
+ if err != nil {
+ return nil, err
+ }
+ var h hash.Hash
+ switch CanonicalName(t.Algorithm) {
+ case HmacSHA1:
+ h = hmac.New(sha1.New, rawsecret)
+ case HmacSHA224:
+ h = hmac.New(sha256.New224, rawsecret)
+ case HmacSHA256:
+ h = hmac.New(sha256.New, rawsecret)
+ case HmacSHA384:
+ h = hmac.New(sha512.New384, rawsecret)
+ case HmacSHA512:
+ h = hmac.New(sha512.New, rawsecret)
+ default:
+ return nil, ErrKeyAlg
+ }
+ h.Write(msg)
+ return h.Sum(nil), nil
+}
+
+func (key tsigHMACProvider) Verify(msg []byte, t *TSIG) error {
+ b, err := key.Generate(msg, t)
+ if err != nil {
+ return err
+ }
+ mac, err := hex.DecodeString(t.MAC)
+ if err != nil {
+ return err
+ }
+ if !hmac.Equal(b, mac) {
+ return ErrSig
+ }
+ return nil
+}
+
// TSIG is the RR the holds the transaction signature of a message.
// See RFC 2845 and RFC 4635.
type TSIG struct {
@@ -56,8 +106,8 @@ func (rr *TSIG) String() string {
return s
}
-func (rr *TSIG) parse(c *zlexer, origin string) *ParseError {
- panic("dns: internal error: parse should never be called on TSIG")
+func (*TSIG) parse(c *zlexer, origin string) *ParseError {
+ return &ParseError{err: "TSIG records do not have a presentation format"}
}
// The following values must be put in wireformat, so that the MAC can be calculated.
@@ -98,14 +148,13 @@ type timerWireFmt struct {
// timersOnly is false.
// If something goes wrong an error is returned, otherwise it is nil.
func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, string, error) {
+ return tsigGenerateProvider(m, tsigHMACProvider(secret), requestMAC, timersOnly)
+}
+
+func tsigGenerateProvider(m *Msg, provider TsigProvider, requestMAC string, timersOnly bool) ([]byte, string, error) {
if m.IsTsig() == nil {
panic("dns: TSIG not last RR in additional")
}
- // If we barf here, the caller is to blame
- rawsecret, err := fromBase64([]byte(secret))
- if err != nil {
- return nil, "", err
- }
rr := m.Extra[len(m.Extra)-1].(*TSIG)
m.Extra = m.Extra[0 : len(m.Extra)-1] // kill the TSIG from the msg
@@ -119,25 +168,13 @@ func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, s
}
t := new(TSIG)
- var h hash.Hash
- switch CanonicalName(rr.Algorithm) {
- case HmacSHA1:
- h = hmac.New(sha1.New, rawsecret)
- case HmacSHA224:
- h = hmac.New(sha256.New224, rawsecret)
- case HmacSHA256:
- h = hmac.New(sha256.New, rawsecret)
- case HmacSHA384:
- h = hmac.New(sha512.New384, rawsecret)
- case HmacSHA512:
- h = hmac.New(sha512.New, rawsecret)
- default:
- return nil, "", ErrKeyAlg
- }
- h.Write(buf)
// Copy all TSIG fields except MAC and its size, which are filled using the computed digest.
*t = *rr
- t.MAC = hex.EncodeToString(h.Sum(nil))
+ mac, err := provider.Generate(buf, rr)
+ if err != nil {
+ return nil, "", err
+ }
+ t.MAC = hex.EncodeToString(mac)
t.MACSize = uint16(len(t.MAC) / 2) // Size is half!
tbuf := make([]byte, Len(t))
@@ -156,49 +193,28 @@ func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, s
// If the signature does not validate err contains the
// error, otherwise it is nil.
func TsigVerify(msg []byte, secret, requestMAC string, timersOnly bool) error {
- return tsigVerify(msg, secret, requestMAC, timersOnly, uint64(time.Now().Unix()))
+ return tsigVerify(msg, tsigHMACProvider(secret), requestMAC, timersOnly, uint64(time.Now().Unix()))
+}
+
+func tsigVerifyProvider(msg []byte, provider TsigProvider, requestMAC string, timersOnly bool) error {
+ return tsigVerify(msg, provider, requestMAC, timersOnly, uint64(time.Now().Unix()))
}
// actual implementation of TsigVerify, taking the current time ('now') as a parameter for the convenience of tests.
-func tsigVerify(msg []byte, secret, requestMAC string, timersOnly bool, now uint64) error {
- rawsecret, err := fromBase64([]byte(secret))
- if err != nil {
- return err
- }
+func tsigVerify(msg []byte, provider TsigProvider, requestMAC string, timersOnly bool, now uint64) error {
// Strip the TSIG from the incoming msg
stripped, tsig, err := stripTsig(msg)
if err != nil {
return err
}
- msgMAC, err := hex.DecodeString(tsig.MAC)
- if err != nil {
- return err
- }
-
buf, err := tsigBuffer(stripped, tsig, requestMAC, timersOnly)
if err != nil {
return err
}
- var h hash.Hash
- switch CanonicalName(tsig.Algorithm) {
- case HmacSHA1:
- h = hmac.New(sha1.New, rawsecret)
- case HmacSHA224:
- h = hmac.New(sha256.New224, rawsecret)
- case HmacSHA256:
- h = hmac.New(sha256.New, rawsecret)
- case HmacSHA384:
- h = hmac.New(sha512.New384, rawsecret)
- case HmacSHA512:
- h = hmac.New(sha512.New, rawsecret)
- default:
- return ErrKeyAlg
- }
- h.Write(buf)
- if !hmac.Equal(h.Sum(nil), msgMAC) {
- return ErrSig
+ if err := provider.Verify(buf, tsig); err != nil {
+ return err
}
// Fudge factor works both ways. A message can arrive before it was signed because
diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go
index 1f385bd22..d9becb67c 100644
--- a/vendor/github.com/miekg/dns/types.go
+++ b/vendor/github.com/miekg/dns/types.go
@@ -81,6 +81,7 @@ const (
TypeCDNSKEY uint16 = 60
TypeOPENPGPKEY uint16 = 61
TypeCSYNC uint16 = 62
+ TypeZONEMD uint16 = 63
TypeSVCB uint16 = 64
TypeHTTPS uint16 = 65
TypeSPF uint16 = 99
@@ -150,6 +151,14 @@ const (
OpcodeUpdate = 5
)
+// Used in ZONEMD https://tools.ietf.org/html/rfc8976
+const (
+ ZoneMDSchemeSimple = 1
+
+ ZoneMDHashAlgSHA384 = 1
+ ZoneMDHashAlgSHA512 = 2
+)
+
// Header is the wire format for the DNS packet header.
type Header struct {
Id uint16
@@ -245,8 +254,8 @@ type ANY struct {
func (rr *ANY) String() string { return rr.Hdr.String() }
-func (rr *ANY) parse(c *zlexer, origin string) *ParseError {
- panic("dns: internal error: parse should never be called on ANY")
+func (*ANY) parse(c *zlexer, origin string) *ParseError {
+ return &ParseError{err: "ANY records do not have a presentation format"}
}
// NULL RR. See RFC 1035.
@@ -260,8 +269,8 @@ func (rr *NULL) String() string {
return ";" + rr.Hdr.String() + rr.Data
}
-func (rr *NULL) parse(c *zlexer, origin string) *ParseError {
- panic("dns: internal error: parse should never be called on NULL")
+func (*NULL) parse(c *zlexer, origin string) *ParseError {
+ return &ParseError{err: "NULL records do not have a presentation format"}
}
// CNAME RR. See RFC 1034.
@@ -1361,6 +1370,23 @@ func (rr *CSYNC) len(off int, compression map[string]struct{}) int {
return l
}
+// ZONEMD RR, from draft-ietf-dnsop-dns-zone-digest
+type ZONEMD struct {
+ Hdr RR_Header
+ Serial uint32
+ Scheme uint8
+ Hash uint8
+ Digest string `dns:"hex"`
+}
+
+func (rr *ZONEMD) String() string {
+ return rr.Hdr.String() +
+ strconv.Itoa(int(rr.Serial)) +
+ " " + strconv.Itoa(int(rr.Scheme)) +
+ " " + strconv.Itoa(int(rr.Hash)) +
+ " " + rr.Digest
+}
+
// APL RR. See RFC 3123.
type APL struct {
Hdr RR_Header
@@ -1387,13 +1413,13 @@ func (rr *APL) String() string {
}
// str returns presentation form of the APL prefix.
-func (p *APLPrefix) str() string {
+func (a *APLPrefix) str() string {
var sb strings.Builder
- if p.Negation {
+ if a.Negation {
sb.WriteByte('!')
}
- switch len(p.Network.IP) {
+ switch len(a.Network.IP) {
case net.IPv4len:
sb.WriteByte('1')
case net.IPv6len:
@@ -1402,20 +1428,20 @@ func (p *APLPrefix) str() string {
sb.WriteByte(':')
- switch len(p.Network.IP) {
+ switch len(a.Network.IP) {
case net.IPv4len:
- sb.WriteString(p.Network.IP.String())
+ sb.WriteString(a.Network.IP.String())
case net.IPv6len:
// add prefix for IPv4-mapped IPv6
- if v4 := p.Network.IP.To4(); v4 != nil {
+ if v4 := a.Network.IP.To4(); v4 != nil {
sb.WriteString("::ffff:")
}
- sb.WriteString(p.Network.IP.String())
+ sb.WriteString(a.Network.IP.String())
}
sb.WriteByte('/')
- prefix, _ := p.Network.Mask.Size()
+ prefix, _ := a.Network.Mask.Size()
sb.WriteString(strconv.Itoa(prefix))
return sb.String()
@@ -1429,17 +1455,17 @@ func (a *APLPrefix) equals(b *APLPrefix) bool {
}
// copy returns a copy of the APL prefix.
-func (p *APLPrefix) copy() APLPrefix {
+func (a *APLPrefix) copy() APLPrefix {
return APLPrefix{
- Negation: p.Negation,
- Network: copyNet(p.Network),
+ Negation: a.Negation,
+ Network: copyNet(a.Network),
}
}
// len returns size of the prefix in wire format.
-func (p *APLPrefix) len() int {
+func (a *APLPrefix) len() int {
// 4-byte header and the network address prefix (see Section 4 of RFC 3123)
- prefix, _ := p.Network.Mask.Size()
+ prefix, _ := a.Network.Mask.Size()
return 4 + (prefix+7)/8
}
@@ -1472,7 +1498,7 @@ func StringToTime(s string) (uint32, error) {
// saltToString converts a NSECX salt to uppercase and returns "-" when it is empty.
func saltToString(s string) string {
- if len(s) == 0 {
+ if s == "" {
return "-"
}
return strings.ToUpper(s)
diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go
index 5c75851b4..622c69a1b 100644
--- a/vendor/github.com/miekg/dns/version.go
+++ b/vendor/github.com/miekg/dns/version.go
@@ -3,7 +3,7 @@ package dns
import "fmt"
// Version is current version of this library.
-var Version = v{1, 1, 35}
+var Version = v{1, 1, 43}
// v holds the version of this library.
type v struct {
diff --git a/vendor/github.com/miekg/dns/zduplicate.go b/vendor/github.com/miekg/dns/zduplicate.go
index 0d3b34bd9..9eb1dac29 100644
--- a/vendor/github.com/miekg/dns/zduplicate.go
+++ b/vendor/github.com/miekg/dns/zduplicate.go
@@ -1317,3 +1317,24 @@ func (r1 *X25) isDuplicate(_r2 RR) bool {
}
return true
}
+
+func (r1 *ZONEMD) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*ZONEMD)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Serial != r2.Serial {
+ return false
+ }
+ if r1.Scheme != r2.Scheme {
+ return false
+ }
+ if r1.Hash != r2.Hash {
+ return false
+ }
+ if r1.Digest != r2.Digest {
+ return false
+ }
+ return true
+}
diff --git a/vendor/github.com/miekg/dns/zmsg.go b/vendor/github.com/miekg/dns/zmsg.go
index d24a10fa2..fc0822f98 100644
--- a/vendor/github.com/miekg/dns/zmsg.go
+++ b/vendor/github.com/miekg/dns/zmsg.go
@@ -1118,6 +1118,26 @@ func (rr *X25) pack(msg []byte, off int, compression compressionMap, compress bo
return off, nil
}
+func (rr *ZONEMD) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint32(rr.Serial, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Scheme, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Hash, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringHex(rr.Digest, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
// unpack*() functions
func (rr *A) unpack(msg []byte, off int) (off1 int, err error) {
@@ -2821,3 +2841,35 @@ func (rr *X25) unpack(msg []byte, off int) (off1 int, err error) {
}
return off, nil
}
+
+func (rr *ZONEMD) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Serial, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Scheme, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Hash, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
diff --git a/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/miekg/dns/ztypes.go
index 11b51bf21..5d060cfee 100644
--- a/vendor/github.com/miekg/dns/ztypes.go
+++ b/vendor/github.com/miekg/dns/ztypes.go
@@ -82,6 +82,7 @@ var TypeToRR = map[uint16]func() RR{
TypeUINFO: func() RR { return new(UINFO) },
TypeURI: func() RR { return new(URI) },
TypeX25: func() RR { return new(X25) },
+ TypeZONEMD: func() RR { return new(ZONEMD) },
}
// TypeToString is a map of strings for each RR type.
@@ -168,6 +169,7 @@ var TypeToString = map[uint16]string{
TypeUNSPEC: "UNSPEC",
TypeURI: "URI",
TypeX25: "X25",
+ TypeZONEMD: "ZONEMD",
TypeNSAPPTR: "NSAP-PTR",
}
@@ -245,6 +247,7 @@ func (rr *UID) Header() *RR_Header { return &rr.Hdr }
func (rr *UINFO) Header() *RR_Header { return &rr.Hdr }
func (rr *URI) Header() *RR_Header { return &rr.Hdr }
func (rr *X25) Header() *RR_Header { return &rr.Hdr }
+func (rr *ZONEMD) Header() *RR_Header { return &rr.Hdr }
// len() functions
func (rr *A) len(off int, compression map[string]struct{}) int {
@@ -684,6 +687,14 @@ func (rr *X25) len(off int, compression map[string]struct{}) int {
l += len(rr.PSDNAddress) + 1
return l
}
+func (rr *ZONEMD) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += 4 // Serial
+ l++ // Scheme
+ l++ // Hash
+ l += len(rr.Digest) / 2
+ return l
+}
// copy() functions
func (rr *A) copy() RR {
@@ -936,3 +947,6 @@ func (rr *URI) copy() RR {
func (rr *X25) copy() RR {
return &X25{rr.Hdr, rr.PSDNAddress}
}
+func (rr *ZONEMD) copy() RR {
+ return &ZONEMD{rr.Hdr, rr.Serial, rr.Scheme, rr.Hash, rr.Digest}
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/.travis.yml b/vendor/github.com/mitchellh/mapstructure/.travis.yml
deleted file mode 100644
index 5e31a95a8..000000000
--- a/vendor/github.com/mitchellh/mapstructure/.travis.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-language: go
-
-go:
- - "1.14.x"
- - tip
-
-script:
- - go test
- - go test -bench . -benchmem
diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
index 20eea2b7a..1955f2878 100644
--- a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
+++ b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
@@ -1,3 +1,15 @@
+## unreleased
+
+* Fix regression where `*time.Time` value would be set to empty and not be sent
+ to decode hooks properly [GH-232]
+
+## 1.4.0
+
+* A new decode hook type `DecodeHookFuncValue` has been added that has
+ access to the full values. [GH-183]
+* Squash is now supported with embedded fields that are struct pointers [GH-205]
+* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206]
+
## 1.3.3
* Decoding maps from maps creates a settable value for decode hooks [GH-203]
diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
index 1f0abc65a..92e6f76ff 100644
--- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
+++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
@@ -1,6 +1,7 @@
package mapstructure
import (
+ "encoding"
"errors"
"fmt"
"net"
@@ -16,10 +17,11 @@ func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
// Create variables here so we can reference them with the reflect pkg
var f1 DecodeHookFuncType
var f2 DecodeHookFuncKind
+ var f3 DecodeHookFuncValue
// Fill in the variables into this interface and the rest is done
// automatically using the reflect package.
- potential := []interface{}{f1, f2}
+ potential := []interface{}{f1, f2, f3}
v := reflect.ValueOf(h)
vt := v.Type()
@@ -38,13 +40,15 @@ func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
// that took reflect.Kind instead of reflect.Type.
func DecodeHookExec(
raw DecodeHookFunc,
- from reflect.Type, to reflect.Type,
- data interface{}) (interface{}, error) {
+ from reflect.Value, to reflect.Value) (interface{}, error) {
+
switch f := typedDecodeHook(raw).(type) {
case DecodeHookFuncType:
- return f(from, to, data)
+ return f(from.Type(), to.Type(), from.Interface())
case DecodeHookFuncKind:
- return f(from.Kind(), to.Kind(), data)
+ return f(from.Kind(), to.Kind(), from.Interface())
+ case DecodeHookFuncValue:
+ return f(from, to)
default:
return nil, errors.New("invalid decode hook signature")
}
@@ -56,22 +60,16 @@ func DecodeHookExec(
// The composed funcs are called in order, with the result of the
// previous transformation.
func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
- return func(
- f reflect.Type,
- t reflect.Type,
- data interface{}) (interface{}, error) {
+ return func(f reflect.Value, t reflect.Value) (interface{}, error) {
var err error
+ var data interface{}
+ newFrom := f
for _, f1 := range fs {
- data, err = DecodeHookExec(f1, f, t, data)
+ data, err = DecodeHookExec(f1, newFrom, t)
if err != nil {
return nil, err
}
-
- // Modify the from kind to be correct with the new data
- f = nil
- if val := reflect.ValueOf(data); val.IsValid() {
- f = val.Type()
- }
+ newFrom = reflect.ValueOf(data)
}
return data, nil
@@ -215,3 +213,44 @@ func WeaklyTypedHook(
return data, nil
}
+
+func RecursiveStructToMapHookFunc() DecodeHookFunc {
+ return func(f reflect.Value, t reflect.Value) (interface{}, error) {
+ if f.Kind() != reflect.Struct {
+ return f.Interface(), nil
+ }
+
+ var i interface{} = struct{}{}
+ if t.Type() != reflect.TypeOf(&i).Elem() {
+ return f.Interface(), nil
+ }
+
+ m := make(map[string]interface{})
+ t.Set(reflect.ValueOf(m))
+
+ return f.Interface(), nil
+ }
+}
+
+// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies
+// strings to the UnmarshalText function, when the target type
+// implements the encoding.TextUnmarshaler interface
+func TextUnmarshallerHookFunc() DecodeHookFuncType {
+ return func(
+ f reflect.Type,
+ t reflect.Type,
+ data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String {
+ return data, nil
+ }
+ result := reflect.New(t).Interface()
+ unmarshaller, ok := result.(encoding.TextUnmarshaler)
+ if !ok {
+ return data, nil
+ }
+ if err := unmarshaller.UnmarshalText([]byte(data.(string))); err != nil {
+ return nil, err
+ }
+ return result, nil
+ }
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
index f41bcc58f..3643901f5 100644
--- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go
+++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
@@ -72,6 +72,17 @@
// "name": "alice",
// }
//
+// When decoding from a struct to a map, the squash tag squashes the struct
+// fields into a single map. Using the example structs from above:
+//
+// Friend{Person: Person{Name: "alice"}}
+//
+// Will be decoded into a map:
+//
+// map[string]interface{}{
+// "name": "alice",
+// }
+//
// DecoderConfig has a field that changes the behavior of mapstructure
// to always squash embedded structs.
//
@@ -161,10 +172,11 @@ import (
// data transformations. See "DecodeHook" in the DecoderConfig
// struct.
//
-// The type should be DecodeHookFuncType or DecodeHookFuncKind.
-// Either is accepted. Types are a superset of Kinds (Types can return
-// Kinds) and are generally a richer thing to use, but Kinds are simpler
-// if you only need those.
+// The type must be one of DecodeHookFuncType, DecodeHookFuncKind, or
+// DecodeHookFuncValue.
+// Values are a superset of Types (Values can return types), and Types are a
+// superset of Kinds (Types can return Kinds) and are generally a richer thing
+// to use, but Kinds are simpler if you only need those.
//
// The reason DecodeHookFunc is multi-typed is for backwards compatibility:
// we started with Kinds and then realized Types were the better solution,
@@ -180,15 +192,22 @@ type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface
// source and target types.
type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
+// DecodeHookFuncRaw is a DecodeHookFunc which has complete access to both the source and target
+// values.
+type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error)
+
// DecoderConfig is the configuration that is used to create a new decoder
// and allows customization of various aspects of decoding.
type DecoderConfig struct {
// DecodeHook, if set, will be called before any decoding and any
// type conversion (if WeaklyTypedInput is on). This lets you modify
- // the values before they're set down onto the resulting struct.
+ // the values before they're set down onto the resulting struct. The
+ // DecodeHook is called for every map and value in the input. This means
+ // that if a struct has embedded fields with squash tags the decode hook
+ // is called only once with all of the input data, not once for each
+ // embedded struct.
//
- // If an error is returned, the entire decode will fail with that
- // error.
+ // If an error is returned, the entire decode will fail with that error.
DecodeHook DecodeHookFunc
// If ErrorUnused is true, then it is an error for there to exist
@@ -409,9 +428,7 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e
if d.config.DecodeHook != nil {
// We have a DecodeHook, so let's pre-process the input.
var err error
- input, err = DecodeHookExec(
- d.config.DecodeHook,
- inputVal.Type(), outVal.Type(), input)
+ input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal)
if err != nil {
return fmt.Errorf("error decoding '%s': %s", name, err)
}
@@ -562,8 +579,8 @@ func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value)
if !converted {
return fmt.Errorf(
- "'%s' expected type '%s', got unconvertible type '%s'",
- name, val.Type(), dataVal.Type())
+ "'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
+ name, val.Type(), dataVal.Type(), data)
}
return nil
@@ -588,7 +605,12 @@ func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) er
val.SetInt(0)
}
case dataKind == reflect.String && d.config.WeaklyTypedInput:
- i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits())
+ str := dataVal.String()
+ if str == "" {
+ str = "0"
+ }
+
+ i, err := strconv.ParseInt(str, 0, val.Type().Bits())
if err == nil {
val.SetInt(i)
} else {
@@ -604,8 +626,8 @@ func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) er
val.SetInt(i)
default:
return fmt.Errorf(
- "'%s' expected type '%s', got unconvertible type '%s'",
- name, val.Type(), dataVal.Type())
+ "'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
+ name, val.Type(), dataVal.Type(), data)
}
return nil
@@ -640,7 +662,12 @@ func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) e
val.SetUint(0)
}
case dataKind == reflect.String && d.config.WeaklyTypedInput:
- i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits())
+ str := dataVal.String()
+ if str == "" {
+ str = "0"
+ }
+
+ i, err := strconv.ParseUint(str, 0, val.Type().Bits())
if err == nil {
val.SetUint(i)
} else {
@@ -660,8 +687,8 @@ func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) e
val.SetUint(uint64(i))
default:
return fmt.Errorf(
- "'%s' expected type '%s', got unconvertible type '%s'",
- name, val.Type(), dataVal.Type())
+ "'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
+ name, val.Type(), dataVal.Type(), data)
}
return nil
@@ -691,8 +718,8 @@ func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) e
}
default:
return fmt.Errorf(
- "'%s' expected type '%s', got unconvertible type '%s'",
- name, val.Type(), dataVal.Type())
+ "'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
+ name, val.Type(), dataVal.Type(), data)
}
return nil
@@ -717,7 +744,12 @@ func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value)
val.SetFloat(0)
}
case dataKind == reflect.String && d.config.WeaklyTypedInput:
- f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits())
+ str := dataVal.String()
+ if str == "" {
+ str = "0"
+ }
+
+ f, err := strconv.ParseFloat(str, val.Type().Bits())
if err == nil {
val.SetFloat(f)
} else {
@@ -733,8 +765,8 @@ func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value)
val.SetFloat(i)
default:
return fmt.Errorf(
- "'%s' expected type '%s', got unconvertible type '%s'",
- name, val.Type(), dataVal.Type())
+ "'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
+ name, val.Type(), dataVal.Type(), data)
}
return nil
@@ -785,7 +817,7 @@ func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val ref
for i := 0; i < dataVal.Len(); i++ {
err := d.decode(
- fmt.Sprintf("%s[%d]", name, i),
+ name+"["+strconv.Itoa(i)+"]",
dataVal.Index(i).Interface(), val)
if err != nil {
return err
@@ -818,7 +850,7 @@ func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val refle
}
for _, k := range dataVal.MapKeys() {
- fieldName := fmt.Sprintf("%s[%s]", name, k)
+ fieldName := name + "[" + k.String() + "]"
// First decode the key into the proper type
currentKey := reflect.Indirect(reflect.New(valKeyType))
@@ -871,6 +903,7 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re
// If Squash is set in the config, we squash the field down.
squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous
+
// Determine the name of the key in the map
if index := strings.Index(tagValue, ","); index != -1 {
if tagValue[:index] == "-" {
@@ -883,8 +916,16 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re
// If "squash" is specified in the tag, we squash the field down.
squash = !squash && strings.Index(tagValue[index+1:], "squash") != -1
- if squash && v.Kind() != reflect.Struct {
- return fmt.Errorf("cannot squash non-struct type '%s'", v.Type())
+ if squash {
+ // When squashing, the embedded type can be a pointer to a struct.
+ if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct {
+ v = v.Elem()
+ }
+
+ // The final type must be a struct
+ if v.Kind() != reflect.Struct {
+ return fmt.Errorf("cannot squash non-struct type '%s'", v.Type())
+ }
}
keyName = tagValue[:index]
} else if len(tagValue) > 0 {
@@ -995,8 +1036,8 @@ func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) e
dataVal := reflect.Indirect(reflect.ValueOf(data))
if val.Type() != dataVal.Type() {
return fmt.Errorf(
- "'%s' expected type '%s', got unconvertible type '%s'",
- name, val.Type(), dataVal.Type())
+ "'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
+ name, val.Type(), dataVal.Type(), data)
}
val.Set(dataVal)
return nil
@@ -1062,7 +1103,7 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value)
}
currentField := valSlice.Index(i)
- fieldName := fmt.Sprintf("%s[%d]", name, i)
+ fieldName := name + "[" + strconv.Itoa(i) + "]"
if err := d.decode(fieldName, currentData, currentField); err != nil {
errors = appendErrors(errors, err)
}
@@ -1129,7 +1170,7 @@ func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value)
currentData := dataVal.Index(i).Interface()
currentField := valArray.Index(i)
- fieldName := fmt.Sprintf("%s[%d]", name, i)
+ fieldName := name + "[" + strconv.Itoa(i) + "]"
if err := d.decode(fieldName, currentData, currentField); err != nil {
errors = appendErrors(errors, err)
}
@@ -1232,10 +1273,14 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
for i := 0; i < structType.NumField(); i++ {
fieldType := structType.Field(i)
- fieldKind := fieldType.Type.Kind()
+ fieldVal := structVal.Field(i)
+ if fieldVal.Kind() == reflect.Ptr && fieldVal.Elem().Kind() == reflect.Struct {
+ // Handle embedded struct pointers as embedded structs.
+ fieldVal = fieldVal.Elem()
+ }
// If "squash" is specified in the tag, we squash the field down.
- squash := d.config.Squash && fieldKind == reflect.Struct && fieldType.Anonymous
+ squash := d.config.Squash && fieldVal.Kind() == reflect.Struct && fieldType.Anonymous
remain := false
// We always parse the tags cause we're looking for other tags too
@@ -1253,21 +1298,21 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
}
if squash {
- if fieldKind != reflect.Struct {
+ if fieldVal.Kind() != reflect.Struct {
errors = appendErrors(errors,
- fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind))
+ fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind()))
} else {
- structs = append(structs, structVal.FieldByName(fieldType.Name))
+ structs = append(structs, fieldVal)
}
continue
}
// Build our field
if remain {
- remainField = &field{fieldType, structVal.Field(i)}
+ remainField = &field{fieldType, fieldVal}
} else {
// Normal struct field, store it away
- fields = append(fields, field{fieldType, structVal.Field(i)})
+ fields = append(fields, field{fieldType, fieldVal})
}
}
}
@@ -1326,7 +1371,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
// If the name is empty string, then we're at the root, and we
// don't dot-join the fields.
if name != "" {
- fieldName = fmt.Sprintf("%s.%s", name, fieldName)
+ fieldName = name + "." + fieldName
}
if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil {
@@ -1373,7 +1418,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
for rawKey := range dataValKeysUnused {
key := rawKey.(string)
if name != "" {
- key = fmt.Sprintf("%s.%s", name, key)
+ key = name + "." + key
}
d.config.Metadata.Unused = append(d.config.Metadata.Unused, key)
diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/alert.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/alert.go
index 832df76e4..170307544 100644
--- a/vendor/github.com/prometheus/alertmanager/api/v2/models/alert.go
+++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/alert.go
@@ -20,14 +20,14 @@ package models
// Editing this file might prove futile when you re-run the swagger generate command
import (
- strfmt "github.com/go-openapi/strfmt"
-
"github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// Alert alert
+//
// swagger:model alert
type Alert struct {
diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_group.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_group.go
index 92a4b9182..3db729359 100644
--- a/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_group.go
+++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_group.go
@@ -22,14 +22,14 @@ package models
import (
"strconv"
- strfmt "github.com/go-openapi/strfmt"
-
"github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// AlertGroup alert group
+//
// swagger:model alertGroup
type AlertGroup struct {
diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_groups.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_groups.go
index 9cf8efe65..cb48c08e5 100644
--- a/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_groups.go
+++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_groups.go
@@ -22,13 +22,13 @@ package models
import (
"strconv"
- strfmt "github.com/go-openapi/strfmt"
-
"github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// AlertGroups alert groups
+//
// swagger:model alertGroups
type AlertGroups []*AlertGroup
diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_status.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_status.go
index cadbad364..9ee99f785 100644
--- a/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_status.go
+++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_status.go
@@ -22,14 +22,14 @@ package models
import (
"encoding/json"
- strfmt "github.com/go-openapi/strfmt"
-
"github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// AlertStatus alert status
+//
// swagger:model alertStatus
type AlertStatus struct {
@@ -113,7 +113,7 @@ const (
// prop value enum
func (m *AlertStatus) validateStateEnum(path, location string, value string) error {
- if err := validate.Enum(path, location, value, alertStatusTypeStatePropEnum); err != nil {
+ if err := validate.EnumCase(path, location, value, alertStatusTypeStatePropEnum, true); err != nil {
return err
}
return nil
diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/alertmanager_config.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/alertmanager_config.go
index a7ef560d0..958114bbf 100644
--- a/vendor/github.com/prometheus/alertmanager/api/v2/models/alertmanager_config.go
+++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/alertmanager_config.go
@@ -20,14 +20,14 @@ package models
// Editing this file might prove futile when you re-run the swagger generate command
import (
- strfmt "github.com/go-openapi/strfmt"
-
"github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// AlertmanagerConfig alertmanager config
+//
// swagger:model alertmanagerConfig
type AlertmanagerConfig struct {
diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/alertmanager_status.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/alertmanager_status.go
index 88afa7fe2..483beb23e 100644
--- a/vendor/github.com/prometheus/alertmanager/api/v2/models/alertmanager_status.go
+++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/alertmanager_status.go
@@ -20,14 +20,14 @@ package models
// Editing this file might prove futile when you re-run the swagger generate command
import (
- strfmt "github.com/go-openapi/strfmt"
-
"github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// AlertmanagerStatus alertmanager status
+//
// swagger:model alertmanagerStatus
type AlertmanagerStatus struct {
diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/cluster_status.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/cluster_status.go
index 063caac43..a3373a729 100644
--- a/vendor/github.com/prometheus/alertmanager/api/v2/models/cluster_status.go
+++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/cluster_status.go
@@ -23,14 +23,14 @@ import (
"encoding/json"
"strconv"
- strfmt "github.com/go-openapi/strfmt"
-
"github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// ClusterStatus cluster status
+//
// swagger:model clusterStatus
type ClusterStatus struct {
@@ -115,7 +115,7 @@ const (
// prop value enum
func (m *ClusterStatus) validateStatusEnum(path, location string, value string) error {
- if err := validate.Enum(path, location, value, clusterStatusTypeStatusPropEnum); err != nil {
+ if err := validate.EnumCase(path, location, value, clusterStatusTypeStatusPropEnum, true); err != nil {
return err
}
return nil
diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_alert.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_alert.go
index c0c983486..2f74818c2 100644
--- a/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_alert.go
+++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_alert.go
@@ -22,14 +22,14 @@ package models
import (
"strconv"
- strfmt "github.com/go-openapi/strfmt"
-
"github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// GettableAlert gettable alert
+//
// swagger:model gettableAlert
type GettableAlert struct {
@@ -158,7 +158,6 @@ func (m GettableAlert) MarshalJSON() ([]byte, error) {
return nil, err
}
_parts = append(_parts, aO1)
-
return swag.ConcatJSON(_parts...), nil
}
diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_alerts.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_alerts.go
index fe7f1defd..f5a5c0421 100644
--- a/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_alerts.go
+++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_alerts.go
@@ -22,13 +22,13 @@ package models
import (
"strconv"
- strfmt "github.com/go-openapi/strfmt"
-
"github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// GettableAlerts gettable alerts
+//
// swagger:model gettableAlerts
type GettableAlerts []*GettableAlert
diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_silence.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_silence.go
index e2cad9319..8fb7a5129 100644
--- a/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_silence.go
+++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_silence.go
@@ -20,14 +20,14 @@ package models
// Editing this file might prove futile when you re-run the swagger generate command
import (
- strfmt "github.com/go-openapi/strfmt"
-
"github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// GettableSilence gettable silence
+//
// swagger:model gettableSilence
type GettableSilence struct {
@@ -106,7 +106,6 @@ func (m GettableSilence) MarshalJSON() ([]byte, error) {
return nil, err
}
_parts = append(_parts, aO1)
-
return swag.ConcatJSON(_parts...), nil
}
diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_silences.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_silences.go
index 8f1604e3f..32d109ef7 100644
--- a/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_silences.go
+++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_silences.go
@@ -22,13 +22,13 @@ package models
import (
"strconv"
- strfmt "github.com/go-openapi/strfmt"
-
"github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// GettableSilences gettable silences
+//
// swagger:model gettableSilences
type GettableSilences []*GettableSilence
diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/label_set.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/label_set.go
index 0d1d35389..d7d298523 100644
--- a/vendor/github.com/prometheus/alertmanager/api/v2/models/label_set.go
+++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/label_set.go
@@ -20,10 +20,11 @@ package models
// Editing this file might prove futile when you re-run the swagger generate command
import (
- strfmt "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/strfmt"
)
// LabelSet label set
+//
// swagger:model labelSet
type LabelSet map[string]string
diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/matcher.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/matcher.go
index eae3605a7..f2e2d6de8 100644
--- a/vendor/github.com/prometheus/alertmanager/api/v2/models/matcher.go
+++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/matcher.go
@@ -20,17 +20,20 @@ package models
// Editing this file might prove futile when you re-run the swagger generate command
import (
- strfmt "github.com/go-openapi/strfmt"
-
"github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// Matcher matcher
+//
// swagger:model matcher
type Matcher struct {
+ // is equal
+ IsEqual *bool `json:"isEqual,omitempty"`
+
// is regex
// Required: true
IsRegex *bool `json:"isRegex"`
diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/matchers.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/matchers.go
index bd2854897..3fb73c434 100644
--- a/vendor/github.com/prometheus/alertmanager/api/v2/models/matchers.go
+++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/matchers.go
@@ -22,14 +22,14 @@ package models
import (
"strconv"
- strfmt "github.com/go-openapi/strfmt"
-
"github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// Matchers matchers
+//
// swagger:model matchers
type Matchers []*Matcher
diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/peer_status.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/peer_status.go
index 29e10ece6..204c3d785 100644
--- a/vendor/github.com/prometheus/alertmanager/api/v2/models/peer_status.go
+++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/peer_status.go
@@ -20,14 +20,14 @@ package models
// Editing this file might prove futile when you re-run the swagger generate command
import (
- strfmt "github.com/go-openapi/strfmt"
-
"github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// PeerStatus peer status
+//
// swagger:model peerStatus
type PeerStatus struct {
diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/postable_alert.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/postable_alert.go
index afa91ca96..88c06e835 100644
--- a/vendor/github.com/prometheus/alertmanager/api/v2/models/postable_alert.go
+++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/postable_alert.go
@@ -20,14 +20,14 @@ package models
// Editing this file might prove futile when you re-run the swagger generate command
import (
- strfmt "github.com/go-openapi/strfmt"
-
"github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// PostableAlert postable alert
+//
// swagger:model postableAlert
type PostableAlert struct {
@@ -104,7 +104,6 @@ func (m PostableAlert) MarshalJSON() ([]byte, error) {
return nil, err
}
_parts = append(_parts, aO1)
-
return swag.ConcatJSON(_parts...), nil
}
diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/postable_alerts.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/postable_alerts.go
index 7097adcaf..9a1356368 100644
--- a/vendor/github.com/prometheus/alertmanager/api/v2/models/postable_alerts.go
+++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/postable_alerts.go
@@ -22,13 +22,13 @@ package models
import (
"strconv"
- strfmt "github.com/go-openapi/strfmt"
-
"github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// PostableAlerts postable alerts
+//
// swagger:model postableAlerts
type PostableAlerts []*PostableAlert
diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/postable_silence.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/postable_silence.go
index 45efa4ff7..c77a9534a 100644
--- a/vendor/github.com/prometheus/alertmanager/api/v2/models/postable_silence.go
+++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/postable_silence.go
@@ -20,13 +20,13 @@ package models
// Editing this file might prove futile when you re-run the swagger generate command
import (
- strfmt "github.com/go-openapi/strfmt"
-
"github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// PostableSilence postable silence
+//
// swagger:model postableSilence
type PostableSilence struct {
@@ -79,7 +79,6 @@ func (m PostableSilence) MarshalJSON() ([]byte, error) {
return nil, err
}
_parts = append(_parts, aO1)
-
return swag.ConcatJSON(_parts...), nil
}
diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/receiver.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/receiver.go
index 3686e6d19..9f85db60a 100644
--- a/vendor/github.com/prometheus/alertmanager/api/v2/models/receiver.go
+++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/receiver.go
@@ -20,14 +20,14 @@ package models
// Editing this file might prove futile when you re-run the swagger generate command
import (
- strfmt "github.com/go-openapi/strfmt"
-
"github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// Receiver receiver
+//
// swagger:model receiver
type Receiver struct {
diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/silence.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/silence.go
index 0a842a301..27fb9f3d1 100644
--- a/vendor/github.com/prometheus/alertmanager/api/v2/models/silence.go
+++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/silence.go
@@ -20,14 +20,14 @@ package models
// Editing this file might prove futile when you re-run the swagger generate command
import (
- strfmt "github.com/go-openapi/strfmt"
-
"github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// Silence silence
+//
// swagger:model silence
type Silence struct {
diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/silence_status.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/silence_status.go
index 669269e18..0c63df853 100644
--- a/vendor/github.com/prometheus/alertmanager/api/v2/models/silence_status.go
+++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/silence_status.go
@@ -22,14 +22,14 @@ package models
import (
"encoding/json"
- strfmt "github.com/go-openapi/strfmt"
-
"github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// SilenceStatus silence status
+//
// swagger:model silenceStatus
type SilenceStatus struct {
@@ -79,7 +79,7 @@ const (
// prop value enum
func (m *SilenceStatus) validateStateEnum(path, location string, value string) error {
- if err := validate.Enum(path, location, value, silenceStatusTypeStatePropEnum); err != nil {
+ if err := validate.EnumCase(path, location, value, silenceStatusTypeStatePropEnum, true); err != nil {
return err
}
return nil
diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/version_info.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/version_info.go
index 232d805d2..f7124eca8 100644
--- a/vendor/github.com/prometheus/alertmanager/api/v2/models/version_info.go
+++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/version_info.go
@@ -20,14 +20,14 @@ package models
// Editing this file might prove futile when you re-run the swagger generate command
import (
- strfmt "github.com/go-openapi/strfmt"
-
"github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// VersionInfo version info
+//
// swagger:model versionInfo
type VersionInfo struct {
diff --git a/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go b/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go
index 138a33b7d..0c8de071c 100644
--- a/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go
+++ b/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go
@@ -123,6 +123,7 @@ const (
epAlertManagers = apiPrefix + "/alertmanagers"
epQuery = apiPrefix + "/query"
epQueryRange = apiPrefix + "/query_range"
+ epQueryExemplars = apiPrefix + "/query_exemplars"
epLabels = apiPrefix + "/labels"
epLabelValues = apiPrefix + "/label/:name/values"
epSeries = apiPrefix + "/series"
@@ -135,6 +136,7 @@ const (
epCleanTombstones = apiPrefix + "/admin/tsdb/clean_tombstones"
epConfig = apiPrefix + "/status/config"
epFlags = apiPrefix + "/status/flags"
+ epBuildinfo = apiPrefix + "/status/buildinfo"
epRuntimeinfo = apiPrefix + "/status/runtimeinfo"
epTSDB = apiPrefix + "/status/tsdb"
)
@@ -230,14 +232,18 @@ type API interface {
DeleteSeries(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) error
// Flags returns the flag values that Prometheus was launched with.
Flags(ctx context.Context) (FlagsResult, error)
- // LabelNames returns all the unique label names present in the block in sorted order.
- LabelNames(ctx context.Context, startTime time.Time, endTime time.Time) ([]string, Warnings, error)
- // LabelValues performs a query for the values of the given label.
- LabelValues(ctx context.Context, label string, startTime time.Time, endTime time.Time) (model.LabelValues, Warnings, error)
+ // LabelNames returns the unique label names present in the block in sorted order by given time range and matchers.
+ LabelNames(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]string, Warnings, error)
+ // LabelValues performs a query for the values of the given label, time range and matchers.
+ LabelValues(ctx context.Context, label string, matches []string, startTime time.Time, endTime time.Time) (model.LabelValues, Warnings, error)
// Query performs a query for the given time.
Query(ctx context.Context, query string, ts time.Time) (model.Value, Warnings, error)
// QueryRange performs a query for the given range.
QueryRange(ctx context.Context, query string, r Range) (model.Value, Warnings, error)
+ // QueryExemplars performs a query for exemplars by the given query and time range.
+ QueryExemplars(ctx context.Context, query string, startTime time.Time, endTime time.Time) ([]ExemplarQueryResult, error)
+ // Buildinfo returns various build information properties about the Prometheus server
+ Buildinfo(ctx context.Context) (BuildinfoResult, error)
// Runtimeinfo returns the various runtime information properties about the Prometheus server.
Runtimeinfo(ctx context.Context) (RuntimeinfoResult, error)
// Series finds series by label matchers.
@@ -281,6 +287,16 @@ type ConfigResult struct {
// FlagsResult contains the result from querying the flag endpoint.
type FlagsResult map[string]string
+// BuildinfoResult contains the results from querying the buildinfo endpoint.
+type BuildinfoResult struct {
+ Version string `json:"version"`
+ Revision string `json:"revision"`
+ Branch string `json:"branch"`
+ BuildUser string `json:"buildUser"`
+ BuildDate string `json:"buildDate"`
+ GoVersion string `json:"goVersion"`
+}
+
// RuntimeinfoResult contains the result from querying the runtimeinfo endpoint.
type RuntimeinfoResult struct {
StartTime time.Time `json:"startTime"`
@@ -331,23 +347,28 @@ type Rules []interface{}
// AlertingRule models a alerting rule.
type AlertingRule struct {
- Name string `json:"name"`
- Query string `json:"query"`
- Duration float64 `json:"duration"`
- Labels model.LabelSet `json:"labels"`
- Annotations model.LabelSet `json:"annotations"`
- Alerts []*Alert `json:"alerts"`
- Health RuleHealth `json:"health"`
- LastError string `json:"lastError,omitempty"`
+ Name string `json:"name"`
+ Query string `json:"query"`
+ Duration float64 `json:"duration"`
+ Labels model.LabelSet `json:"labels"`
+ Annotations model.LabelSet `json:"annotations"`
+ Alerts []*Alert `json:"alerts"`
+ Health RuleHealth `json:"health"`
+ LastError string `json:"lastError,omitempty"`
+ EvaluationTime float64 `json:"evaluationTime"`
+ LastEvaluation time.Time `json:"lastEvaluation"`
+ State string `json:"state"`
}
// RecordingRule models a recording rule.
type RecordingRule struct {
- Name string `json:"name"`
- Query string `json:"query"`
- Labels model.LabelSet `json:"labels,omitempty"`
- Health RuleHealth `json:"health"`
- LastError string `json:"lastError,omitempty"`
+ Name string `json:"name"`
+ Query string `json:"query"`
+ Labels model.LabelSet `json:"labels,omitempty"`
+ Health RuleHealth `json:"health"`
+ LastError string `json:"lastError,omitempty"`
+ EvaluationTime float64 `json:"evaluationTime"`
+ LastEvaluation time.Time `json:"lastEvaluation"`
}
// Alert models an active alert.
@@ -367,12 +388,15 @@ type TargetsResult struct {
// ActiveTarget models an active Prometheus scrape target.
type ActiveTarget struct {
- DiscoveredLabels map[string]string `json:"discoveredLabels"`
- Labels model.LabelSet `json:"labels"`
- ScrapeURL string `json:"scrapeUrl"`
- LastError string `json:"lastError"`
- LastScrape time.Time `json:"lastScrape"`
- Health HealthStatus `json:"health"`
+ DiscoveredLabels map[string]string `json:"discoveredLabels"`
+ Labels model.LabelSet `json:"labels"`
+ ScrapePool string `json:"scrapePool"`
+ ScrapeURL string `json:"scrapeUrl"`
+ GlobalURL string `json:"globalUrl"`
+ LastError string `json:"lastError"`
+ LastScrape time.Time `json:"lastScrape"`
+ LastScrapeDuration float64 `json:"lastScrapeDuration"`
+ Health HealthStatus `json:"health"`
}
// DroppedTarget models a dropped Prometheus scrape target.
@@ -467,14 +491,17 @@ func (r *AlertingRule) UnmarshalJSON(b []byte) error {
}
rule := struct {
- Name string `json:"name"`
- Query string `json:"query"`
- Duration float64 `json:"duration"`
- Labels model.LabelSet `json:"labels"`
- Annotations model.LabelSet `json:"annotations"`
- Alerts []*Alert `json:"alerts"`
- Health RuleHealth `json:"health"`
- LastError string `json:"lastError,omitempty"`
+ Name string `json:"name"`
+ Query string `json:"query"`
+ Duration float64 `json:"duration"`
+ Labels model.LabelSet `json:"labels"`
+ Annotations model.LabelSet `json:"annotations"`
+ Alerts []*Alert `json:"alerts"`
+ Health RuleHealth `json:"health"`
+ LastError string `json:"lastError,omitempty"`
+ EvaluationTime float64 `json:"evaluationTime"`
+ LastEvaluation time.Time `json:"lastEvaluation"`
+ State string `json:"state"`
}{}
if err := json.Unmarshal(b, &rule); err != nil {
return err
@@ -487,6 +514,9 @@ func (r *AlertingRule) UnmarshalJSON(b []byte) error {
r.Duration = rule.Duration
r.Labels = rule.Labels
r.LastError = rule.LastError
+ r.EvaluationTime = rule.EvaluationTime
+ r.LastEvaluation = rule.LastEvaluation
+ r.State = rule.State
return nil
}
@@ -506,11 +536,13 @@ func (r *RecordingRule) UnmarshalJSON(b []byte) error {
}
rule := struct {
- Name string `json:"name"`
- Query string `json:"query"`
- Labels model.LabelSet `json:"labels,omitempty"`
- Health RuleHealth `json:"health"`
- LastError string `json:"lastError,omitempty"`
+ Name string `json:"name"`
+ Query string `json:"query"`
+ Labels model.LabelSet `json:"labels,omitempty"`
+ Health RuleHealth `json:"health"`
+ LastError string `json:"lastError,omitempty"`
+ EvaluationTime float64 `json:"evaluationTime"`
+ LastEvaluation time.Time `json:"lastEvaluation"`
}{}
if err := json.Unmarshal(b, &rule); err != nil {
return err
@@ -520,6 +552,8 @@ func (r *RecordingRule) UnmarshalJSON(b []byte) error {
r.Name = rule.Name
r.LastError = rule.LastError
r.Query = rule.Query
+ r.EvaluationTime = rule.EvaluationTime
+ r.LastEvaluation = rule.LastEvaluation
return nil
}
@@ -557,6 +591,18 @@ func (qr *queryResult) UnmarshalJSON(b []byte) error {
return err
}
+// Exemplar is additional information associated with a time series.
+type Exemplar struct {
+ Labels model.LabelSet `json:"labels"`
+ Value model.SampleValue `json:"value"`
+ Timestamp model.Time `json:"timestamp"`
+}
+
+type ExemplarQueryResult struct {
+ SeriesLabels model.LabelSet `json:"seriesLabels"`
+ Exemplars []Exemplar `json:"exemplars"`
+}
+
// NewAPI returns a new API for the client.
//
// It is safe to use the returned API from multiple goroutines.
@@ -674,6 +720,23 @@ func (h *httpAPI) Flags(ctx context.Context) (FlagsResult, error) {
return res, json.Unmarshal(body, &res)
}
+func (h *httpAPI) Buildinfo(ctx context.Context) (BuildinfoResult, error) {
+ u := h.client.URL(epBuildinfo, nil)
+
+ req, err := http.NewRequest(http.MethodGet, u.String(), nil)
+ if err != nil {
+ return BuildinfoResult{}, err
+ }
+
+ _, body, _, err := h.client.Do(ctx, req)
+ if err != nil {
+ return BuildinfoResult{}, err
+ }
+
+ var res BuildinfoResult
+ return res, json.Unmarshal(body, &res)
+}
+
func (h *httpAPI) Runtimeinfo(ctx context.Context) (RuntimeinfoResult, error) {
u := h.client.URL(epRuntimeinfo, nil)
@@ -691,11 +754,14 @@ func (h *httpAPI) Runtimeinfo(ctx context.Context) (RuntimeinfoResult, error) {
return res, json.Unmarshal(body, &res)
}
-func (h *httpAPI) LabelNames(ctx context.Context, startTime time.Time, endTime time.Time) ([]string, Warnings, error) {
+func (h *httpAPI) LabelNames(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]string, Warnings, error) {
u := h.client.URL(epLabels, nil)
q := u.Query()
q.Set("start", formatTime(startTime))
q.Set("end", formatTime(endTime))
+ for _, m := range matches {
+ q.Add("match[]", m)
+ }
u.RawQuery = q.Encode()
@@ -711,11 +777,14 @@ func (h *httpAPI) LabelNames(ctx context.Context, startTime time.Time, endTime t
return labelNames, w, json.Unmarshal(body, &labelNames)
}
-func (h *httpAPI) LabelValues(ctx context.Context, label string, startTime time.Time, endTime time.Time) (model.LabelValues, Warnings, error) {
+func (h *httpAPI) LabelValues(ctx context.Context, label string, matches []string, startTime time.Time, endTime time.Time) (model.LabelValues, Warnings, error) {
u := h.client.URL(epLabelValues, map[string]string{"name": label})
q := u.Query()
q.Set("start", formatTime(startTime))
q.Set("end", formatTime(endTime))
+ for _, m := range matches {
+ q.Add("match[]", m)
+ }
u.RawQuery = q.Encode()
@@ -913,7 +982,29 @@ func (h *httpAPI) TSDB(ctx context.Context) (TSDBResult, error) {
var res TSDBResult
return res, json.Unmarshal(body, &res)
+}
+
+func (h *httpAPI) QueryExemplars(ctx context.Context, query string, startTime time.Time, endTime time.Time) ([]ExemplarQueryResult, error) {
+ u := h.client.URL(epQueryExemplars, nil)
+ q := u.Query()
+
+ q.Set("query", query)
+ q.Set("start", formatTime(startTime))
+ q.Set("end", formatTime(endTime))
+ u.RawQuery = q.Encode()
+ req, err := http.NewRequest(http.MethodGet, u.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+
+ _, body, _, err := h.client.Do(ctx, req)
+ if err != nil {
+ return nil, err
+ }
+
+ var res []ExemplarQueryResult
+ return res, json.Unmarshal(body, &res)
}
// Warnings is an array of non critical errors
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info.go
deleted file mode 100644
index 288f0e854..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/build_info.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build go1.12
-
-package prometheus
-
-import "runtime/debug"
-
-// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go 1.12+.
-func readBuildInfo() (path, version, sum string) {
- path, version, sum = "unknown", "unknown", "unknown"
- if bi, ok := debug.ReadBuildInfo(); ok {
- path = bi.Main.Path
- version = bi.Main.Version
- sum = bi.Main.Sum
- }
- return
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go
deleted file mode 100644
index 6609e2877..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !go1.12
-
-package prometheus
-
-// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go versions before
-// 1.12. Remove this whole file once the minimum supported Go version is 1.12.
-func readBuildInfo() (path, version, sum string) {
- return "unknown", "unknown", "unknown"
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
index 957d93a2d..4bb816ab7 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -20,7 +20,7 @@ import (
"strings"
"github.com/cespare/xxhash/v2"
- //lint:ignore SA1019 Need to keep deprecated package for compatibility.
+ //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
"github.com/prometheus/common/model"
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
index 18a99d5fa..c41ab37f3 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
@@ -22,43 +22,10 @@ type expvarCollector struct {
exports map[string]*Desc
}
-// NewExpvarCollector returns a newly allocated expvar Collector that still has
-// to be registered with a Prometheus registry.
+// NewExpvarCollector is the obsolete version of collectors.NewExpvarCollector.
+// See there for documentation.
//
-// An expvar Collector collects metrics from the expvar interface. It provides a
-// quick way to expose numeric values that are already exported via expvar as
-// Prometheus metrics. Note that the data models of expvar and Prometheus are
-// fundamentally different, and that the expvar Collector is inherently slower
-// than native Prometheus metrics. Thus, the expvar Collector is probably great
-// for experiments and prototying, but you should seriously consider a more
-// direct implementation of Prometheus metrics for monitoring production
-// systems.
-//
-// The exports map has the following meaning:
-//
-// The keys in the map correspond to expvar keys, i.e. for every expvar key you
-// want to export as Prometheus metric, you need an entry in the exports
-// map. The descriptor mapped to each key describes how to export the expvar
-// value. It defines the name and the help string of the Prometheus metric
-// proxying the expvar value. The type will always be Untyped.
-//
-// For descriptors without variable labels, the expvar value must be a number or
-// a bool. The number is then directly exported as the Prometheus sample
-// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values
-// that are not numbers or bools are silently ignored.
-//
-// If the descriptor has one variable label, the expvar value must be an expvar
-// map. The keys in the expvar map become the various values of the one
-// Prometheus label. The values in the expvar map must be numbers or bools again
-// as above.
-//
-// For descriptors with more than one variable label, the expvar must be a
-// nested expvar map, i.e. where the values of the topmost map are maps again
-// etc. until a depth is reached that corresponds to the number of labels. The
-// leaves of that structure must be numbers or bools as above to serve as the
-// sample values.
-//
-// Anything that does not fit into the scheme above is silently ignored.
+// Deprecated: Use collectors.NewExpvarCollector instead.
func NewExpvarCollector(exports map[string]*Desc) Collector {
return &expvarCollector{
exports: exports,
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
index 6f67d1046..a96ed1cee 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -36,32 +36,10 @@ type goCollector struct {
msMaxAge time.Duration // Maximum allowed age of old memstats.
}
-// NewGoCollector returns a collector that exports metrics about the current Go
-// process. This includes memory stats. To collect those, runtime.ReadMemStats
-// is called. This requires to “stop the world”, which usually only happens for
-// garbage collection (GC). Take the following implications into account when
-// deciding whether to use the Go collector:
+// NewGoCollector is the obsolete version of collectors.NewGoCollector.
+// See there for documentation.
//
-// 1. The performance impact of stopping the world is the more relevant the more
-// frequently metrics are collected. However, with Go1.9 or later the
-// stop-the-world time per metrics collection is very short (~25µs) so that the
-// performance impact will only matter in rare cases. However, with older Go
-// versions, the stop-the-world duration depends on the heap size and can be
-// quite significant (~1.7 ms/GiB as per
-// https://go-review.googlesource.com/c/go/+/34937).
-//
-// 2. During an ongoing GC, nothing else can stop the world. Therefore, if the
-// metrics collection happens to coincide with GC, it will only complete after
-// GC has finished. Usually, GC is fast enough to not cause problems. However,
-// with a very large heap, GC might take multiple seconds, which is enough to
-// cause scrape timeouts in common setups. To avoid this problem, the Go
-// collector will use the memstats from a previous collection if
-// runtime.ReadMemStats takes more than 1s. However, if there are no previously
-// collected memstats, or their collection is more than 5m ago, the collection
-// will block until runtime.ReadMemStats succeeds.
-//
-// NOTE: The problem is solved in Go 1.15, see
-// https://github.com/golang/go/issues/19812 for the related Go issue.
+// Deprecated: Use collectors.NewGoCollector instead.
func NewGoCollector() Collector {
return &goCollector{
goroutinesDesc: NewDesc(
@@ -366,25 +344,17 @@ type memStatsMetrics []struct {
valType ValueType
}
-// NewBuildInfoCollector returns a collector collecting a single metric
-// "go_build_info" with the constant value 1 and three labels "path", "version",
-// and "checksum". Their label values contain the main module path, version, and
-// checksum, respectively. The labels will only have meaningful values if the
-// binary is built with Go module support and from source code retrieved from
-// the source repository (rather than the local file system). This is usually
-// accomplished by building from outside of GOPATH, specifying the full address
-// of the main package, e.g. "GO111MODULE=on go run
-// github.com/prometheus/client_golang/examples/random". If built without Go
-// module support, all label values will be "unknown". If built with Go module
-// support but using the source code from the local file system, the "path" will
-// be set appropriately, but "checksum" will be empty and "version" will be
-// "(devel)".
+// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector.
+// See there for documentation.
//
-// This collector uses only the build information for the main module. See
-// https://github.com/povilasv/prommod for an example of a collector for the
-// module dependencies.
+// Deprecated: Use collectors.NewBuildInfoCollector instead.
func NewBuildInfoCollector() Collector {
- path, version, sum := readBuildInfo()
+ path, version, sum := "unknown", "unknown", "unknown"
+ if bi, ok := debug.ReadBuildInfo(); ok {
+ path = bi.Main.Path
+ version = bi.Main.Version
+ sum = bi.Main.Sum
+ }
c := &selfCollector{MustNewConstMetric(
NewDesc(
"go_build_info",
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
index f71e286be..8425640b3 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -22,7 +22,7 @@ import (
"sync/atomic"
"time"
- //lint:ignore SA1019 Need to keep deprecated package for compatibility.
+ //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go"
@@ -47,7 +47,12 @@ type Histogram interface {
Metric
Collector
- // Observe adds a single observation to the histogram.
+ // Observe adds a single observation to the histogram. Observations are
+ // usually positive or zero. Negative observations are accepted but
+ // prevent current versions of Prometheus from properly detecting
+ // counter resets in the sum of observations. See
+ // https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations
+ // for details.
Observe(float64)
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
index a2b80b1c1..dc121910a 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -17,7 +17,7 @@ import (
"strings"
"time"
- //lint:ignore SA1019 Need to keep deprecated package for compatibility.
+ //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
"github.com/prometheus/common/model"
@@ -58,7 +58,7 @@ type Metric interface {
}
// Opts bundles the options for creating most Metric types. Each metric
-// implementation XXX has its own XXXOpts type, but in most cases, it is just be
+// implementation XXX has its own XXXOpts type, but in most cases, it is just
// an alias of this type (which might change when the requirement arises.)
//
// It is mandatory to set Name to a non-empty string. All other fields are
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
index c46702d60..5bfe0ff5b 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
@@ -54,16 +54,10 @@ type ProcessCollectorOpts struct {
ReportErrors bool
}
-// NewProcessCollector returns a collector which exports the current state of
-// process metrics including CPU, memory and file descriptor usage as well as
-// the process start time. The detailed behavior is defined by the provided
-// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a
-// collector for the current process with an empty namespace string and no error
-// reporting.
+// NewProcessCollector is the obsolete version of collectors.NewProcessCollector.
+// See there for documentation.
//
-// The collector only works on operating systems with a Linux-style proc
-// filesystem and on Microsoft Windows. On other operating systems, it will not
-// collect any metrics.
+// Deprecated: Use collectors.NewProcessCollector instead.
func NewProcessCollector(opts ProcessCollectorOpts) Collector {
ns := ""
if len(opts.Namespace) > 0 {
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
index 5070e72e2..e7c0d0546 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
@@ -83,8 +83,7 @@ type readerFromDelegator struct{ *responseWriterDelegator }
type pusherDelegator struct{ *responseWriterDelegator }
func (d closeNotifierDelegator) CloseNotify() <-chan bool {
- //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to
- //remove support from client_golang yet.
+ //nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users.
return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
}
func (d flusherDelegator) Flush() {
@@ -348,8 +347,7 @@ func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) deleg
}
id := 0
- //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to
- //remove support from client_golang yet.
+ //nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users.
if _, ok := w.(http.CloseNotifier); ok {
id += closeNotifier
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
index 48f5ef9d7..383a7f594 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -26,7 +26,7 @@ import (
"unicode/utf8"
"github.com/cespare/xxhash/v2"
- //lint:ignore SA1019 Need to keep deprecated package for compatibility.
+ //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
"github.com/prometheus/common/expfmt"
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
index cf7007149..c5fa8ed7c 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
@@ -23,7 +23,7 @@ import (
"time"
"github.com/beorn7/perks/quantile"
- //lint:ignore SA1019 Need to keep deprecated package for compatibility.
+ //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go"
@@ -55,7 +55,12 @@ type Summary interface {
Metric
Collector
- // Observe adds a single observation to the summary.
+ // Observe adds a single observation to the summary. Observations are
+ // usually positive or zero. Negative observations are accepted but
+ // prevent current versions of Prometheus from properly detecting
+ // counter resets in the sum of observations. See
+ // https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations
+ // for details.
Observe(float64)
}
@@ -121,7 +126,9 @@ type SummaryOpts struct {
Objectives map[float64]float64
// MaxAge defines the duration for which an observation stays relevant
- // for the summary. Must be positive. The default value is DefMaxAge.
+ // for the summary. Only applies to pre-calculated quantiles, does not
+ // apply to _sum and _count. Must be positive. The default value is
+ // DefMaxAge.
MaxAge time.Duration
// AgeBuckets is the number of buckets used to exclude observations that
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go
index 8304de477..c778711b8 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/value.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go
@@ -19,7 +19,7 @@ import (
"time"
"unicode/utf8"
- //lint:ignore SA1019 Need to keep deprecated package for compatibility.
+ //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
index 6ba49d85b..4ababe6c9 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -167,8 +167,8 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
// calling the newMetric function provided during construction of the
// MetricVec).
//
-// It is possible to call this method without using the returned Metry to only
-// create the new Metric but leave it in its intitial state.
+// It is possible to call this method without using the returned Metric to only
+// create the new Metric but leave it in its initial state.
//
// Keeping the Metric for later use is possible (and should be considered if
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
index c1b12f084..74ee93280 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
@@ -17,7 +17,7 @@ import (
"fmt"
"sort"
- //lint:ignore SA1019 Need to keep deprecated package for compatibility.
+ //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go"
diff --git a/vendor/github.com/prometheus/common/config/config.go b/vendor/github.com/prometheus/common/config/config.go
index d8a7351f9..fffda4a7e 100644
--- a/vendor/github.com/prometheus/common/config/config.go
+++ b/vendor/github.com/prometheus/common/config/config.go
@@ -16,7 +16,12 @@
package config
-import "path/filepath"
+import (
+ "encoding/json"
+ "path/filepath"
+)
+
+const secretToken = ""
// Secret special type for storing secrets.
type Secret string
@@ -24,7 +29,7 @@ type Secret string
// MarshalYAML implements the yaml.Marshaler interface for Secrets.
func (s Secret) MarshalYAML() (interface{}, error) {
if s != "" {
- return "", nil
+ return secretToken, nil
}
return nil, nil
}
@@ -35,6 +40,14 @@ func (s *Secret) UnmarshalYAML(unmarshal func(interface{}) error) error {
return unmarshal((*plain)(s))
}
+// MarshalJSON implements the json.Marshaler interface for Secret.
+func (s Secret) MarshalJSON() ([]byte, error) {
+ if len(s) == 0 {
+ return json.Marshal("")
+ }
+ return json.Marshal(secretToken)
+}
+
// DirectorySetter is a config type that contains file paths that may
// be relative to the file containing the config.
type DirectorySetter interface {
diff --git a/vendor/github.com/prometheus/common/config/http_config.go b/vendor/github.com/prometheus/common/config/http_config.go
index 4dd887585..6c0c033d4 100644
--- a/vendor/github.com/prometheus/common/config/http_config.go
+++ b/vendor/github.com/prometheus/common/config/http_config.go
@@ -17,31 +17,51 @@ package config
import (
"bytes"
- "crypto/md5"
+ "context"
+ "crypto/sha256"
"crypto/tls"
"crypto/x509"
+ "encoding/json"
"fmt"
"io/ioutil"
+ "net"
"net/http"
"net/url"
+ "os"
"strings"
"sync"
"time"
"github.com/mwitkow/go-conntrack"
"golang.org/x/net/http2"
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/clientcredentials"
"gopkg.in/yaml.v2"
)
+// DefaultHTTPClientConfig is the default HTTP client configuration.
+var DefaultHTTPClientConfig = HTTPClientConfig{
+ FollowRedirects: true,
+}
+
+// defaultHTTPClientOptions holds the default HTTP client options.
+var defaultHTTPClientOptions = httpClientOptions{
+ keepAlivesEnabled: true,
+ http2Enabled: true,
+ // 5 minutes is typically above the maximum sane scrape interval. So we can
+ // use keepalive for all configurations.
+ idleConnTimeout: 5 * time.Minute,
+}
+
type closeIdler interface {
CloseIdleConnections()
}
// BasicAuth contains basic HTTP authentication credentials.
type BasicAuth struct {
- Username string `yaml:"username"`
- Password Secret `yaml:"password,omitempty"`
- PasswordFile string `yaml:"password_file,omitempty"`
+ Username string `yaml:"username" json:"username"`
+ Password Secret `yaml:"password,omitempty" json:"password,omitempty"`
+ PasswordFile string `yaml:"password_file,omitempty" json:"password_file,omitempty"`
}
// SetDirectory joins any relative file paths with dir.
@@ -52,6 +72,21 @@ func (a *BasicAuth) SetDirectory(dir string) {
a.PasswordFile = JoinDir(dir, a.PasswordFile)
}
+// Authorization contains HTTP authorization credentials.
+type Authorization struct {
+ Type string `yaml:"type,omitempty" json:"type,omitempty"`
+ Credentials Secret `yaml:"credentials,omitempty" json:"credentials,omitempty"`
+ CredentialsFile string `yaml:"credentials_file,omitempty" json:"credentials_file,omitempty"`
+}
+
+// SetDirectory joins any relative file paths with dir.
+func (a *Authorization) SetDirectory(dir string) {
+ if a == nil {
+ return
+ }
+ a.CredentialsFile = JoinDir(dir, a.CredentialsFile)
+}
+
// URL is a custom URL type that allows validation at configuration load time.
type URL struct {
*url.URL
@@ -80,18 +115,68 @@ func (u URL) MarshalYAML() (interface{}, error) {
return nil, nil
}
+// UnmarshalJSON implements the json.Unmarshaler interface for URL.
+func (u *URL) UnmarshalJSON(data []byte) error {
+ var s string
+ if err := json.Unmarshal(data, &s); err != nil {
+ return err
+ }
+ urlp, err := url.Parse(s)
+ if err != nil {
+ return err
+ }
+ u.URL = urlp
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface for URL.
+func (u URL) MarshalJSON() ([]byte, error) {
+ if u.URL != nil {
+ return json.Marshal(u.URL.String())
+ }
+ return []byte("null"), nil
+}
+
+// OAuth2 is the oauth2 client configuration.
+type OAuth2 struct {
+ ClientID string `yaml:"client_id" json:"client_id"`
+ ClientSecret Secret `yaml:"client_secret" json:"client_secret"`
+ ClientSecretFile string `yaml:"client_secret_file" json:"client_secret_file"`
+ Scopes []string `yaml:"scopes,omitempty" json:"scopes,omitempty"`
+ TokenURL string `yaml:"token_url" json:"token_url"`
+ EndpointParams map[string]string `yaml:"endpoint_params,omitempty" json:"endpoint_params,omitempty"`
+}
+
+// SetDirectory joins any relative file paths with dir.
+func (a *OAuth2) SetDirectory(dir string) {
+ if a == nil {
+ return
+ }
+ a.ClientSecretFile = JoinDir(dir, a.ClientSecretFile)
+}
+
// HTTPClientConfig configures an HTTP client.
type HTTPClientConfig struct {
// The HTTP basic authentication credentials for the targets.
- BasicAuth *BasicAuth `yaml:"basic_auth,omitempty"`
- // The bearer token for the targets.
- BearerToken Secret `yaml:"bearer_token,omitempty"`
- // The bearer token file for the targets.
- BearerTokenFile string `yaml:"bearer_token_file,omitempty"`
+ BasicAuth *BasicAuth `yaml:"basic_auth,omitempty" json:"basic_auth,omitempty"`
+ // The HTTP authorization credentials for the targets.
+ Authorization *Authorization `yaml:"authorization,omitempty" json:"authorization,omitempty"`
+ // The OAuth2 client credentials used to fetch a token for the targets.
+ OAuth2 *OAuth2 `yaml:"oauth2,omitempty" json:"oauth2,omitempty"`
+ // The bearer token for the targets. Deprecated in favour of
+ // Authorization.Credentials.
+ BearerToken Secret `yaml:"bearer_token,omitempty" json:"bearer_token,omitempty"`
+ // The bearer token file for the targets. Deprecated in favour of
+ // Authorization.CredentialsFile.
+ BearerTokenFile string `yaml:"bearer_token_file,omitempty" json:"bearer_token_file,omitempty"`
// HTTP proxy server to use to connect to the targets.
- ProxyURL URL `yaml:"proxy_url,omitempty"`
+ ProxyURL URL `yaml:"proxy_url,omitempty" json:"proxy_url,omitempty"`
// TLSConfig to use to connect to the targets.
- TLSConfig TLSConfig `yaml:"tls_config,omitempty"`
+ TLSConfig TLSConfig `yaml:"tls_config,omitempty" json:"tls_config,omitempty"`
+ // FollowRedirects specifies whether the client should follow HTTP 3xx redirects.
+ // The omitempty flag is not set, because it would be hidden from the
+ // marshalled configuration when set to false.
+ FollowRedirects bool `yaml:"follow_redirects" json:"follow_redirects"`
}
// SetDirectory joins any relative file paths with dir.
@@ -101,102 +186,241 @@ func (c *HTTPClientConfig) SetDirectory(dir string) {
}
c.TLSConfig.SetDirectory(dir)
c.BasicAuth.SetDirectory(dir)
+ c.Authorization.SetDirectory(dir)
+ c.OAuth2.SetDirectory(dir)
c.BearerTokenFile = JoinDir(dir, c.BearerTokenFile)
}
// Validate validates the HTTPClientConfig to check only one of BearerToken,
// BasicAuth and BearerTokenFile is configured.
func (c *HTTPClientConfig) Validate() error {
+ // Backwards compatibility with the bearer_token field.
if len(c.BearerToken) > 0 && len(c.BearerTokenFile) > 0 {
return fmt.Errorf("at most one of bearer_token & bearer_token_file must be configured")
}
- if c.BasicAuth != nil && (len(c.BearerToken) > 0 || len(c.BearerTokenFile) > 0) {
- return fmt.Errorf("at most one of basic_auth, bearer_token & bearer_token_file must be configured")
+ if (c.BasicAuth != nil || c.OAuth2 != nil) && (len(c.BearerToken) > 0 || len(c.BearerTokenFile) > 0) {
+ return fmt.Errorf("at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured")
}
if c.BasicAuth != nil && (string(c.BasicAuth.Password) != "" && c.BasicAuth.PasswordFile != "") {
return fmt.Errorf("at most one of basic_auth password & password_file must be configured")
}
+ if c.Authorization != nil {
+ if len(c.BearerToken) > 0 || len(c.BearerTokenFile) > 0 {
+ return fmt.Errorf("authorization is not compatible with bearer_token & bearer_token_file")
+ }
+ if string(c.Authorization.Credentials) != "" && c.Authorization.CredentialsFile != "" {
+ return fmt.Errorf("at most one of authorization credentials & credentials_file must be configured")
+ }
+ c.Authorization.Type = strings.TrimSpace(c.Authorization.Type)
+ if len(c.Authorization.Type) == 0 {
+ c.Authorization.Type = "Bearer"
+ }
+ if strings.ToLower(c.Authorization.Type) == "basic" {
+ return fmt.Errorf(`authorization type cannot be set to "basic", use "basic_auth" instead`)
+ }
+ if c.BasicAuth != nil || c.OAuth2 != nil {
+ return fmt.Errorf("at most one of basic_auth, oauth2 & authorization must be configured")
+ }
+ } else {
+ if len(c.BearerToken) > 0 {
+ c.Authorization = &Authorization{Credentials: c.BearerToken}
+ c.Authorization.Type = "Bearer"
+ c.BearerToken = ""
+ }
+ if len(c.BearerTokenFile) > 0 {
+ c.Authorization = &Authorization{CredentialsFile: c.BearerTokenFile}
+ c.Authorization.Type = "Bearer"
+ c.BearerTokenFile = ""
+ }
+ }
+ if c.OAuth2 != nil {
+ if c.BasicAuth != nil {
+ return fmt.Errorf("at most one of basic_auth, oauth2 & authorization must be configured")
+ }
+ if len(c.OAuth2.ClientID) == 0 {
+ return fmt.Errorf("oauth2 client_id must be configured")
+ }
+ if len(c.OAuth2.ClientSecret) == 0 && len(c.OAuth2.ClientSecretFile) == 0 {
+ return fmt.Errorf("either oauth2 client_secret or client_secret_file must be configured")
+ }
+ if len(c.OAuth2.TokenURL) == 0 {
+ return fmt.Errorf("oauth2 token_url must be configured")
+ }
+ if len(c.OAuth2.ClientSecret) > 0 && len(c.OAuth2.ClientSecretFile) > 0 {
+ return fmt.Errorf("at most one of oauth2 client_secret & client_secret_file must be configured")
+ }
+ }
return nil
}
// UnmarshalYAML implements the yaml.Unmarshaler interface
func (c *HTTPClientConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
type plain HTTPClientConfig
+ *c = DefaultHTTPClientConfig
if err := unmarshal((*plain)(c)); err != nil {
return err
}
return c.Validate()
}
+// UnmarshalJSON implements the json.Unmarshaler interface for HTTPClientConfig.
+func (c *HTTPClientConfig) UnmarshalJSON(data []byte) error {
+ type plain HTTPClientConfig
+ *c = DefaultHTTPClientConfig
+ if err := json.Unmarshal(data, (*plain)(c)); err != nil {
+ return err
+ }
+ return c.Validate()
+}
+
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (a *BasicAuth) UnmarshalYAML(unmarshal func(interface{}) error) error {
type plain BasicAuth
return unmarshal((*plain)(a))
}
+// DialContextFunc defines the signature of the DialContext() function implemented
+// by net.Dialer.
+type DialContextFunc func(context.Context, string, string) (net.Conn, error)
+
+type httpClientOptions struct {
+ dialContextFunc DialContextFunc
+ keepAlivesEnabled bool
+ http2Enabled bool
+ idleConnTimeout time.Duration
+}
+
+// HTTPClientOption defines an option that can be applied to the HTTP client.
+type HTTPClientOption func(options *httpClientOptions)
+
+// WithDialContextFunc allows you to override the func that gets used for the actual dialing. The default is `net.Dialer.DialContext`.
+func WithDialContextFunc(fn DialContextFunc) HTTPClientOption {
+ return func(opts *httpClientOptions) {
+ opts.dialContextFunc = fn
+ }
+}
+
+// WithKeepAlivesDisabled allows to disable HTTP keepalive.
+func WithKeepAlivesDisabled() HTTPClientOption {
+ return func(opts *httpClientOptions) {
+ opts.keepAlivesEnabled = false
+ }
+}
+
+// WithHTTP2Disabled allows to disable HTTP2.
+func WithHTTP2Disabled() HTTPClientOption {
+ return func(opts *httpClientOptions) {
+ opts.http2Enabled = false
+ }
+}
+
+// WithIdleConnTimeout allows setting the idle connection timeout.
+func WithIdleConnTimeout(timeout time.Duration) HTTPClientOption {
+ return func(opts *httpClientOptions) {
+ opts.idleConnTimeout = timeout
+ }
+}
+
// NewClient returns a http.Client using the specified http.RoundTripper.
func newClient(rt http.RoundTripper) *http.Client {
return &http.Client{Transport: rt}
}
// NewClientFromConfig returns a new HTTP client configured for the
-// given config.HTTPClientConfig. The name is used as go-conntrack metric label.
-func NewClientFromConfig(cfg HTTPClientConfig, name string, disableKeepAlives, enableHTTP2 bool) (*http.Client, error) {
- rt, err := NewRoundTripperFromConfig(cfg, name, disableKeepAlives, enableHTTP2)
+// given config.HTTPClientConfig and config.HTTPClientOption.
+// The name is used as go-conntrack metric label.
+func NewClientFromConfig(cfg HTTPClientConfig, name string, optFuncs ...HTTPClientOption) (*http.Client, error) {
+ rt, err := NewRoundTripperFromConfig(cfg, name, optFuncs...)
if err != nil {
return nil, err
}
- return newClient(rt), nil
+ client := newClient(rt)
+ if !cfg.FollowRedirects {
+ client.CheckRedirect = func(*http.Request, []*http.Request) error {
+ return http.ErrUseLastResponse
+ }
+ }
+ return client, nil
}
// NewRoundTripperFromConfig returns a new HTTP RoundTripper configured for the
-// given config.HTTPClientConfig. The name is used as go-conntrack metric label.
-func NewRoundTripperFromConfig(cfg HTTPClientConfig, name string, disableKeepAlives, enableHTTP2 bool) (http.RoundTripper, error) {
+// given config.HTTPClientConfig and config.HTTPClientOption.
+// The name is used as go-conntrack metric label.
+func NewRoundTripperFromConfig(cfg HTTPClientConfig, name string, optFuncs ...HTTPClientOption) (http.RoundTripper, error) {
+ opts := defaultHTTPClientOptions
+ for _, f := range optFuncs {
+ f(&opts)
+ }
+
+ var dialContext func(ctx context.Context, network, addr string) (net.Conn, error)
+
+ if opts.dialContextFunc != nil {
+ dialContext = conntrack.NewDialContextFunc(
+ conntrack.DialWithDialContextFunc((func(context.Context, string, string) (net.Conn, error))(opts.dialContextFunc)),
+ conntrack.DialWithTracing(),
+ conntrack.DialWithName(name))
+ } else {
+ dialContext = conntrack.NewDialContextFunc(
+ conntrack.DialWithTracing(),
+ conntrack.DialWithName(name))
+ }
+
newRT := func(tlsConfig *tls.Config) (http.RoundTripper, error) {
// The only timeout we care about is the configured scrape timeout.
// It is applied on request. So we leave out any timings here.
var rt http.RoundTripper = &http.Transport{
- Proxy: http.ProxyURL(cfg.ProxyURL.URL),
- MaxIdleConns: 20000,
- MaxIdleConnsPerHost: 1000, // see https://github.com/golang/go/issues/13801
- DisableKeepAlives: disableKeepAlives,
- TLSClientConfig: tlsConfig,
- DisableCompression: true,
- // 5 minutes is typically above the maximum sane scrape interval. So we can
- // use keepalive for all configurations.
- IdleConnTimeout: 5 * time.Minute,
+ Proxy: http.ProxyURL(cfg.ProxyURL.URL),
+ MaxIdleConns: 20000,
+ MaxIdleConnsPerHost: 1000, // see https://github.com/golang/go/issues/13801
+ DisableKeepAlives: !opts.keepAlivesEnabled,
+ TLSClientConfig: tlsConfig,
+ DisableCompression: true,
+ IdleConnTimeout: opts.idleConnTimeout,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
- DialContext: conntrack.NewDialContextFunc(
- conntrack.DialWithTracing(),
- conntrack.DialWithName(name),
- ),
+ DialContext: dialContext,
}
- if enableHTTP2 {
+ if opts.http2Enabled || os.Getenv("PROMETHEUS_COMMON_ENABLE_HTTP2") != "" {
// HTTP/2 support is golang has many problematic cornercases where
// dead connections would be kept and used in connection pools.
// https://github.com/golang/go/issues/32388
// https://github.com/golang/go/issues/39337
// https://github.com/golang/go/issues/39750
- // TODO: Re-Enable HTTP/2 once upstream issue is fixed.
- // TODO: use ForceAttemptHTTP2 when we move to Go 1.13+.
- err := http2.ConfigureTransport(rt.(*http.Transport))
+
+ // Enable HTTP2 if the environment variable
+ // PROMETHEUS_COMMON_ENABLE_HTTP2 is set.
+ // This is a temporary workaround so that users can safely test this
+ // and validate that HTTP2 can be enabled Prometheus-Wide again.
+
+ http2t, err := http2.ConfigureTransports(rt.(*http.Transport))
if err != nil {
return nil, err
}
+ http2t.ReadIdleTimeout = time.Minute
}
- // If a bearer token is provided, create a round tripper that will set the
+ // If a authorization_credentials is provided, create a round tripper that will set the
// Authorization header correctly on each request.
+ if cfg.Authorization != nil && len(cfg.Authorization.Credentials) > 0 {
+ rt = NewAuthorizationCredentialsRoundTripper(cfg.Authorization.Type, cfg.Authorization.Credentials, rt)
+ } else if cfg.Authorization != nil && len(cfg.Authorization.CredentialsFile) > 0 {
+ rt = NewAuthorizationCredentialsFileRoundTripper(cfg.Authorization.Type, cfg.Authorization.CredentialsFile, rt)
+ }
+ // Backwards compatibility, be nice with importers who would not have
+ // called Validate().
if len(cfg.BearerToken) > 0 {
- rt = NewBearerAuthRoundTripper(cfg.BearerToken, rt)
+ rt = NewAuthorizationCredentialsRoundTripper("Bearer", cfg.BearerToken, rt)
} else if len(cfg.BearerTokenFile) > 0 {
- rt = NewBearerAuthFileRoundTripper(cfg.BearerTokenFile, rt)
+ rt = NewAuthorizationCredentialsFileRoundTripper("Bearer", cfg.BearerTokenFile, rt)
}
if cfg.BasicAuth != nil {
rt = NewBasicAuthRoundTripper(cfg.BasicAuth.Username, cfg.BasicAuth.Password, cfg.BasicAuth.PasswordFile, rt)
}
+
+ if cfg.OAuth2 != nil {
+ rt = NewOAuth2RoundTripper(cfg.OAuth2, rt)
+ }
// Return a new configured RoundTripper.
return rt, nil
}
@@ -211,61 +435,64 @@ func NewRoundTripperFromConfig(cfg HTTPClientConfig, name string, disableKeepAli
return newRT(tlsConfig)
}
- return newTLSRoundTripper(tlsConfig, cfg.TLSConfig.CAFile, newRT)
+ return NewTLSRoundTripper(tlsConfig, cfg.TLSConfig.CAFile, newRT)
}
-type bearerAuthRoundTripper struct {
- bearerToken Secret
- rt http.RoundTripper
+type authorizationCredentialsRoundTripper struct {
+ authType string
+ authCredentials Secret
+ rt http.RoundTripper
}
-// NewBearerAuthRoundTripper adds the provided bearer token to a request unless the authorization
-// header has already been set.
-func NewBearerAuthRoundTripper(token Secret, rt http.RoundTripper) http.RoundTripper {
- return &bearerAuthRoundTripper{token, rt}
+// NewAuthorizationCredentialsRoundTripper adds the provided credentials to a
+// request unless the authorization header has already been set.
+func NewAuthorizationCredentialsRoundTripper(authType string, authCredentials Secret, rt http.RoundTripper) http.RoundTripper {
+ return &authorizationCredentialsRoundTripper{authType, authCredentials, rt}
}
-func (rt *bearerAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+func (rt *authorizationCredentialsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
if len(req.Header.Get("Authorization")) == 0 {
req = cloneRequest(req)
- req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", string(rt.bearerToken)))
+ req.Header.Set("Authorization", fmt.Sprintf("%s %s", rt.authType, string(rt.authCredentials)))
}
return rt.rt.RoundTrip(req)
}
-func (rt *bearerAuthRoundTripper) CloseIdleConnections() {
+func (rt *authorizationCredentialsRoundTripper) CloseIdleConnections() {
if ci, ok := rt.rt.(closeIdler); ok {
ci.CloseIdleConnections()
}
}
-type bearerAuthFileRoundTripper struct {
- bearerFile string
- rt http.RoundTripper
+type authorizationCredentialsFileRoundTripper struct {
+ authType string
+ authCredentialsFile string
+ rt http.RoundTripper
}
-// NewBearerAuthFileRoundTripper adds the bearer token read from the provided file to a request unless
-// the authorization header has already been set. This file is read for every request.
-func NewBearerAuthFileRoundTripper(bearerFile string, rt http.RoundTripper) http.RoundTripper {
- return &bearerAuthFileRoundTripper{bearerFile, rt}
+// NewAuthorizationCredentialsFileRoundTripper adds the authorization
+// credentials read from the provided file to a request unless the authorization
+// header has already been set. This file is read for every request.
+func NewAuthorizationCredentialsFileRoundTripper(authType, authCredentialsFile string, rt http.RoundTripper) http.RoundTripper {
+ return &authorizationCredentialsFileRoundTripper{authType, authCredentialsFile, rt}
}
-func (rt *bearerAuthFileRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+func (rt *authorizationCredentialsFileRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
if len(req.Header.Get("Authorization")) == 0 {
- b, err := ioutil.ReadFile(rt.bearerFile)
+ b, err := ioutil.ReadFile(rt.authCredentialsFile)
if err != nil {
- return nil, fmt.Errorf("unable to read bearer token file %s: %s", rt.bearerFile, err)
+ return nil, fmt.Errorf("unable to read authorization credentials file %s: %s", rt.authCredentialsFile, err)
}
- bearerToken := strings.TrimSpace(string(b))
+ authCredentials := strings.TrimSpace(string(b))
req = cloneRequest(req)
- req.Header.Set("Authorization", "Bearer "+bearerToken)
+ req.Header.Set("Authorization", fmt.Sprintf("%s %s", rt.authType, authCredentials))
}
return rt.rt.RoundTrip(req)
}
-func (rt *bearerAuthFileRoundTripper) CloseIdleConnections() {
+func (rt *authorizationCredentialsFileRoundTripper) CloseIdleConnections() {
if ci, ok := rt.rt.(closeIdler); ok {
ci.CloseIdleConnections()
}
@@ -307,6 +534,84 @@ func (rt *basicAuthRoundTripper) CloseIdleConnections() {
}
}
+type oauth2RoundTripper struct {
+ config *OAuth2
+ rt http.RoundTripper
+ next http.RoundTripper
+ secret string
+ mtx sync.RWMutex
+}
+
+func NewOAuth2RoundTripper(config *OAuth2, next http.RoundTripper) http.RoundTripper {
+ return &oauth2RoundTripper{
+ config: config,
+ next: next,
+ }
+}
+
+func (rt *oauth2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+ var (
+ secret string
+ changed bool
+ )
+
+ if rt.config.ClientSecretFile != "" {
+ data, err := ioutil.ReadFile(rt.config.ClientSecretFile)
+ if err != nil {
+ return nil, fmt.Errorf("unable to read oauth2 client secret file %s: %s", rt.config.ClientSecretFile, err)
+ }
+ secret = strings.TrimSpace(string(data))
+ rt.mtx.RLock()
+ changed = secret != rt.secret
+ rt.mtx.RUnlock()
+ }
+
+ if changed || rt.rt == nil {
+ if rt.config.ClientSecret != "" {
+ secret = string(rt.config.ClientSecret)
+ }
+
+ config := &clientcredentials.Config{
+ ClientID: rt.config.ClientID,
+ ClientSecret: secret,
+ Scopes: rt.config.Scopes,
+ TokenURL: rt.config.TokenURL,
+ EndpointParams: mapToValues(rt.config.EndpointParams),
+ }
+
+ tokenSource := config.TokenSource(context.Background())
+
+ rt.mtx.Lock()
+ rt.secret = secret
+ rt.rt = &oauth2.Transport{
+ Base: rt.next,
+ Source: tokenSource,
+ }
+ rt.mtx.Unlock()
+ }
+
+ rt.mtx.RLock()
+ currentRT := rt.rt
+ rt.mtx.RUnlock()
+ return currentRT.RoundTrip(req)
+}
+
+func (rt *oauth2RoundTripper) CloseIdleConnections() {
+ // OAuth2 RT does not support CloseIdleConnections() but the next RT might.
+ if ci, ok := rt.next.(closeIdler); ok {
+ ci.CloseIdleConnections()
+ }
+}
+
+func mapToValues(m map[string]string) url.Values {
+ v := url.Values{}
+ for name, value := range m {
+ v.Set(name, value)
+ }
+
+ return v
+}
+
// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
func cloneRequest(r *http.Request) *http.Request {
@@ -359,15 +664,15 @@ func NewTLSConfig(cfg *TLSConfig) (*tls.Config, error) {
// TLSConfig configures the options for TLS connections.
type TLSConfig struct {
// The CA cert to use for the targets.
- CAFile string `yaml:"ca_file,omitempty"`
+ CAFile string `yaml:"ca_file,omitempty" json:"ca_file,omitempty"`
// The client cert file for the targets.
- CertFile string `yaml:"cert_file,omitempty"`
+ CertFile string `yaml:"cert_file,omitempty" json:"cert_file,omitempty"`
// The client key file for the targets.
- KeyFile string `yaml:"key_file,omitempty"`
+ KeyFile string `yaml:"key_file,omitempty" json:"key_file,omitempty"`
// Used to verify the hostname for the targets.
- ServerName string `yaml:"server_name,omitempty"`
+ ServerName string `yaml:"server_name,omitempty" json:"server_name,omitempty"`
// Disable target certificate validation.
- InsecureSkipVerify bool `yaml:"insecure_skip_verify"`
+ InsecureSkipVerify bool `yaml:"insecure_skip_verify" json:"insecure_skip_verify"`
}
// SetDirectory joins any relative file paths with dir.
@@ -427,7 +732,7 @@ type tlsRoundTripper struct {
tlsConfig *tls.Config
}
-func newTLSRoundTripper(
+func NewTLSRoundTripper(
cfg *tls.Config,
caFile string,
newRT func(*tls.Config) (http.RoundTripper, error),
@@ -457,7 +762,7 @@ func (t *tlsRoundTripper) getCAWithHash() ([]byte, []byte, error) {
if err != nil {
return nil, nil, err
}
- h := md5.Sum(b)
+ h := sha256.Sum256(b)
return b, h[:], nil
}
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go
index bd4e34745..64dc0eb40 100644
--- a/vendor/github.com/prometheus/common/expfmt/encode.go
+++ b/vendor/github.com/prometheus/common/expfmt/encode.go
@@ -18,7 +18,7 @@ import (
"io"
"net/http"
- "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/matttproud/golang_protobuf_extensions/pbutil"
"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
index b6079b31e..84be0643e 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_parse.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -24,7 +24,7 @@ import (
dto "github.com/prometheus/client_model/go"
- "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/prometheus/common/model"
)
diff --git a/vendor/github.com/prometheus/common/log/eventlog_formatter.go b/vendor/github.com/prometheus/common/log/eventlog_formatter.go
deleted file mode 100644
index bcf68e6f2..000000000
--- a/vendor/github.com/prometheus/common/log/eventlog_formatter.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build windows
-
-package log
-
-import (
- "fmt"
- "os"
-
- "golang.org/x/sys/windows/svc/eventlog"
-
- "github.com/sirupsen/logrus"
-)
-
-func init() {
- setEventlogFormatter = func(l logger, name string, debugAsInfo bool) error {
- if name == "" {
- return fmt.Errorf("missing name parameter")
- }
-
- fmter, err := newEventlogger(name, debugAsInfo, l.entry.Logger.Formatter)
- if err != nil {
- fmt.Fprintf(os.Stderr, "error creating eventlog formatter: %v\n", err)
- l.Errorf("can't connect logger to eventlog: %v", err)
- return err
- }
- l.entry.Logger.Formatter = fmter
- return nil
- }
-}
-
-type eventlogger struct {
- log *eventlog.Log
- debugAsInfo bool
- wrap logrus.Formatter
-}
-
-func newEventlogger(name string, debugAsInfo bool, fmter logrus.Formatter) (*eventlogger, error) {
- logHandle, err := eventlog.Open(name)
- if err != nil {
- return nil, err
- }
- return &eventlogger{log: logHandle, debugAsInfo: debugAsInfo, wrap: fmter}, nil
-}
-
-func (s *eventlogger) Format(e *logrus.Entry) ([]byte, error) {
- data, err := s.wrap.Format(e)
- if err != nil {
- fmt.Fprintf(os.Stderr, "eventlogger: can't format entry: %v\n", err)
- return data, err
- }
-
- switch e.Level {
- case logrus.PanicLevel:
- fallthrough
- case logrus.FatalLevel:
- fallthrough
- case logrus.ErrorLevel:
- err = s.log.Error(102, e.Message)
- case logrus.WarnLevel:
- err = s.log.Warning(101, e.Message)
- case logrus.InfoLevel:
- err = s.log.Info(100, e.Message)
- case logrus.DebugLevel:
- if s.debugAsInfo {
- err = s.log.Info(100, e.Message)
- }
- default:
- err = s.log.Info(100, e.Message)
- }
-
- if err != nil {
- fmt.Fprintf(os.Stderr, "eventlogger: can't send log to eventlog: %v\n", err)
- }
-
- return data, err
-}
diff --git a/vendor/github.com/prometheus/common/log/log.go b/vendor/github.com/prometheus/common/log/log.go
deleted file mode 100644
index b6adbce13..000000000
--- a/vendor/github.com/prometheus/common/log/log.go
+++ /dev/null
@@ -1,368 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package log implements logging via logrus.
-//
-// Deprecated: This package has been replaced with github.com/prometheus/common/promlog.
-
-package log
-
-import (
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "net/url"
- "os"
- "runtime"
- "strconv"
- "strings"
-
- "github.com/sirupsen/logrus"
- "gopkg.in/alecthomas/kingpin.v2"
-)
-
-// setSyslogFormatter is nil if the target architecture does not support syslog.
-var setSyslogFormatter func(logger, string, string) error
-
-// setEventlogFormatter is nil if the target OS does not support Eventlog (i.e., is not Windows).
-var setEventlogFormatter func(logger, string, bool) error
-
-func setJSONFormatter() {
- origLogger.Formatter = &logrus.JSONFormatter{}
-}
-
-type loggerSettings struct {
- level string
- format string
-}
-
-func (s *loggerSettings) apply(ctx *kingpin.ParseContext) error {
- err := baseLogger.SetLevel(s.level)
- if err != nil {
- return err
- }
- err = baseLogger.SetFormat(s.format)
- return err
-}
-
-// AddFlags adds the flags used by this package to the Kingpin application.
-// To use the default Kingpin application, call AddFlags(kingpin.CommandLine)
-func AddFlags(a *kingpin.Application) {
- s := loggerSettings{}
- a.Flag("log.level", "Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal]").
- Default(origLogger.Level.String()).
- StringVar(&s.level)
- defaultFormat := url.URL{Scheme: "logger", Opaque: "stderr"}
- a.Flag("log.format", `Set the log target and format. Example: "logger:syslog?appname=bob&local=7" or "logger:stdout?json=true"`).
- Default(defaultFormat.String()).
- StringVar(&s.format)
- a.Action(s.apply)
-}
-
-// Logger is the interface for loggers used in the Prometheus components.
-type Logger interface {
- Debug(...interface{})
- Debugln(...interface{})
- Debugf(string, ...interface{})
-
- Info(...interface{})
- Infoln(...interface{})
- Infof(string, ...interface{})
-
- Warn(...interface{})
- Warnln(...interface{})
- Warnf(string, ...interface{})
-
- Error(...interface{})
- Errorln(...interface{})
- Errorf(string, ...interface{})
-
- Fatal(...interface{})
- Fatalln(...interface{})
- Fatalf(string, ...interface{})
-
- With(key string, value interface{}) Logger
-
- SetFormat(string) error
- SetLevel(string) error
-}
-
-type logger struct {
- entry *logrus.Entry
-}
-
-func (l logger) With(key string, value interface{}) Logger {
- return logger{l.entry.WithField(key, value)}
-}
-
-// Debug logs a message at level Debug on the standard logger.
-func (l logger) Debug(args ...interface{}) {
- l.sourced().Debug(args...)
-}
-
-// Debug logs a message at level Debug on the standard logger.
-func (l logger) Debugln(args ...interface{}) {
- l.sourced().Debugln(args...)
-}
-
-// Debugf logs a message at level Debug on the standard logger.
-func (l logger) Debugf(format string, args ...interface{}) {
- l.sourced().Debugf(format, args...)
-}
-
-// Info logs a message at level Info on the standard logger.
-func (l logger) Info(args ...interface{}) {
- l.sourced().Info(args...)
-}
-
-// Info logs a message at level Info on the standard logger.
-func (l logger) Infoln(args ...interface{}) {
- l.sourced().Infoln(args...)
-}
-
-// Infof logs a message at level Info on the standard logger.
-func (l logger) Infof(format string, args ...interface{}) {
- l.sourced().Infof(format, args...)
-}
-
-// Warn logs a message at level Warn on the standard logger.
-func (l logger) Warn(args ...interface{}) {
- l.sourced().Warn(args...)
-}
-
-// Warn logs a message at level Warn on the standard logger.
-func (l logger) Warnln(args ...interface{}) {
- l.sourced().Warnln(args...)
-}
-
-// Warnf logs a message at level Warn on the standard logger.
-func (l logger) Warnf(format string, args ...interface{}) {
- l.sourced().Warnf(format, args...)
-}
-
-// Error logs a message at level Error on the standard logger.
-func (l logger) Error(args ...interface{}) {
- l.sourced().Error(args...)
-}
-
-// Error logs a message at level Error on the standard logger.
-func (l logger) Errorln(args ...interface{}) {
- l.sourced().Errorln(args...)
-}
-
-// Errorf logs a message at level Error on the standard logger.
-func (l logger) Errorf(format string, args ...interface{}) {
- l.sourced().Errorf(format, args...)
-}
-
-// Fatal logs a message at level Fatal on the standard logger.
-func (l logger) Fatal(args ...interface{}) {
- l.sourced().Fatal(args...)
-}
-
-// Fatal logs a message at level Fatal on the standard logger.
-func (l logger) Fatalln(args ...interface{}) {
- l.sourced().Fatalln(args...)
-}
-
-// Fatalf logs a message at level Fatal on the standard logger.
-func (l logger) Fatalf(format string, args ...interface{}) {
- l.sourced().Fatalf(format, args...)
-}
-
-func (l logger) SetLevel(level string) error {
- lvl, err := logrus.ParseLevel(level)
- if err != nil {
- return err
- }
-
- l.entry.Logger.Level = lvl
- return nil
-}
-
-func (l logger) SetFormat(format string) error {
- u, err := url.Parse(format)
- if err != nil {
- return err
- }
- if u.Scheme != "logger" {
- return fmt.Errorf("invalid scheme %s", u.Scheme)
- }
- jsonq := u.Query().Get("json")
- if jsonq == "true" {
- setJSONFormatter()
- }
-
- switch u.Opaque {
- case "syslog":
- if setSyslogFormatter == nil {
- return fmt.Errorf("system does not support syslog")
- }
- appname := u.Query().Get("appname")
- facility := u.Query().Get("local")
- return setSyslogFormatter(l, appname, facility)
- case "eventlog":
- if setEventlogFormatter == nil {
- return fmt.Errorf("system does not support eventlog")
- }
- name := u.Query().Get("name")
- debugAsInfo := false
- debugAsInfoRaw := u.Query().Get("debugAsInfo")
- if parsedDebugAsInfo, err := strconv.ParseBool(debugAsInfoRaw); err == nil {
- debugAsInfo = parsedDebugAsInfo
- }
- return setEventlogFormatter(l, name, debugAsInfo)
- case "stdout":
- l.entry.Logger.Out = os.Stdout
- case "stderr":
- l.entry.Logger.Out = os.Stderr
- default:
- return fmt.Errorf("unsupported logger %q", u.Opaque)
- }
- return nil
-}
-
-// sourced adds a source field to the logger that contains
-// the file name and line where the logging happened.
-func (l logger) sourced() *logrus.Entry {
- _, file, line, ok := runtime.Caller(2)
- if !ok {
- file = "??>"
- line = 1
- } else {
- slash := strings.LastIndex(file, "/")
- file = file[slash+1:]
- }
- return l.entry.WithField("source", fmt.Sprintf("%s:%d", file, line))
-}
-
-var origLogger = logrus.New()
-var baseLogger = logger{entry: logrus.NewEntry(origLogger)}
-
-// Base returns the default Logger logging to
-func Base() Logger {
- return baseLogger
-}
-
-// NewLogger returns a new Logger logging to out.
-func NewLogger(w io.Writer) Logger {
- l := logrus.New()
- l.Out = w
- return logger{entry: logrus.NewEntry(l)}
-}
-
-// NewNopLogger returns a logger that discards all log messages.
-func NewNopLogger() Logger {
- l := logrus.New()
- l.Out = ioutil.Discard
- return logger{entry: logrus.NewEntry(l)}
-}
-
-// With adds a field to the logger.
-func With(key string, value interface{}) Logger {
- return baseLogger.With(key, value)
-}
-
-// Debug logs a message at level Debug on the standard logger.
-func Debug(args ...interface{}) {
- baseLogger.sourced().Debug(args...)
-}
-
-// Debugln logs a message at level Debug on the standard logger.
-func Debugln(args ...interface{}) {
- baseLogger.sourced().Debugln(args...)
-}
-
-// Debugf logs a message at level Debug on the standard logger.
-func Debugf(format string, args ...interface{}) {
- baseLogger.sourced().Debugf(format, args...)
-}
-
-// Info logs a message at level Info on the standard logger.
-func Info(args ...interface{}) {
- baseLogger.sourced().Info(args...)
-}
-
-// Infoln logs a message at level Info on the standard logger.
-func Infoln(args ...interface{}) {
- baseLogger.sourced().Infoln(args...)
-}
-
-// Infof logs a message at level Info on the standard logger.
-func Infof(format string, args ...interface{}) {
- baseLogger.sourced().Infof(format, args...)
-}
-
-// Warn logs a message at level Warn on the standard logger.
-func Warn(args ...interface{}) {
- baseLogger.sourced().Warn(args...)
-}
-
-// Warnln logs a message at level Warn on the standard logger.
-func Warnln(args ...interface{}) {
- baseLogger.sourced().Warnln(args...)
-}
-
-// Warnf logs a message at level Warn on the standard logger.
-func Warnf(format string, args ...interface{}) {
- baseLogger.sourced().Warnf(format, args...)
-}
-
-// Error logs a message at level Error on the standard logger.
-func Error(args ...interface{}) {
- baseLogger.sourced().Error(args...)
-}
-
-// Errorln logs a message at level Error on the standard logger.
-func Errorln(args ...interface{}) {
- baseLogger.sourced().Errorln(args...)
-}
-
-// Errorf logs a message at level Error on the standard logger.
-func Errorf(format string, args ...interface{}) {
- baseLogger.sourced().Errorf(format, args...)
-}
-
-// Fatal logs a message at level Fatal on the standard logger.
-func Fatal(args ...interface{}) {
- baseLogger.sourced().Fatal(args...)
-}
-
-// Fatalln logs a message at level Fatal on the standard logger.
-func Fatalln(args ...interface{}) {
- baseLogger.sourced().Fatalln(args...)
-}
-
-// Fatalf logs a message at level Fatal on the standard logger.
-func Fatalf(format string, args ...interface{}) {
- baseLogger.sourced().Fatalf(format, args...)
-}
-
-// AddHook adds hook to Prometheus' original logger.
-func AddHook(hook logrus.Hook) {
- origLogger.Hooks.Add(hook)
-}
-
-type errorLogWriter struct{}
-
-func (errorLogWriter) Write(b []byte) (int, error) {
- baseLogger.sourced().Error(string(b))
- return len(b), nil
-}
-
-// NewErrorLogger returns a log.Logger that is meant to be used
-// in the ErrorLog field of an http.Server to log HTTP server errors.
-func NewErrorLogger() *log.Logger {
- return log.New(&errorLogWriter{}, "", 0)
-}
diff --git a/vendor/github.com/prometheus/common/log/syslog_formatter.go b/vendor/github.com/prometheus/common/log/syslog_formatter.go
deleted file mode 100644
index f882f2f84..000000000
--- a/vendor/github.com/prometheus/common/log/syslog_formatter.go
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !windows,!nacl,!plan9
-
-package log
-
-import (
- "fmt"
- "log/syslog"
- "os"
-
- "github.com/sirupsen/logrus"
-)
-
-var _ logrus.Formatter = (*syslogger)(nil)
-
-func init() {
- setSyslogFormatter = func(l logger, appname, local string) error {
- if appname == "" {
- return fmt.Errorf("missing appname parameter")
- }
- if local == "" {
- return fmt.Errorf("missing local parameter")
- }
-
- fmter, err := newSyslogger(appname, local, l.entry.Logger.Formatter)
- if err != nil {
- fmt.Fprintf(os.Stderr, "error creating syslog formatter: %v\n", err)
- l.entry.Errorf("can't connect logger to syslog: %v", err)
- return err
- }
- l.entry.Logger.Formatter = fmter
- return nil
- }
-}
-
-var prefixTag []byte
-
-type syslogger struct {
- wrap logrus.Formatter
- out *syslog.Writer
-}
-
-func newSyslogger(appname string, facility string, fmter logrus.Formatter) (*syslogger, error) {
- priority, err := getFacility(facility)
- if err != nil {
- return nil, err
- }
- out, err := syslog.New(priority, appname)
- _, isJSON := fmter.(*logrus.JSONFormatter)
- if isJSON {
- // add cee tag to json formatted syslogs
- prefixTag = []byte("@cee:")
- }
- return &syslogger{
- out: out,
- wrap: fmter,
- }, err
-}
-
-func getFacility(facility string) (syslog.Priority, error) {
- switch facility {
- case "0":
- return syslog.LOG_LOCAL0, nil
- case "1":
- return syslog.LOG_LOCAL1, nil
- case "2":
- return syslog.LOG_LOCAL2, nil
- case "3":
- return syslog.LOG_LOCAL3, nil
- case "4":
- return syslog.LOG_LOCAL4, nil
- case "5":
- return syslog.LOG_LOCAL5, nil
- case "6":
- return syslog.LOG_LOCAL6, nil
- case "7":
- return syslog.LOG_LOCAL7, nil
- }
- return syslog.LOG_LOCAL0, fmt.Errorf("invalid local(%s) for syslog", facility)
-}
-
-func (s *syslogger) Format(e *logrus.Entry) ([]byte, error) {
- data, err := s.wrap.Format(e)
- if err != nil {
- fmt.Fprintf(os.Stderr, "syslogger: can't format entry: %v\n", err)
- return data, err
- }
- // only append tag to data sent to syslog (line), not to what
- // is returned
- line := string(append(prefixTag, data...))
-
- switch e.Level {
- case logrus.PanicLevel:
- err = s.out.Crit(line)
- case logrus.FatalLevel:
- err = s.out.Crit(line)
- case logrus.ErrorLevel:
- err = s.out.Err(line)
- case logrus.WarnLevel:
- err = s.out.Warning(line)
- case logrus.InfoLevel:
- err = s.out.Info(line)
- case logrus.DebugLevel:
- err = s.out.Debug(line)
- default:
- err = s.out.Notice(line)
- }
-
- if err != nil {
- fmt.Fprintf(os.Stderr, "syslogger: can't send log to syslog: %v\n", err)
- }
-
- return data, err
-}
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
index 41051a01a..ef8956335 100644
--- a/vendor/github.com/prometheus/common/model/labels.go
+++ b/vendor/github.com/prometheus/common/model/labels.go
@@ -45,6 +45,14 @@ const (
// scrape a target.
MetricsPathLabel = "__metrics_path__"
+ // ScrapeIntervalLabel is the name of the label that holds the scrape interval
+ // used to scrape a target.
+ ScrapeIntervalLabel = "__scrape_interval__"
+
+ // ScrapeTimeoutLabel is the name of the label that holds the scrape
+ // timeout used to scrape a target.
+ ScrapeTimeoutLabel = "__scrape_timeout__"
+
// ReservedLabelPrefix is a prefix which is not legal in user-supplied
// label names.
ReservedLabelPrefix = "__"
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
index c40e6403c..7f67b16e4 100644
--- a/vendor/github.com/prometheus/common/model/time.go
+++ b/vendor/github.com/prometheus/common/model/time.go
@@ -14,6 +14,8 @@
package model
import (
+ "encoding/json"
+ "errors"
"fmt"
"math"
"regexp"
@@ -201,13 +203,23 @@ func ParseDuration(durationStr string) (Duration, error) {
// Parse the match at pos `pos` in the regex and use `mult` to turn that
// into ms, then add that value to the total parsed duration.
+ var overflowErr error
m := func(pos int, mult time.Duration) {
if matches[pos] == "" {
return
}
n, _ := strconv.Atoi(matches[pos])
+
+ // Check if the provided duration overflows time.Duration (> ~ 290years).
+ if n > int((1<<63-1)/mult/time.Millisecond) {
+ overflowErr = errors.New("duration out of range")
+ }
d := time.Duration(n) * time.Millisecond
dur += d * mult
+
+ if dur < 0 {
+ overflowErr = errors.New("duration out of range")
+ }
}
m(2, 1000*60*60*24*365) // y
@@ -218,7 +230,7 @@ func ParseDuration(durationStr string) (Duration, error) {
m(12, 1000) // s
m(14, 1) // ms
- return Duration(dur), nil
+ return Duration(dur), overflowErr
}
func (d Duration) String() string {
@@ -254,6 +266,37 @@ func (d Duration) String() string {
return r
}
+// MarshalJSON implements the json.Marshaler interface.
+func (d Duration) MarshalJSON() ([]byte, error) {
+ return json.Marshal(d.String())
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (d *Duration) UnmarshalJSON(bytes []byte) error {
+ var s string
+ if err := json.Unmarshal(bytes, &s); err != nil {
+ return err
+ }
+ dur, err := ParseDuration(s)
+ if err != nil {
+ return err
+ }
+ *d = dur
+ return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+func (d *Duration) MarshalText() ([]byte, error) {
+ return []byte(d.String()), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (d *Duration) UnmarshalText(text []byte) error {
+ var err error
+ *d, err = ParseDuration(string(text))
+ return err
+}
+
// MarshalYAML implements the yaml.Marshaler interface.
func (d Duration) MarshalYAML() (interface{}, error) {
return d.String(), nil
diff --git a/vendor/github.com/prometheus/common/promlog/log.go b/vendor/github.com/prometheus/common/promlog/log.go
index 5062e2805..adb0599ce 100644
--- a/vendor/github.com/prometheus/common/promlog/log.go
+++ b/vendor/github.com/prometheus/common/promlog/log.go
@@ -18,10 +18,11 @@ package promlog
import (
"os"
+ "sync"
"time"
- "github.com/go-kit/kit/log"
- "github.com/go-kit/kit/log/level"
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
"github.com/pkg/errors"
)
@@ -42,6 +43,23 @@ type AllowedLevel struct {
o level.Option
}
+func (l *AllowedLevel) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ type plain string
+ if err := unmarshal((*plain)(&s)); err != nil {
+ return err
+ }
+ if s == "" {
+ return nil
+ }
+ lo := &AllowedLevel{}
+ if err := lo.Set(s); err != nil {
+ return err
+ }
+ *l = *lo
+ return nil
+}
+
func (l *AllowedLevel) String() string {
return l.s
}
@@ -106,3 +124,52 @@ func New(config *Config) log.Logger {
l = log.With(l, "ts", timestampFormat, "caller", log.DefaultCaller)
return l
}
+
+// NewDynamic returns a new leveled logger. Each logged line will be annotated
+// with a timestamp. The output always goes to stderr. Some properties can be
+// changed, like the level.
+func NewDynamic(config *Config) *logger {
+ var l log.Logger
+ if config.Format != nil && config.Format.s == "json" {
+ l = log.NewJSONLogger(log.NewSyncWriter(os.Stderr))
+ } else {
+ l = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
+ }
+ l = log.With(l, "ts", timestampFormat, "caller", log.DefaultCaller)
+
+ lo := &logger{
+ base: l,
+ leveled: l,
+ }
+ if config.Level != nil {
+ lo.SetLevel(config.Level)
+ }
+ return lo
+}
+
+type logger struct {
+ base log.Logger
+ leveled log.Logger
+ currentLevel *AllowedLevel
+ mtx sync.Mutex
+}
+
+// Log implements logger.Log.
+func (l *logger) Log(keyvals ...interface{}) error {
+ l.mtx.Lock()
+ defer l.mtx.Unlock()
+ return l.leveled.Log(keyvals...)
+}
+
+// SetLevel changes the log level.
+func (l *logger) SetLevel(lvl *AllowedLevel) {
+ l.mtx.Lock()
+ defer l.mtx.Unlock()
+ if lvl != nil {
+ if l.currentLevel != nil && l.currentLevel.s != lvl.s {
+ _ = l.base.Log("msg", "Log level changed", "prev", l.currentLevel, "current", lvl)
+ }
+ l.currentLevel = lvl
+ }
+ l.leveled = level.NewFilter(l.base, lvl.o)
+}
diff --git a/vendor/github.com/prometheus/common/sigv4/LICENSE b/vendor/github.com/prometheus/common/sigv4/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/github.com/prometheus/common/sigv4/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/prometheus/common/sigv4/Makefile b/vendor/github.com/prometheus/common/sigv4/Makefile
new file mode 100644
index 000000000..e7be5dd9a
--- /dev/null
+++ b/vendor/github.com/prometheus/common/sigv4/Makefile
@@ -0,0 +1,22 @@
+# Copyright 2018 The Prometheus Authors
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+include ../Makefile.common
+
+.PHONY: test
+test:: deps check_license unused common-test
+	@echo ">> Running sigv4 tests"
+
+ifeq (,$(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(7|8|9|10)\.'))
+test:: lint
+endif
diff --git a/vendor/github.com/prometheus/common/sigv4/go.mod b/vendor/github.com/prometheus/common/sigv4/go.mod
new file mode 100644
index 000000000..60f8743b6
--- /dev/null
+++ b/vendor/github.com/prometheus/common/sigv4/go.mod
@@ -0,0 +1,11 @@
+module github.com/prometheus/common/sigv4
+
+go 1.15
+
+require (
+ github.com/aws/aws-sdk-go v1.38.35
+ github.com/prometheus/client_golang v1.11.0
+ github.com/prometheus/common v0.29.0
+ github.com/stretchr/testify v1.7.0
+ gopkg.in/yaml.v2 v2.4.0
+)
diff --git a/vendor/github.com/prometheus/common/sigv4/go.sum b/vendor/github.com/prometheus/common/sigv4/go.sum
new file mode 100644
index 000000000..3ac369f1b
--- /dev/null
+++ b/vendor/github.com/prometheus/common/sigv4/go.sum
@@ -0,0 +1,479 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/aws/aws-sdk-go v1.38.35 h1:7AlAO0FC+8nFjxiGKEmq0QLpiA8/XFr6eIxgRTwkdTg=
+github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
+github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
+github.com/prometheus/common v0.29.0 h1:3jqPBvKT4OHAbje2Ql7KeaaSicDBCxMYwEJU1zRJceE=
+github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20210525063256-abc453219eb5 h1:wjuX4b5yYQnEQHzd+CBcrcC6OVR2J1CN6mUy0oSxIPo=
+golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c h1:pkQiBZBvdos9qq4wBAHqlzuZHEXo07pqV06ef90u1WI=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40 h1:JWgyZ1qgdTaF3N3oxC+MdTV7qvEEgHo3otj+HB5CM7Q=
+golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1 h1:7QnIQpGRHE5RnLKnESfDoxm2dTapTZua5a0kS0A+VXQ=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/vendor/github.com/prometheus/common/sigv4/sigv4.go b/vendor/github.com/prometheus/common/sigv4/sigv4.go
new file mode 100644
index 000000000..dd140bb68
--- /dev/null
+++ b/vendor/github.com/prometheus/common/sigv4/sigv4.go
@@ -0,0 +1,137 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sigv4
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/textproto"
+ "sync"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
+ "github.com/aws/aws-sdk-go/aws/session"
+ signer "github.com/aws/aws-sdk-go/aws/signer/v4"
+)
+
+var sigv4HeaderDenylist = []string{
+ "uber-trace-id",
+}
+
+type sigV4RoundTripper struct {
+ region string
+ next http.RoundTripper
+ pool sync.Pool
+
+ signer *signer.Signer
+}
+
+// NewSigV4RoundTripper returns a new http.RoundTripper that will sign requests
+// using Amazon's Signature Verification V4 signing procedure. The request will
+// then be handed off to the next RoundTripper provided by next. If next is nil,
+// http.DefaultTransport will be used.
+//
+// Credentials for signing are retrieved using the the default AWS credential
+// chain. If credentials cannot be found, an error will be returned.
+func NewSigV4RoundTripper(cfg *SigV4Config, next http.RoundTripper) (http.RoundTripper, error) {
+ if next == nil {
+ next = http.DefaultTransport
+ }
+
+ creds := credentials.NewStaticCredentials(cfg.AccessKey, string(cfg.SecretKey), "")
+ if cfg.AccessKey == "" && cfg.SecretKey == "" {
+ creds = nil
+ }
+
+ sess, err := session.NewSessionWithOptions(session.Options{
+ Config: aws.Config{
+ Region: aws.String(cfg.Region),
+ Credentials: creds,
+ },
+ Profile: cfg.Profile,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("could not create new AWS session: %w", err)
+ }
+ if _, err := sess.Config.Credentials.Get(); err != nil {
+ return nil, fmt.Errorf("could not get SigV4 credentials: %w", err)
+ }
+ if aws.StringValue(sess.Config.Region) == "" {
+ return nil, fmt.Errorf("region not configured in sigv4 or in default credentials chain")
+ }
+
+ signerCreds := sess.Config.Credentials
+ if cfg.RoleARN != "" {
+ signerCreds = stscreds.NewCredentials(sess, cfg.RoleARN)
+ }
+
+ rt := &sigV4RoundTripper{
+ region: cfg.Region,
+ next: next,
+ signer: signer.NewSigner(signerCreds),
+ }
+ rt.pool.New = rt.newBuf
+ return rt, nil
+}
+
+func (rt *sigV4RoundTripper) newBuf() interface{} {
+ return bytes.NewBuffer(make([]byte, 0, 1024))
+}
+
+func (rt *sigV4RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+ // rt.signer.Sign needs a seekable body, so we replace the body with a
+ // buffered reader filled with the contents of original body.
+ buf := rt.pool.Get().(*bytes.Buffer)
+ defer func() {
+ buf.Reset()
+ rt.pool.Put(buf)
+ }()
+ if _, err := io.Copy(buf, req.Body); err != nil {
+ return nil, err
+ }
+ // Close the original body since we don't need it anymore.
+ _ = req.Body.Close()
+
+ // Ensure our seeker is back at the start of the buffer once we return.
+ var seeker io.ReadSeeker = bytes.NewReader(buf.Bytes())
+ defer func() {
+ _, _ = seeker.Seek(0, io.SeekStart)
+ }()
+ req.Body = ioutil.NopCloser(seeker)
+
+ // Clone the request and trim out headers that we don't want to sign.
+ signReq := req.Clone(req.Context())
+ for _, header := range sigv4HeaderDenylist {
+ signReq.Header.Del(header)
+ }
+
+ headers, err := rt.signer.Sign(signReq, seeker, "aps", rt.region, time.Now().UTC())
+ if err != nil {
+ return nil, fmt.Errorf("failed to sign request: %w", err)
+ }
+
+ // Copy over signed headers. Authorization header is not returned by
+ // rt.signer.Sign and needs to be copied separately.
+ for k, v := range headers {
+ req.Header[textproto.CanonicalMIMEHeaderKey(k)] = v
+ }
+ req.Header.Set("Authorization", signReq.Header.Get("Authorization"))
+
+ return rt.next.RoundTrip(req)
+}
diff --git a/vendor/github.com/prometheus/common/sigv4/sigv4_config.go b/vendor/github.com/prometheus/common/sigv4/sigv4_config.go
new file mode 100644
index 000000000..776fe764a
--- /dev/null
+++ b/vendor/github.com/prometheus/common/sigv4/sigv4_config.go
@@ -0,0 +1,47 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sigv4
+
+import (
+ "fmt"
+
+ "github.com/prometheus/common/config"
+)
+
+// SigV4Config is the configuration for signing remote write requests with
+// AWS's SigV4 verification process. Empty values will be retrieved using the
+// AWS default credentials chain.
+type SigV4Config struct {
+ Region string `yaml:"region,omitempty"`
+ AccessKey string `yaml:"access_key,omitempty"`
+ SecretKey config.Secret `yaml:"secret_key,omitempty"`
+ Profile string `yaml:"profile,omitempty"`
+ RoleARN string `yaml:"role_arn,omitempty"`
+}
+
+func (c *SigV4Config) Validate() error {
+ if (c.AccessKey == "") != (c.SecretKey == "") {
+ return fmt.Errorf("must provide a AWS SigV4 Access key and Secret Key if credentials are specified in the SigV4 config")
+ }
+ return nil
+}
+
+func (c *SigV4Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ type plain SigV4Config
+ *c = SigV4Config{}
+ if err := unmarshal((*plain)(c)); err != nil {
+ return err
+ }
+ return c.Validate()
+}
diff --git a/vendor/github.com/prometheus/exporter-toolkit/web/tls_config.go b/vendor/github.com/prometheus/exporter-toolkit/web/tls_config.go
index 06fb1548f..42eb5d432 100644
--- a/vendor/github.com/prometheus/exporter-toolkit/web/tls_config.go
+++ b/vendor/github.com/prometheus/exporter-toolkit/web/tls_config.go
@@ -22,8 +22,8 @@ import (
"net/http"
"path/filepath"
- "github.com/go-kit/kit/log"
- "github.com/go-kit/kit/log/level"
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
"github.com/pkg/errors"
config_util "github.com/prometheus/common/config"
"gopkg.in/yaml.v2"
@@ -154,7 +154,7 @@ func ConfigToTLSConfig(c *TLSStruct) (*tls.Config, error) {
switch c.ClientAuth {
case "RequestClientCert":
cfg.ClientAuth = tls.RequestClientCert
- case "RequireClientCert":
+ case "RequireAnyClientCert", "RequireClientCert": // Preserved for backwards compatibility.
cfg.ClientAuth = tls.RequireAnyClientCert
case "VerifyClientCertIfGiven":
cfg.ClientAuth = tls.VerifyClientCertIfGiven
diff --git a/vendor/github.com/prometheus/exporter-toolkit/web/users.go b/vendor/github.com/prometheus/exporter-toolkit/web/users.go
index 8168dabf4..cf8105fc8 100644
--- a/vendor/github.com/prometheus/exporter-toolkit/web/users.go
+++ b/vendor/github.com/prometheus/exporter-toolkit/web/users.go
@@ -20,7 +20,7 @@ import (
"net/http"
"sync"
- "github.com/go-kit/kit/log"
+ "github.com/go-kit/log"
"golang.org/x/crypto/bcrypt"
)
diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common
index 9320176ca..3ac29c636 100644
--- a/vendor/github.com/prometheus/procfs/Makefile.common
+++ b/vendor/github.com/prometheus/procfs/Makefile.common
@@ -78,7 +78,7 @@ ifneq ($(shell which gotestsum),)
endif
endif
-PROMU_VERSION ?= 0.5.0
+PROMU_VERSION ?= 0.7.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
GOLANGCI_LINT :=
@@ -245,10 +245,12 @@ common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)"
+DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))
.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
+ docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
.PHONY: common-docker-manifest
common-docker-manifest:
diff --git a/vendor/github.com/prometheus/procfs/SECURITY.md b/vendor/github.com/prometheus/procfs/SECURITY.md
new file mode 100644
index 000000000..67741f015
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/SECURITY.md
@@ -0,0 +1,6 @@
+# Reporting a security issue
+
+The Prometheus security policy, including how to report vulnerabilities, can be
+found here:
+
+https://prometheus.io/docs/operating/security/
diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go
index 916c9182a..4e47e6172 100644
--- a/vendor/github.com/prometheus/procfs/arp.go
+++ b/vendor/github.com/prometheus/procfs/arp.go
@@ -36,7 +36,7 @@ type ARPEntry struct {
func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
data, err := ioutil.ReadFile(fs.proc.Path("net/arp"))
if err != nil {
- return nil, fmt.Errorf("error reading arp %s: %s", fs.proc.Path("net/arp"), err)
+ return nil, fmt.Errorf("error reading arp %q: %w", fs.proc.Path("net/arp"), err)
}
return parseARPEntries(data)
@@ -59,7 +59,7 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) {
} else if width == expectedDataWidth {
entry, err := parseARPEntry(columns)
if err != nil {
- return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %s", err)
+ return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %w", err)
}
entries = append(entries, entry)
} else {
diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go
index 10bd067a0..f5b7939b2 100644
--- a/vendor/github.com/prometheus/procfs/buddyinfo.go
+++ b/vendor/github.com/prometheus/procfs/buddyinfo.go
@@ -74,7 +74,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
for i := 0; i < arraySize; i++ {
sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
if err != nil {
- return nil, fmt.Errorf("invalid value in buddyinfo: %s", err)
+ return nil, fmt.Errorf("invalid value in buddyinfo: %w", err)
}
}
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go
index b9fb589aa..5623b24a1 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo.go
@@ -19,6 +19,7 @@ import (
"bufio"
"bytes"
"errors"
+ "fmt"
"regexp"
"strconv"
"strings"
@@ -77,7 +78,7 @@ func parseCPUInfoX86(info []byte) ([]CPUInfo, error) {
// find the first "processor" line
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
- return nil, errors.New("invalid cpuinfo file: " + firstLine)
+ return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32)
@@ -192,7 +193,7 @@ func parseCPUInfoARM(info []byte) ([]CPUInfo, error) {
firstLine := firstNonEmptyLine(scanner)
match, _ := regexp.MatchString("^[Pp]rocessor", firstLine)
if !match || !strings.Contains(firstLine, ":") {
- return nil, errors.New("invalid cpuinfo file: " + firstLine)
+ return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
@@ -256,7 +257,7 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) {
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") {
- return nil, errors.New("invalid cpuinfo file: " + firstLine)
+ return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
@@ -281,7 +282,7 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) {
if strings.HasPrefix(line, "processor") {
match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line)
if len(match) < 2 {
- return nil, errors.New("Invalid line found in cpuinfo: " + line)
+ return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
}
cpu := commonCPUInfo
v, err := strconv.ParseUint(match[1], 0, 32)
@@ -313,6 +314,22 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) {
return nil, err
}
cpuinfo[i].CPUMHz = v
+ case "physical id":
+ cpuinfo[i].PhysicalID = field[1]
+ case "core id":
+ cpuinfo[i].CoreID = field[1]
+ case "cpu cores":
+ v, err := strconv.ParseUint(field[1], 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ cpuinfo[i].CPUCores = uint(v)
+ case "siblings":
+ v, err := strconv.ParseUint(field[1], 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ cpuinfo[i].Siblings = uint(v)
}
}
@@ -325,7 +342,7 @@ func parseCPUInfoMips(info []byte) ([]CPUInfo, error) {
// find the first "processor" line
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
- return nil, errors.New("invalid cpuinfo file: " + firstLine)
+ return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
@@ -367,7 +384,7 @@ func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) {
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
- return nil, errors.New("invalid cpuinfo file: " + firstLine)
+ return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32)
@@ -412,7 +429,7 @@ func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) {
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
- return nil, errors.New("invalid cpuinfo file: " + firstLine)
+ return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32)
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go
new file mode 100644
index 000000000..e83c2e207
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go
@@ -0,0 +1,19 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build linux
+// +build riscv riscv64
+
+package procfs
+
+var parseCPUInfo = parseCPUInfoRISCV
diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go
index a95893375..5048ad1f2 100644
--- a/vendor/github.com/prometheus/procfs/crypto.go
+++ b/vendor/github.com/prometheus/procfs/crypto.go
@@ -55,12 +55,12 @@ func (fs FS) Crypto() ([]Crypto, error) {
path := fs.proc.Path("crypto")
b, err := util.ReadFileNoStat(path)
if err != nil {
- return nil, fmt.Errorf("error reading crypto %s: %s", path, err)
+ return nil, fmt.Errorf("error reading crypto %q: %w", path, err)
}
crypto, err := parseCrypto(bytes.NewReader(b))
if err != nil {
- return nil, fmt.Errorf("error parsing crypto %s: %s", path, err)
+ return nil, fmt.Errorf("error parsing crypto %q: %w", path, err)
}
return crypto, nil
diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar
index 12494d742..1e76173da 100644
--- a/vendor/github.com/prometheus/procfs/fixtures.ttar
+++ b/vendor/github.com/prometheus/procfs/fixtures.ttar
@@ -111,7 +111,7 @@ Max core file size 0 unlimited bytes
Max resident set unlimited unlimited bytes
Max processes 62898 62898 processes
Max open files 2048 4096 files
-Max locked memory 65536 65536 bytes
+Max locked memory 18446744073708503040 18446744073708503040 bytes
Max address space 8589934592 unlimited bytes
Max file locks unlimited unlimited locks
Max pending signals 62898 62898 signals
@@ -1080,7 +1080,6 @@ internal : yes
type : skcipher
async : yes
blocksize : 1
-min keysize : 16
max keysize : 32
ivsize : 16
chunksize : 16
@@ -1839,6 +1838,7 @@ min keysize : 16
max keysize : 32
Mode: 444
+Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/diskstats
Lines: 52
@@ -2129,6 +2129,24 @@ Lines: 6
4 1FB3C 0 1282A8F 0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/net/protocols
+Lines: 14
+protocol size sockets memory press maxhdr slab module cl co di ac io in de sh ss gs se re sp bi br ha uh gp em
+PACKET 1344 2 -1 NI 0 no kernel n n n n n n n n n n n n n n n n n n n
+PINGv6 1112 0 -1 NI 0 yes kernel y y y n n y n n y y y y n y y y y y n
+RAWv6 1112 1 -1 NI 0 yes kernel y y y n y y y n y y y y n y y y y n n
+UDPLITEv6 1216 0 57 NI 0 yes kernel y y y n y y y n y y y y n n n y y y n
+UDPv6 1216 10 57 NI 0 yes kernel y y y n y y y n y y y y n n n y y y n
+TCPv6 2144 1937 1225378 no 320 yes kernel y y y y y y y y y y y y y n y y y y y
+UNIX 1024 120 -1 NI 0 yes kernel n n n n n n n n n n n n n n n n n n n
+UDP-Lite 1024 0 57 NI 0 yes kernel y y y n y y y n y y y y y n n y y y n
+PING 904 0 -1 NI 0 yes kernel y y y n n y n n y y y y n y y y y y n
+RAW 912 0 -1 NI 0 yes kernel y y y n y y y n y y y y n y y y y n n
+UDP 1024 73 57 NI 0 yes kernel y y y n y y y n y y y y y n n y y y n
+TCP 1984 93064 1225378 yes 320 yes kernel y y y y y y y y y y y y y n y y y y y
+NETLINK 1040 16 -1 NI 0 no kernel n n n n n n n n n n n n n n n n n n n
+Mode: 444
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/proc/net/rpc
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@@ -2186,10 +2204,25 @@ Lines: 1
00015c73 00020e76 F0000769 00000000
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/net/tcp
+Lines: 4
+ sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode
+ 0: 0500000A:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
+ 1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
+ 2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/net/tcp6
+Lines: 3
+ sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops
+ 1315: 00000000000000000000000000000000:14EB 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 981 0 21040 2 0000000013726323 0
+ 6073: 000080FE00000000FFADE15609667CFE:C781 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 1000 0 11337031 2 00000000b9256fdd 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/net/udp
Lines: 4
sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode
- 0: 0A000005:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
+ 0: 0500000A:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
Mode: 644
@@ -2292,6 +2325,312 @@ Mode: 644
Path: fixtures/proc/self
SymlinkTo: 26231
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/slabinfo
+Lines: 302
+slabinfo - version: 2.1
+# name : tunables : slabdata
+pid_3 375 532 576 28 4 : tunables 0 0 0 : slabdata 19 19 0
+pid_2 3 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0
+nvidia_p2p_page_cache 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
+nvidia_pte_cache 9022 9152 368 22 2 : tunables 0 0 0 : slabdata 416 416 0
+nvidia_stack_cache 321 326 12624 2 8 : tunables 0 0 0 : slabdata 163 163 0
+kvm_async_pf 0 0 472 34 4 : tunables 0 0 0 : slabdata 0 0 0
+kvm_vcpu 0 0 15552 2 8 : tunables 0 0 0 : slabdata 0 0 0
+kvm_mmu_page_header 0 0 504 32 4 : tunables 0 0 0 : slabdata 0 0 0
+pte_list_desc 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
+x86_emulator 0 0 3024 10 8 : tunables 0 0 0 : slabdata 0 0 0
+x86_fpu 0 0 4608 7 8 : tunables 0 0 0 : slabdata 0 0 0
+iwl_cmd_pool:0000:04:00.0 0 128 512 32 4 : tunables 0 0 0 : slabdata 4 4 0
+ext4_groupinfo_4k 3719 3740 480 34 4 : tunables 0 0 0 : slabdata 110 110 0
+bio-6 32 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0
+bio-5 16 48 1344 24 8 : tunables 0 0 0 : slabdata 2 2 0
+bio-4 17 92 1408 23 8 : tunables 0 0 0 : slabdata 4 4 0
+fat_inode_cache 0 0 1056 31 8 : tunables 0 0 0 : slabdata 0 0 0
+fat_cache 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
+ovl_aio_req 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
+ovl_inode 0 0 1000 32 8 : tunables 0 0 0 : slabdata 0 0 0
+squashfs_inode_cache 0 0 1088 30 8 : tunables 0 0 0 : slabdata 0 0 0
+fuse_request 0 0 472 34 4 : tunables 0 0 0 : slabdata 0 0 0
+fuse_inode 0 0 1152 28 8 : tunables 0 0 0 : slabdata 0 0 0
+xfs_dqtrx 0 0 864 37 8 : tunables 0 0 0 : slabdata 0 0 0
+xfs_dquot 0 0 832 39 8 : tunables 0 0 0 : slabdata 0 0 0
+xfs_buf 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0
+xfs_bui_item 0 0 544 30 4 : tunables 0 0 0 : slabdata 0 0 0
+xfs_bud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
+xfs_cui_item 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0
+xfs_cud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
+xfs_rui_item 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0
+xfs_rud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
+xfs_icr 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0
+xfs_ili 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0
+xfs_inode 0 0 1344 24 8 : tunables 0 0 0 : slabdata 0 0 0
+xfs_efi_item 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0
+xfs_efd_item 0 0 776 21 4 : tunables 0 0 0 : slabdata 0 0 0
+xfs_buf_item 0 0 608 26 4 : tunables 0 0 0 : slabdata 0 0 0
+xf_trans 0 0 568 28 4 : tunables 0 0 0 : slabdata 0 0 0
+xfs_ifork 0 0 376 21 2 : tunables 0 0 0 : slabdata 0 0 0
+xfs_da_state 0 0 816 20 4 : tunables 0 0 0 : slabdata 0 0 0
+xfs_btree_cur 0 0 560 29 4 : tunables 0 0 0 : slabdata 0 0 0
+xfs_bmap_free_item 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
+xfs_log_ticket 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0
+nfs_direct_cache 0 0 560 29 4 : tunables 0 0 0 : slabdata 0 0 0
+nfs_commit_data 4 28 1152 28 8 : tunables 0 0 0 : slabdata 1 1 0
+nfs_write_data 32 50 1280 25 8 : tunables 0 0 0 : slabdata 2 2 0
+nfs_read_data 0 0 1280 25 8 : tunables 0 0 0 : slabdata 0 0 0
+nfs_inode_cache 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0
+nfs_page 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
+rpc_inode_cache 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0
+rpc_buffers 8 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0
+rpc_tasks 8 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0
+fscache_cookie_jar 1 35 464 35 4 : tunables 0 0 0 : slabdata 1 1 0
+jfs_mp 32 35 464 35 4 : tunables 0 0 0 : slabdata 1 1 0
+jfs_ip 0 0 1592 20 8 : tunables 0 0 0 : slabdata 0 0 0
+reiser_inode_cache 0 0 1096 29 8 : tunables 0 0 0 : slabdata 0 0 0
+btrfs_end_io_wq 0 0 464 35 4 : tunables 0 0 0 : slabdata 0 0 0
+btrfs_prelim_ref 0 0 424 38 4 : tunables 0 0 0 : slabdata 0 0 0
+btrfs_delayed_extent_op 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
+btrfs_delayed_data_ref 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0
+btrfs_delayed_tree_ref 0 0 440 37 4 : tunables 0 0 0 : slabdata 0 0 0
+btrfs_delayed_ref_head 0 0 480 34 4 : tunables 0 0 0 : slabdata 0 0 0
+btrfs_inode_defrag 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
+btrfs_delayed_node 0 0 648 25 4 : tunables 0 0 0 : slabdata 0 0 0
+btrfs_ordered_extent 0 0 752 21 4 : tunables 0 0 0 : slabdata 0 0 0
+btrfs_extent_map 0 0 480 34 4 : tunables 0 0 0 : slabdata 0 0 0
+btrfs_extent_state 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
+bio-3 35 92 704 23 4 : tunables 0 0 0 : slabdata 4 4 0
+btrfs_extent_buffer 0 0 600 27 4 : tunables 0 0 0 : slabdata 0 0 0
+btrfs_free_space_bitmap 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0
+btrfs_free_space 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
+btrfs_path 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0
+btrfs_trans_handle 0 0 440 37 4 : tunables 0 0 0 : slabdata 0 0 0
+btrfs_inode 0 0 1496 21 8 : tunables 0 0 0 : slabdata 0 0 0
+ext4_inode_cache 84136 84755 1400 23 8 : tunables 0 0 0 : slabdata 3685 3685 0
+ext4_free_data 22 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0
+ext4_allocation_context 0 70 464 35 4 : tunables 0 0 0 : slabdata 2 2 0
+ext4_prealloc_space 24 74 440 37 4 : tunables 0 0 0 : slabdata 2 2 0
+ext4_system_zone 267 273 376 21 2 : tunables 0 0 0 : slabdata 13 13 0
+ext4_io_end_vec 0 88 368 22 2 : tunables 0 0 0 : slabdata 4 4 0
+ext4_io_end 0 80 400 20 2 : tunables 0 0 0 : slabdata 4 4 0
+ext4_bio_post_read_ctx 128 147 384 21 2 : tunables 0 0 0 : slabdata 7 7 0
+ext4_pending_reservation 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
+ext4_extent_status 79351 79422 376 21 2 : tunables 0 0 0 : slabdata 3782 3782 0
+jbd2_transaction_s 44 100 640 25 4 : tunables 0 0 0 : slabdata 4 4 0
+jbd2_inode 6785 6840 400 20 2 : tunables 0 0 0 : slabdata 342 342 0
+jbd2_journal_handle 0 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0
+jbd2_journal_head 824 1944 448 36 4 : tunables 0 0 0 : slabdata 54 54 0
+jbd2_revoke_table_s 4 23 352 23 2 : tunables 0 0 0 : slabdata 1 1 0
+jbd2_revoke_record_s 0 156 416 39 4 : tunables 0 0 0 : slabdata 4 4 0
+ext2_inode_cache 0 0 1144 28 8 : tunables 0 0 0 : slabdata 0 0 0
+mbcache 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0
+dm_thin_new_mapping 0 152 424 38 4 : tunables 0 0 0 : slabdata 4 4 0
+dm_snap_pending_exception 0 0 464 35 4 : tunables 0 0 0 : slabdata 0 0 0
+dm_exception 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
+dm_dirty_log_flush_entry 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
+dm_bio_prison_cell_v2 0 0 432 37 4 : tunables 0 0 0 : slabdata 0 0 0
+dm_bio_prison_cell 0 148 432 37 4 : tunables 0 0 0 : slabdata 4 4 0
+kcopyd_job 0 8 3648 8 8 : tunables 0 0 0 : slabdata 1 1 0
+io 0 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0
+dm_uevent 0 0 3224 10 8 : tunables 0 0 0 : slabdata 0 0 0
+dax_cache 1 28 1152 28 8 : tunables 0 0 0 : slabdata 1 1 0
+aic94xx_ascb 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
+aic94xx_dma_token 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0
+asd_sas_event 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
+sas_task 0 0 704 23 4 : tunables 0 0 0 : slabdata 0 0 0
+qla2xxx_srbs 0 0 832 39 8 : tunables 0 0 0 : slabdata 0 0 0
+sd_ext_cdb 2 22 368 22 2 : tunables 0 0 0 : slabdata 1 1 0
+scsi_sense_cache 258 288 512 32 4 : tunables 0 0 0 : slabdata 9 9 0
+virtio_scsi_cmd 64 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0
+L2TP/IPv6 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0
+L2TP/IP 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0
+ip6-frags 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0
+fib6_nodes 5 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0
+ip6_dst_cache 4 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0
+ip6_mrt_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
+PINGv6 0 0 1600 20 8 : tunables 0 0 0 : slabdata 0 0 0
+RAWv6 25 40 1600 20 8 : tunables 0 0 0 : slabdata 2 2 0
+UDPLITEv6 0 0 1728 18 8 : tunables 0 0 0 : slabdata 0 0 0
+UDPv6 3 54 1728 18 8 : tunables 0 0 0 : slabdata 3 3 0
+tw_sock_TCPv6 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
+request_sock_TCPv6 0 0 632 25 4 : tunables 0 0 0 : slabdata 0 0 0
+TCPv6 0 33 2752 11 8 : tunables 0 0 0 : slabdata 3 3 0
+uhci_urb_priv 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0
+sgpool-128 2 14 4544 7 8 : tunables 0 0 0 : slabdata 2 2 0
+sgpool-64 2 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0
+sgpool-32 2 44 1472 22 8 : tunables 0 0 0 : slabdata 2 2 0
+sgpool-16 2 68 960 34 8 : tunables 0 0 0 : slabdata 2 2 0
+sgpool-8 2 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0
+btree_node 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
+bfq_io_cq 0 0 488 33 4 : tunables 0 0 0 : slabdata 0 0 0
+bfq_queue 0 0 848 38 8 : tunables 0 0 0 : slabdata 0 0 0
+mqueue_inode_cache 1 24 1344 24 8 : tunables 0 0 0 : slabdata 1 1 0
+isofs_inode_cache 0 0 968 33 8 : tunables 0 0 0 : slabdata 0 0 0
+io_kiocb 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0
+kioctx 0 30 1088 30 8 : tunables 0 0 0 : slabdata 1 1 0
+aio_kiocb 0 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0
+userfaultfd_ctx_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
+fanotify_path_event 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0
+fanotify_fid_event 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
+fsnotify_mark 0 0 408 20 2 : tunables 0 0 0 : slabdata 0 0 0
+dnotify_mark 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
+dnotify_struct 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
+dio 0 0 1088 30 8 : tunables 0 0 0 : slabdata 0 0 0
+bio-2 4 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0
+fasync_cache 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0
+audit_tree_mark 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
+pid_namespace 30 34 480 34 4 : tunables 0 0 0 : slabdata 1 1 0
+posix_timers_cache 0 27 592 27 4 : tunables 0 0 0 : slabdata 1 1 0
+iommu_devinfo 24 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0
+iommu_domain 10 10 3264 10 8 : tunables 0 0 0 : slabdata 1 1 0
+iommu_iova 8682 8748 448 36 4 : tunables 0 0 0 : slabdata 243 243 0
+UNIX 529 814 1472 22 8 : tunables 0 0 0 : slabdata 37 37 0
+ip4-frags 0 0 536 30 4 : tunables 0 0 0 : slabdata 0 0 0
+ip_mrt_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
+UDP-Lite 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0
+tcp_bind_bucket 7 128 512 32 4 : tunables 0 0 0 : slabdata 4 4 0
+inet_peer_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
+xfrm_dst_cache 0 0 704 23 4 : tunables 0 0 0 : slabdata 0 0 0
+xfrm_state 0 0 1152 28 8 : tunables 0 0 0 : slabdata 0 0 0
+ip_fib_trie 7 21 384 21 2 : tunables 0 0 0 : slabdata 1 1 0
+ip_fib_alias 9 20 392 20 2 : tunables 0 0 0 : slabdata 1 1 0
+ip_dst_cache 27 84 576 28 4 : tunables 0 0 0 : slabdata 3 3 0
+PING 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0
+RAW 32 46 1408 23 8 : tunables 0 0 0 : slabdata 2 2 0
+UDP 11 168 1536 21 8 : tunables 0 0 0 : slabdata 8 8 0
+tw_sock_TCP 1 56 576 28 4 : tunables 0 0 0 : slabdata 2 2 0
+request_sock_TCP 0 25 632 25 4 : tunables 0 0 0 : slabdata 1 1 0
+TCP 10 60 2624 12 8 : tunables 0 0 0 : slabdata 5 5 0
+hugetlbfs_inode_cache 2 35 928 35 8 : tunables 0 0 0 : slabdata 1 1 0
+dquot 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0
+bio-1 32 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0
+eventpoll_pwq 409 600 408 20 2 : tunables 0 0 0 : slabdata 30 30 0
+eventpoll_epi 408 672 576 28 4 : tunables 0 0 0 : slabdata 24 24 0
+inotify_inode_mark 58 195 416 39 4 : tunables 0 0 0 : slabdata 5 5 0
+scsi_data_buffer 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0
+bio_crypt_ctx 128 147 376 21 2 : tunables 0 0 0 : slabdata 7 7 0
+request_queue 29 39 2408 13 8 : tunables 0 0 0 : slabdata 3 3 0
+blkdev_ioc 81 148 440 37 4 : tunables 0 0 0 : slabdata 4 4 0
+bio-0 125 200 640 25 4 : tunables 0 0 0 : slabdata 8 8 0
+biovec-max 166 196 4544 7 8 : tunables 0 0 0 : slabdata 28 28 0
+biovec-128 0 52 2496 13 8 : tunables 0 0 0 : slabdata 4 4 0
+biovec-64 0 88 1472 22 8 : tunables 0 0 0 : slabdata 4 4 0
+biovec-16 0 92 704 23 4 : tunables 0 0 0 : slabdata 4 4 0
+bio_integrity_payload 4 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0
+khugepaged_mm_slot 59 180 448 36 4 : tunables 0 0 0 : slabdata 5 5 0
+ksm_mm_slot 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0
+ksm_stable_node 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
+ksm_rmap_item 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
+user_namespace 2 37 864 37 8 : tunables 0 0 0 : slabdata 1 1 0
+uid_cache 5 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0
+dmaengine-unmap-256 1 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0
+dmaengine-unmap-128 1 22 1472 22 8 : tunables 0 0 0 : slabdata 1 1 0
+dmaengine-unmap-16 1 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0
+dmaengine-unmap-2 1 36 448 36 4 : tunables 0 0 0 : slabdata 1 1 0
+audit_buffer 0 22 360 22 2 : tunables 0 0 0 : slabdata 1 1 0
+sock_inode_cache 663 1170 1216 26 8 : tunables 0 0 0 : slabdata 45 45 0
+skbuff_ext_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
+skbuff_fclone_cache 1 72 896 36 8 : tunables 0 0 0 : slabdata 2 2 0
+skbuff_head_cache 3 650 640 25 4 : tunables 0 0 0 : slabdata 26 26 0
+configfs_dir_cache 7 38 424 38 4 : tunables 0 0 0 : slabdata 1 1 0
+file_lock_cache 27 116 552 29 4 : tunables 0 0 0 : slabdata 4 4 0
+file_lock_ctx 106 120 392 20 2 : tunables 0 0 0 : slabdata 6 6 0
+fsnotify_mark_connector 52 66 368 22 2 : tunables 0 0 0 : slabdata 3 3 0
+net_namespace 1 6 5312 6 8 : tunables 0 0 0 : slabdata 1 1 0
+task_delay_info 784 1560 416 39 4 : tunables 0 0 0 : slabdata 40 40 0
+taskstats 45 92 688 23 4 : tunables 0 0 0 : slabdata 4 4 0
+proc_dir_entry 678 682 528 31 4 : tunables 0 0 0 : slabdata 22 22 0
+pde_opener 0 189 376 21 2 : tunables 0 0 0 : slabdata 9 9 0
+proc_inode_cache 7150 8250 992 33 8 : tunables 0 0 0 : slabdata 250 250 0
+seq_file 60 735 456 35 4 : tunables 0 0 0 : slabdata 21 21 0
+sigqueue 0 156 416 39 4 : tunables 0 0 0 : slabdata 4 4 0
+bdev_cache 36 78 1216 26 8 : tunables 0 0 0 : slabdata 3 3 0
+shmem_inode_cache 1599 2208 1016 32 8 : tunables 0 0 0 : slabdata 69 69 0
+kernfs_iattrs_cache 1251 1254 424 38 4 : tunables 0 0 0 : slabdata 33 33 0
+kernfs_node_cache 52898 52920 464 35 4 : tunables 0 0 0 : slabdata 1512 1512 0
+mnt_cache 42 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0
+filp 4314 6371 704 23 4 : tunables 0 0 0 : slabdata 277 277 0
+inode_cache 28695 29505 920 35 8 : tunables 0 0 0 : slabdata 843 843 0
+dentry 166069 169074 528 31 4 : tunables 0 0 0 : slabdata 5454 5454 0
+names_cache 0 35 4544 7 8 : tunables 0 0 0 : slabdata 5 5 0
+hashtab_node 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0
+ebitmap_node 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
+avtab_extended_perms 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
+avtab_node 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0
+avc_xperms_data 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
+avc_xperms_decision_node 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0
+avc_xperms_node 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0
+avc_node 37 40 408 20 2 : tunables 0 0 0 : slabdata 2 2 0
+iint_cache 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0
+lsm_inode_cache 122284 122340 392 20 2 : tunables 0 0 0 : slabdata 6117 6117 0
+lsm_file_cache 4266 4485 352 23 2 : tunables 0 0 0 : slabdata 195 195 0
+key_jar 8 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0
+buffer_head 255622 257076 440 37 4 : tunables 0 0 0 : slabdata 6948 6948 0
+uts_namespace 0 0 776 21 4 : tunables 0 0 0 : slabdata 0 0 0
+nsproxy 31 40 408 20 2 : tunables 0 0 0 : slabdata 2 2 0
+vm_area_struct 39115 43214 528 31 4 : tunables 0 0 0 : slabdata 1394 1394 0
+mm_struct 96 529 1408 23 8 : tunables 0 0 0 : slabdata 23 23 0
+fs_cache 102 756 448 36 4 : tunables 0 0 0 : slabdata 21 21 0
+files_cache 102 588 1152 28 8 : tunables 0 0 0 : slabdata 21 21 0
+signal_cache 266 672 1536 21 8 : tunables 0 0 0 : slabdata 32 32 0
+sighand_cache 266 507 2496 13 8 : tunables 0 0 0 : slabdata 39 39 0
+task_struct 783 963 10240 3 8 : tunables 0 0 0 : slabdata 321 321 0
+cred_jar 364 952 576 28 4 : tunables 0 0 0 : slabdata 34 34 0
+anon_vma_chain 63907 67821 416 39 4 : tunables 0 0 0 : slabdata 1739 1739 0
+anon_vma 25891 28899 416 39 4 : tunables 0 0 0 : slabdata 741 741 0
+pid 408 992 512 32 4 : tunables 0 0 0 : slabdata 31 31 0
+Acpi-Operand 6682 6740 408 20 2 : tunables 0 0 0 : slabdata 337 337 0
+Acpi-ParseExt 0 39 416 39 4 : tunables 0 0 0 : slabdata 1 1 0
+Acpi-Parse 0 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0
+Acpi-State 0 78 416 39 4 : tunables 0 0 0 : slabdata 2 2 0
+Acpi-Namespace 3911 3948 384 21 2 : tunables 0 0 0 : slabdata 188 188 0
+trace_event_file 2638 2660 424 38 4 : tunables 0 0 0 : slabdata 70 70 0
+ftrace_event_field 6592 6594 384 21 2 : tunables 0 0 0 : slabdata 314 314 0
+pool_workqueue 41 64 1024 32 8 : tunables 0 0 0 : slabdata 2 2 0
+radix_tree_node 21638 24045 912 35 8 : tunables 0 0 0 : slabdata 687 687 0
+task_group 48 78 1216 26 8 : tunables 0 0 0 : slabdata 3 3 0
+vmap_area 4411 4680 400 20 2 : tunables 0 0 0 : slabdata 234 234 0
+dma-kmalloc-8k 0 0 24576 1 8 : tunables 0 0 0 : slabdata 0 0 0
+dma-kmalloc-4k 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0
+dma-kmalloc-2k 0 0 6144 5 8 : tunables 0 0 0 : slabdata 0 0 0
+dma-kmalloc-1k 0 0 3072 10 8 : tunables 0 0 0 : slabdata 0 0 0
+dma-kmalloc-512 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0
+dma-kmalloc-256 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0
+dma-kmalloc-128 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0
+dma-kmalloc-64 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
+dma-kmalloc-32 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
+dma-kmalloc-16 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
+dma-kmalloc-8 0 0 344 23 2 : tunables 0 0 0 : slabdata 0 0 0
+dma-kmalloc-192 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0
+dma-kmalloc-96 0 0 432 37 4 : tunables 0 0 0 : slabdata 0 0 0
+kmalloc-rcl-8k 0 0 24576 1 8 : tunables 0 0 0 : slabdata 0 0 0
+kmalloc-rcl-4k 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0
+kmalloc-rcl-2k 0 0 6144 5 8 : tunables 0 0 0 : slabdata 0 0 0
+kmalloc-rcl-1k 0 0 3072 10 8 : tunables 0 0 0 : slabdata 0 0 0
+kmalloc-rcl-512 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0
+kmalloc-rcl-256 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0
+kmalloc-rcl-192 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0
+kmalloc-rcl-128 31 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0
+kmalloc-rcl-96 3371 3626 432 37 4 : tunables 0 0 0 : slabdata 98 98 0
+kmalloc-rcl-64 2080 2272 512 32 4 : tunables 0 0 0 : slabdata 71 71 0
+kmalloc-rcl-32 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
+kmalloc-rcl-16 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
+kmalloc-rcl-8 0 0 344 23 2 : tunables 0 0 0 : slabdata 0 0 0
+kmalloc-8k 133 140 24576 1 8 : tunables 0 0 0 : slabdata 140 140 0
+kmalloc-4k 403 444 12288 2 8 : tunables 0 0 0 : slabdata 222 222 0
+kmalloc-2k 2391 2585 6144 5 8 : tunables 0 0 0 : slabdata 517 517 0
+kmalloc-1k 2163 2420 3072 10 8 : tunables 0 0 0 : slabdata 242 242 0
+kmalloc-512 2972 3633 1536 21 8 : tunables 0 0 0 : slabdata 173 173 0
+kmalloc-256 1841 1856 1024 32 8 : tunables 0 0 0 : slabdata 58 58 0
+kmalloc-192 2165 2914 528 31 4 : tunables 0 0 0 : slabdata 94 94 0
+kmalloc-128 1137 1175 640 25 4 : tunables 0 0 0 : slabdata 47 47 0
+kmalloc-96 1925 2590 432 37 4 : tunables 0 0 0 : slabdata 70 70 0
+kmalloc-64 9433 10688 512 32 4 : tunables 0 0 0 : slabdata 334 334 0
+kmalloc-32 9098 10062 416 39 4 : tunables 0 0 0 : slabdata 258 258 0
+kmalloc-16 10914 10956 368 22 2 : tunables 0 0 0 : slabdata 498 498 0
+kmalloc-8 7576 7705 344 23 2 : tunables 0 0 0 : slabdata 335 335 0
+kmem_cache_node 904 928 512 32 4 : tunables 0 0 0 : slabdata 29 29 0
+kmem_cache 904 936 832 39 8 : tunables 0 0 0 : slabdata 24 24 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/stat
Lines: 16
cpu 301854 612 111922 8979004 3552 2 3944 0 0 0
@@ -4639,6 +4978,35 @@ Mode: 644
Directory: fixtures/sys/devices/system
Mode: 775
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices/system/node
+Mode: 775
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices/system/node/node1
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/system/node/node1/vmstat
+Lines: 6
+nr_free_pages 1
+nr_zone_inactive_anon 2
+nr_zone_active_anon 3
+nr_zone_inactive_file 4
+nr_zone_active_file 5
+nr_zone_unevictable 6
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices/system/node/node2
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/system/node/node2/vmstat
+Lines: 6
+nr_free_pages 7
+nr_zone_inactive_anon 8
+nr_zone_active_anon 9
+nr_zone_inactive_file 10
+nr_zone_active_file 11
+nr_zone_unevictable 12
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/sys/devices/system/clocksource
Mode: 775
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go
index 8783cf3cc..f8070e6e2 100644
--- a/vendor/github.com/prometheus/procfs/fscache.go
+++ b/vendor/github.com/prometheus/procfs/fscache.go
@@ -236,7 +236,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
m, err := parseFscacheinfo(bytes.NewReader(b))
if err != nil {
- return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %v", err)
+ return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %w", err)
}
return *m, nil
diff --git a/vendor/github.com/prometheus/procfs/go.mod b/vendor/github.com/prometheus/procfs/go.mod
index ded48253c..ba6681f52 100644
--- a/vendor/github.com/prometheus/procfs/go.mod
+++ b/vendor/github.com/prometheus/procfs/go.mod
@@ -1,9 +1,9 @@
module github.com/prometheus/procfs
-go 1.12
+go 1.13
require (
- github.com/google/go-cmp v0.3.1
- golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
- golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e
+ github.com/google/go-cmp v0.5.4
+ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
+ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
)
diff --git a/vendor/github.com/prometheus/procfs/go.sum b/vendor/github.com/prometheus/procfs/go.sum
index 54b5f3303..7ceaf56b7 100644
--- a/vendor/github.com/prometheus/procfs/go.sum
+++ b/vendor/github.com/prometheus/procfs/go.sum
@@ -1,6 +1,8 @@
-github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e h1:LwyF2AFISC9nVbS6MgzsaQNSUsRXI49GS+YQ5KX/QH0=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go
index 565e89e42..0040753b1 100644
--- a/vendor/github.com/prometheus/procfs/internal/fs/fs.go
+++ b/vendor/github.com/prometheus/procfs/internal/fs/fs.go
@@ -39,10 +39,10 @@ type FS string
func NewFS(mountPoint string) (FS, error) {
info, err := os.Stat(mountPoint)
if err != nil {
- return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
+ return "", fmt.Errorf("could not read %q: %w", mountPoint, err)
}
if !info.IsDir() {
- return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
+ return "", fmt.Errorf("mount point %q is not a directory", mountPoint)
}
return FS(mountPoint), nil
diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go
index 00bbe1441..0cce190ec 100644
--- a/vendor/github.com/prometheus/procfs/loadavg.go
+++ b/vendor/github.com/prometheus/procfs/loadavg.go
@@ -44,14 +44,14 @@ func parseLoad(loadavgBytes []byte) (*LoadAvg, error) {
loads := make([]float64, 3)
parts := strings.Fields(string(loadavgBytes))
if len(parts) < 3 {
- return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %s", string(loadavgBytes))
+ return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %q", string(loadavgBytes))
}
var err error
for i, load := range parts[0:3] {
loads[i], err = strconv.ParseFloat(load, 64)
if err != nil {
- return nil, fmt.Errorf("could not parse load '%s': %s", load, err)
+ return nil, fmt.Errorf("could not parse load %q: %w", load, err)
}
}
return &LoadAvg{
diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go
index 98e37aa8c..4c4493bfa 100644
--- a/vendor/github.com/prometheus/procfs/mdstat.go
+++ b/vendor/github.com/prometheus/procfs/mdstat.go
@@ -22,8 +22,9 @@ import (
)
var (
- statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
- recoveryLineRE = regexp.MustCompile(`\((\d+)/\d+\)`)
+ statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
+ recoveryLineRE = regexp.MustCompile(`\((\d+)/\d+\)`)
+ componentDeviceRE = regexp.MustCompile(`(.*)\[\d+\]`)
)
// MDStat holds info parsed from /proc/mdstat.
@@ -44,6 +45,8 @@ type MDStat struct {
BlocksTotal int64
// Number of blocks on the device that are in sync.
BlocksSynced int64
+ // Name of md component devices
+ Devices []string
}
// MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of
@@ -56,7 +59,7 @@ func (fs FS) MDStat() ([]MDStat, error) {
}
mdstat, err := parseMDStat(data)
if err != nil {
- return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err)
+ return nil, fmt.Errorf("error parsing mdstat %q: %w", fs.proc.Path("mdstat"), err)
}
return mdstat, nil
}
@@ -82,10 +85,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
state := deviceFields[2] // active or inactive
if len(lines) <= i+3 {
- return nil, fmt.Errorf(
- "error parsing %s: too few lines for md device",
- mdName,
- )
+ return nil, fmt.Errorf("error parsing %q: too few lines for md device", mdName)
}
// Failed disks have the suffix (F) & Spare disks have the suffix (S).
@@ -94,7 +94,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
active, total, size, err := evalStatusLine(lines[i], lines[i+1])
if err != nil {
- return nil, fmt.Errorf("error parsing md device lines: %s", err)
+ return nil, fmt.Errorf("error parsing md device lines: %w", err)
}
syncLineIdx := i + 2
@@ -126,7 +126,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
} else {
syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx])
if err != nil {
- return nil, fmt.Errorf("error parsing sync line in md device %s: %s", mdName, err)
+ return nil, fmt.Errorf("error parsing sync line in md device %q: %w", mdName, err)
}
}
}
@@ -140,6 +140,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
DisksTotal: total,
BlocksTotal: size,
BlocksSynced: syncedBlocks,
+ Devices: evalComponentDevices(deviceFields),
})
}
@@ -151,7 +152,7 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, size int64, e
sizeStr := strings.Fields(statusLine)[0]
size, err = strconv.ParseInt(sizeStr, 10, 64)
if err != nil {
- return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
+ return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
}
if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
@@ -171,12 +172,12 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, size int64, e
total, err = strconv.ParseInt(matches[2], 10, 64)
if err != nil {
- return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
+ return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
}
active, err = strconv.ParseInt(matches[3], 10, 64)
if err != nil {
- return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
+ return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
}
return active, total, size, nil
@@ -190,8 +191,23 @@ func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, err error) {
syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
if err != nil {
- return 0, fmt.Errorf("%s in recoveryLine: %s", err, recoveryLine)
+ return 0, fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLine, err)
}
return syncedBlocks, nil
}
+
+func evalComponentDevices(deviceFields []string) []string {
+ mdComponentDevices := make([]string, 0)
+ if len(deviceFields) > 3 {
+ for _, field := range deviceFields[4:] {
+ match := componentDeviceRE.FindStringSubmatch(field)
+ if match == nil {
+ continue
+ }
+ mdComponentDevices = append(mdComponentDevices, match[1])
+ }
+ }
+
+ return mdComponentDevices
+}
diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go
index 50dab4bcd..f65e174e5 100644
--- a/vendor/github.com/prometheus/procfs/meminfo.go
+++ b/vendor/github.com/prometheus/procfs/meminfo.go
@@ -28,9 +28,9 @@ import (
type Meminfo struct {
// Total usable ram (i.e. physical ram minus a few reserved
// bits and the kernel binary code)
- MemTotal uint64
+ MemTotal *uint64
// The sum of LowFree+HighFree
- MemFree uint64
+ MemFree *uint64
// An estimate of how much memory is available for starting
// new applications, without swapping. Calculated from
// MemFree, SReclaimable, the size of the file LRU lists, and
@@ -39,59 +39,59 @@ type Meminfo struct {
// well, and that not all reclaimable slab will be
// reclaimable, due to items being in use. The impact of those
// factors will vary from system to system.
- MemAvailable uint64
+ MemAvailable *uint64
// Relatively temporary storage for raw disk blocks shouldn't
// get tremendously large (20MB or so)
- Buffers uint64
- Cached uint64
+ Buffers *uint64
+ Cached *uint64
// Memory that once was swapped out, is swapped back in but
// still also is in the swapfile (if memory is needed it
// doesn't need to be swapped out AGAIN because it is already
// in the swapfile. This saves I/O)
- SwapCached uint64
+ SwapCached *uint64
// Memory that has been used more recently and usually not
// reclaimed unless absolutely necessary.
- Active uint64
+ Active *uint64
// Memory which has been less recently used. It is more
// eligible to be reclaimed for other purposes
- Inactive uint64
- ActiveAnon uint64
- InactiveAnon uint64
- ActiveFile uint64
- InactiveFile uint64
- Unevictable uint64
- Mlocked uint64
+ Inactive *uint64
+ ActiveAnon *uint64
+ InactiveAnon *uint64
+ ActiveFile *uint64
+ InactiveFile *uint64
+ Unevictable *uint64
+ Mlocked *uint64
// total amount of swap space available
- SwapTotal uint64
+ SwapTotal *uint64
// Memory which has been evicted from RAM, and is temporarily
// on the disk
- SwapFree uint64
+ SwapFree *uint64
// Memory which is waiting to get written back to the disk
- Dirty uint64
+ Dirty *uint64
// Memory which is actively being written back to the disk
- Writeback uint64
+ Writeback *uint64
// Non-file backed pages mapped into userspace page tables
- AnonPages uint64
+ AnonPages *uint64
// files which have been mapped, such as libraries
- Mapped uint64
- Shmem uint64
+ Mapped *uint64
+ Shmem *uint64
// in-kernel data structures cache
- Slab uint64
+ Slab *uint64
// Part of Slab, that might be reclaimed, such as caches
- SReclaimable uint64
+ SReclaimable *uint64
// Part of Slab, that cannot be reclaimed on memory pressure
- SUnreclaim uint64
- KernelStack uint64
+ SUnreclaim *uint64
+ KernelStack *uint64
// amount of memory dedicated to the lowest level of page
// tables.
- PageTables uint64
+ PageTables *uint64
// NFS pages sent to the server, but not yet committed to
// stable storage
- NFSUnstable uint64
+ NFSUnstable *uint64
// Memory used for block device "bounce buffers"
- Bounce uint64
+ Bounce *uint64
// Memory used by FUSE for temporary writeback buffers
- WritebackTmp uint64
+ WritebackTmp *uint64
// Based on the overcommit ratio ('vm.overcommit_ratio'),
// this is the total amount of memory currently available to
// be allocated on the system. This limit is only adhered to
@@ -105,7 +105,7 @@ type Meminfo struct {
// yield a CommitLimit of 7.3G.
// For more details, see the memory overcommit documentation
// in vm/overcommit-accounting.
- CommitLimit uint64
+ CommitLimit *uint64
// The amount of memory presently allocated on the system.
// The committed memory is a sum of all of the memory which
// has been allocated by processes, even if it has not been
@@ -119,27 +119,27 @@ type Meminfo struct {
// This is useful if one needs to guarantee that processes will
// not fail due to lack of memory once that memory has been
// successfully allocated.
- CommittedAS uint64
+ CommittedAS *uint64
// total size of vmalloc memory area
- VmallocTotal uint64
+ VmallocTotal *uint64
// amount of vmalloc area which is used
- VmallocUsed uint64
+ VmallocUsed *uint64
// largest contiguous block of vmalloc area which is free
- VmallocChunk uint64
- HardwareCorrupted uint64
- AnonHugePages uint64
- ShmemHugePages uint64
- ShmemPmdMapped uint64
- CmaTotal uint64
- CmaFree uint64
- HugePagesTotal uint64
- HugePagesFree uint64
- HugePagesRsvd uint64
- HugePagesSurp uint64
- Hugepagesize uint64
- DirectMap4k uint64
- DirectMap2M uint64
- DirectMap1G uint64
+ VmallocChunk *uint64
+ HardwareCorrupted *uint64
+ AnonHugePages *uint64
+ ShmemHugePages *uint64
+ ShmemPmdMapped *uint64
+ CmaTotal *uint64
+ CmaFree *uint64
+ HugePagesTotal *uint64
+ HugePagesFree *uint64
+ HugePagesRsvd *uint64
+ HugePagesSurp *uint64
+ Hugepagesize *uint64
+ DirectMap4k *uint64
+ DirectMap2M *uint64
+ DirectMap1G *uint64
}
// Meminfo returns an information about current kernel/system memory statistics.
@@ -152,7 +152,7 @@ func (fs FS) Meminfo() (Meminfo, error) {
m, err := parseMemInfo(bytes.NewReader(b))
if err != nil {
- return Meminfo{}, fmt.Errorf("failed to parse meminfo: %v", err)
+ return Meminfo{}, fmt.Errorf("failed to parse meminfo: %w", err)
}
return *m, nil
@@ -175,101 +175,101 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) {
switch fields[0] {
case "MemTotal:":
- m.MemTotal = v
+ m.MemTotal = &v
case "MemFree:":
- m.MemFree = v
+ m.MemFree = &v
case "MemAvailable:":
- m.MemAvailable = v
+ m.MemAvailable = &v
case "Buffers:":
- m.Buffers = v
+ m.Buffers = &v
case "Cached:":
- m.Cached = v
+ m.Cached = &v
case "SwapCached:":
- m.SwapCached = v
+ m.SwapCached = &v
case "Active:":
- m.Active = v
+ m.Active = &v
case "Inactive:":
- m.Inactive = v
+ m.Inactive = &v
case "Active(anon):":
- m.ActiveAnon = v
+ m.ActiveAnon = &v
case "Inactive(anon):":
- m.InactiveAnon = v
+ m.InactiveAnon = &v
case "Active(file):":
- m.ActiveFile = v
+ m.ActiveFile = &v
case "Inactive(file):":
- m.InactiveFile = v
+ m.InactiveFile = &v
case "Unevictable:":
- m.Unevictable = v
+ m.Unevictable = &v
case "Mlocked:":
- m.Mlocked = v
+ m.Mlocked = &v
case "SwapTotal:":
- m.SwapTotal = v
+ m.SwapTotal = &v
case "SwapFree:":
- m.SwapFree = v
+ m.SwapFree = &v
case "Dirty:":
- m.Dirty = v
+ m.Dirty = &v
case "Writeback:":
- m.Writeback = v
+ m.Writeback = &v
case "AnonPages:":
- m.AnonPages = v
+ m.AnonPages = &v
case "Mapped:":
- m.Mapped = v
+ m.Mapped = &v
case "Shmem:":
- m.Shmem = v
+ m.Shmem = &v
case "Slab:":
- m.Slab = v
+ m.Slab = &v
case "SReclaimable:":
- m.SReclaimable = v
+ m.SReclaimable = &v
case "SUnreclaim:":
- m.SUnreclaim = v
+ m.SUnreclaim = &v
case "KernelStack:":
- m.KernelStack = v
+ m.KernelStack = &v
case "PageTables:":
- m.PageTables = v
+ m.PageTables = &v
case "NFS_Unstable:":
- m.NFSUnstable = v
+ m.NFSUnstable = &v
case "Bounce:":
- m.Bounce = v
+ m.Bounce = &v
case "WritebackTmp:":
- m.WritebackTmp = v
+ m.WritebackTmp = &v
case "CommitLimit:":
- m.CommitLimit = v
+ m.CommitLimit = &v
case "Committed_AS:":
- m.CommittedAS = v
+ m.CommittedAS = &v
case "VmallocTotal:":
- m.VmallocTotal = v
+ m.VmallocTotal = &v
case "VmallocUsed:":
- m.VmallocUsed = v
+ m.VmallocUsed = &v
case "VmallocChunk:":
- m.VmallocChunk = v
+ m.VmallocChunk = &v
case "HardwareCorrupted:":
- m.HardwareCorrupted = v
+ m.HardwareCorrupted = &v
case "AnonHugePages:":
- m.AnonHugePages = v
+ m.AnonHugePages = &v
case "ShmemHugePages:":
- m.ShmemHugePages = v
+ m.ShmemHugePages = &v
case "ShmemPmdMapped:":
- m.ShmemPmdMapped = v
+ m.ShmemPmdMapped = &v
case "CmaTotal:":
- m.CmaTotal = v
+ m.CmaTotal = &v
case "CmaFree:":
- m.CmaFree = v
+ m.CmaFree = &v
case "HugePages_Total:":
- m.HugePagesTotal = v
+ m.HugePagesTotal = &v
case "HugePages_Free:":
- m.HugePagesFree = v
+ m.HugePagesFree = &v
case "HugePages_Rsvd:":
- m.HugePagesRsvd = v
+ m.HugePagesRsvd = &v
case "HugePages_Surp:":
- m.HugePagesSurp = v
+ m.HugePagesSurp = &v
case "Hugepagesize:":
- m.Hugepagesize = v
+ m.Hugepagesize = &v
case "DirectMap4k:":
- m.DirectMap4k = v
+ m.DirectMap4k = &v
case "DirectMap2M:":
- m.DirectMap2M = v
+ m.DirectMap2M = &v
case "DirectMap1G:":
- m.DirectMap1G = v
+ m.DirectMap1G = &v
}
}
diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go
index 861ced9da..f7a828bb1 100644
--- a/vendor/github.com/prometheus/procfs/mountstats.go
+++ b/vendor/github.com/prometheus/procfs/mountstats.go
@@ -338,12 +338,12 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
if len(ss) == 0 {
break
}
- if len(ss) < 2 {
- return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
- }
switch ss[0] {
case fieldOpts:
+ if len(ss) < 2 {
+ return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
+ }
if stats.Opts == nil {
stats.Opts = map[string]string{}
}
@@ -356,6 +356,9 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
}
}
case fieldAge:
+ if len(ss) < 2 {
+ return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
+ }
// Age integer is in seconds
d, err := time.ParseDuration(ss[1] + "s")
if err != nil {
@@ -364,6 +367,9 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
stats.Age = d
case fieldBytes:
+ if len(ss) < 2 {
+ return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
+ }
bstats, err := parseNFSBytesStats(ss[1:])
if err != nil {
return nil, err
@@ -371,6 +377,9 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
stats.Bytes = *bstats
case fieldEvents:
+ if len(ss) < 2 {
+ return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
+ }
estats, err := parseNFSEventsStats(ss[1:])
if err != nil {
return nil, err
diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go
index b637be984..9964a3600 100644
--- a/vendor/github.com/prometheus/procfs/net_conntrackstat.go
+++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go
@@ -55,7 +55,7 @@ func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
stat, err := parseConntrackStat(bytes.NewReader(b))
if err != nil {
- return nil, fmt.Errorf("failed to read conntrack stats from %q: %v", path, err)
+ return nil, fmt.Errorf("failed to read conntrack stats from %q: %w", path, err)
}
return stat, nil
@@ -147,7 +147,7 @@ func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
func parseConntrackStatField(field string) (uint64, error) {
val, err := strconv.ParseUint(field, 16, 64)
if err != nil {
- return 0, fmt.Errorf("couldn't parse \"%s\" field: %s", field, err)
+ return 0, fmt.Errorf("couldn't parse %q field: %w", field, err)
}
return val, err
}
diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go
new file mode 100644
index 000000000..ac01dd847
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go
@@ -0,0 +1,220 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "strconv"
+ "strings"
+)
+
+const (
+ // readLimit is used by io.LimitReader while reading the content of the
+ // /proc/net/udp{,6} files. The number of lines inside such a file is dynamic
+ // as each line represents a single used socket.
+ // In theory, the number of available sockets is 65535 (2^16 - 1) per IP.
+ // With e.g. 150 Byte per line and the maximum number of 65535,
+ // the reader needs to handle 150 Byte * 65535 =~ 10 MB for a single IP.
+ readLimit = 4294967296 // Byte -> 4 GiB
+)
+
+// this contains generic data structures for both udp and tcp sockets
+type (
+ // NetIPSocket represents the contents of /proc/net/{t,u}dp{,6} file without the header.
+ NetIPSocket []*netIPSocketLine
+
+ // NetIPSocketSummary provides already computed values like the total queue lengths or
+ // the total number of used sockets. In contrast to NetIPSocket it does not collect
+ // the parsed lines into a slice.
+ NetIPSocketSummary struct {
+ // TxQueueLength shows the total queue length of all parsed tx_queue lengths.
+ TxQueueLength uint64
+ // RxQueueLength shows the total queue length of all parsed rx_queue lengths.
+ RxQueueLength uint64
+ // UsedSockets shows the total number of parsed lines representing the
+ // number of used sockets.
+ UsedSockets uint64
+ }
+
+ // netIPSocketLine represents the fields parsed from a single line
+ // in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped.
+ // For the proc file format details, see https://linux.die.net/man/5/proc.
+ netIPSocketLine struct {
+ Sl uint64
+ LocalAddr net.IP
+ LocalPort uint64
+ RemAddr net.IP
+ RemPort uint64
+ St uint64
+ TxQueue uint64
+ RxQueue uint64
+ UID uint64
+ }
+)
+
+func newNetIPSocket(file string) (NetIPSocket, error) {
+ f, err := os.Open(file)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ var netIPSocket NetIPSocket
+
+ lr := io.LimitReader(f, readLimit)
+ s := bufio.NewScanner(lr)
+ s.Scan() // skip first line with headers
+ for s.Scan() {
+ fields := strings.Fields(s.Text())
+ line, err := parseNetIPSocketLine(fields)
+ if err != nil {
+ return nil, err
+ }
+ netIPSocket = append(netIPSocket, line)
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+ return netIPSocket, nil
+}
+
+// newNetIPSocketSummary creates a new NetIPSocket{,6} from the contents of the given file.
+func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) {
+ f, err := os.Open(file)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ var netIPSocketSummary NetIPSocketSummary
+
+ lr := io.LimitReader(f, readLimit)
+ s := bufio.NewScanner(lr)
+ s.Scan() // skip first line with headers
+ for s.Scan() {
+ fields := strings.Fields(s.Text())
+ line, err := parseNetIPSocketLine(fields)
+ if err != nil {
+ return nil, err
+ }
+ netIPSocketSummary.TxQueueLength += line.TxQueue
+ netIPSocketSummary.RxQueueLength += line.RxQueue
+ netIPSocketSummary.UsedSockets++
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+ return &netIPSocketSummary, nil
+}
+
+// The /proc/net/{t,u}dp{,6} files encode IPv4 addresses in network byte order; IPv6 addresses are four 32-bit words, each word's four bytes written in reverse order.
+
+func parseIP(hexIP string) (net.IP, error) {
+ var byteIP []byte
+ byteIP, err := hex.DecodeString(hexIP)
+ if err != nil {
+ return nil, fmt.Errorf("cannot parse address field in socket line %q", hexIP)
+ }
+ switch len(byteIP) {
+ case 4:
+ return net.IP{byteIP[3], byteIP[2], byteIP[1], byteIP[0]}, nil
+ case 16:
+ i := net.IP{
+ byteIP[3], byteIP[2], byteIP[1], byteIP[0],
+ byteIP[7], byteIP[6], byteIP[5], byteIP[4],
+ byteIP[11], byteIP[10], byteIP[9], byteIP[8],
+ byteIP[15], byteIP[14], byteIP[13], byteIP[12],
+ }
+ return i, nil
+ default:
+ return nil, fmt.Errorf("Unable to parse IP %s", hexIP)
+ }
+}
+
+// parseNetIPSocketLine parses a single line, represented by a list of fields.
+func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
+ line := &netIPSocketLine{}
+ if len(fields) < 8 {
+ return nil, fmt.Errorf(
+ "cannot parse net socket line as it has less then 8 columns %q",
+ strings.Join(fields, " "),
+ )
+ }
+ var err error // parse error
+
+ // sl
+ s := strings.Split(fields[0], ":")
+ if len(s) != 2 {
+ return nil, fmt.Errorf("cannot parse sl field in socket line %q", fields[0])
+ }
+
+ if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
+ return nil, fmt.Errorf("cannot parse sl value in socket line: %w", err)
+ }
+ // local_address
+ l := strings.Split(fields[1], ":")
+ if len(l) != 2 {
+ return nil, fmt.Errorf("cannot parse local_address field in socket line %q", fields[1])
+ }
+ if line.LocalAddr, err = parseIP(l[0]); err != nil {
+ return nil, err
+ }
+ if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
+ return nil, fmt.Errorf("cannot parse local_address port value in socket line: %w", err)
+ }
+
+ // remote_address
+ r := strings.Split(fields[2], ":")
+ if len(r) != 2 {
+ return nil, fmt.Errorf("cannot parse rem_address field in socket line %q", fields[1])
+ }
+ if line.RemAddr, err = parseIP(r[0]); err != nil {
+ return nil, err
+ }
+ if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
+ return nil, fmt.Errorf("cannot parse rem_address port value in socket line: %w", err)
+ }
+
+ // st
+ if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
+ return nil, fmt.Errorf("cannot parse st value in socket line: %w", err)
+ }
+
+ // tx_queue and rx_queue
+ q := strings.Split(fields[4], ":")
+ if len(q) != 2 {
+ return nil, fmt.Errorf(
+ "cannot parse tx/rx queues in socket line as it has a missing colon %q",
+ fields[4],
+ )
+ }
+ if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
+ return nil, fmt.Errorf("cannot parse tx_queue value in socket line: %w", err)
+ }
+ if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
+ return nil, fmt.Errorf("cannot parse rx_queue value in socket line: %w", err)
+ }
+
+ // uid
+ if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
+ return nil, fmt.Errorf("cannot parse uid value in socket line: %w", err)
+ }
+
+ return line, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go
new file mode 100644
index 000000000..8c6de3791
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_protocols.go
@@ -0,0 +1,180 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// NetProtocolStats stores the contents from /proc/net/protocols
+type NetProtocolStats map[string]NetProtocolStatLine
+
+// NetProtocolStatLine contains a single line parsed from /proc/net/protocols. We
+// only care about the first six columns as the rest are not likely to change
+// and only serve to provide a set of capabilities for each protocol.
+type NetProtocolStatLine struct {
+ Name string // 0 The name of the protocol
+ Size uint64 // 1 The size, in bytes, of a given protocol structure. e.g. sizeof(struct tcp_sock) or sizeof(struct unix_sock)
+ Sockets int64 // 2 Number of sockets in use by this protocol
+ Memory int64 // 3 Number of 4KB pages allocated by all sockets of this protocol
+ Pressure int // 4 This is either yes, no, or NI (not implemented). For the sake of simplicity we treat NI as not experiencing memory pressure.
+ MaxHeader uint64 // 5 Protocol specific max header size
+ Slab bool // 6 Indicates whether or not memory is allocated from the SLAB
+ ModuleName string // 7 The name of the module that implemented this protocol or "kernel" if not from a module
+ Capabilities NetProtocolCapabilities
+}
+
+// NetProtocolCapabilities contains a list of capabilities for each protocol
+type NetProtocolCapabilities struct {
+ Close bool // 8
+ Connect bool // 9
+ Disconnect bool // 10
+ Accept bool // 11
+ IoCtl bool // 12
+ Init bool // 13
+ Destroy bool // 14
+ Shutdown bool // 15
+ SetSockOpt bool // 16
+ GetSockOpt bool // 17
+ SendMsg bool // 18
+ RecvMsg bool // 19
+ SendPage bool // 20
+ Bind bool // 21
+ BacklogRcv bool // 22
+ Hash bool // 23
+ UnHash bool // 24
+ GetPort bool // 25
+ EnterMemoryPressure bool // 26
+}
+
+// NetProtocols reads stats from /proc/net/protocols and returns a map of
+// NetProtocolStatLine entries. As of this writing no official Linux Documentation
+// exists, however the source is fairly self-explanatory and the format seems
+// stable since its introduction in 2.6.12-rc2
+// Linux 2.6.12-rc2 - https://elixir.bootlin.com/linux/v2.6.12-rc2/source/net/core/sock.c#L1452
+// Linux 5.10 - https://elixir.bootlin.com/linux/v5.10.4/source/net/core/sock.c#L3586
+func (fs FS) NetProtocols() (NetProtocolStats, error) {
+ data, err := util.ReadFileNoStat(fs.proc.Path("net/protocols"))
+ if err != nil {
+ return NetProtocolStats{}, err
+ }
+ return parseNetProtocols(bufio.NewScanner(bytes.NewReader(data)))
+}
+
+func parseNetProtocols(s *bufio.Scanner) (NetProtocolStats, error) {
+ nps := NetProtocolStats{}
+
+ // Skip the header line
+ s.Scan()
+
+ for s.Scan() {
+ line, err := nps.parseLine(s.Text())
+ if err != nil {
+ return NetProtocolStats{}, err
+ }
+
+ nps[line.Name] = *line
+ }
+ return nps, nil
+}
+
+func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, error) {
+ line := &NetProtocolStatLine{Capabilities: NetProtocolCapabilities{}}
+ var err error
+ const enabled = "yes"
+ const disabled = "no"
+
+ fields := strings.Fields(rawLine)
+ line.Name = fields[0]
+ line.Size, err = strconv.ParseUint(fields[1], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.Sockets, err = strconv.ParseInt(fields[2], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.Memory, err = strconv.ParseInt(fields[3], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ if fields[4] == enabled {
+ line.Pressure = 1
+ } else if fields[4] == disabled {
+ line.Pressure = 0
+ } else {
+ line.Pressure = -1
+ }
+ line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ if fields[6] == enabled {
+ line.Slab = true
+ } else if fields[6] == disabled {
+ line.Slab = false
+ } else {
+ return nil, fmt.Errorf("unable to parse capability for protocol: %s", line.Name)
+ }
+ line.ModuleName = fields[7]
+
+ err = line.Capabilities.parseCapabilities(fields[8:])
+ if err != nil {
+ return nil, err
+ }
+
+ return line, nil
+}
+
+func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) error {
+ // The capabilities are all bools so we can loop over to map them
+ capabilityFields := [...]*bool{
+ &pc.Close,
+ &pc.Connect,
+ &pc.Disconnect,
+ &pc.Accept,
+ &pc.IoCtl,
+ &pc.Init,
+ &pc.Destroy,
+ &pc.Shutdown,
+ &pc.SetSockOpt,
+ &pc.GetSockOpt,
+ &pc.SendMsg,
+ &pc.RecvMsg,
+ &pc.SendPage,
+ &pc.Bind,
+ &pc.BacklogRcv,
+ &pc.Hash,
+ &pc.UnHash,
+ &pc.GetPort,
+ &pc.EnterMemoryPressure,
+ }
+
+ for i := 0; i < len(capabilities); i++ {
+ if capabilities[i] == "y" {
+ *capabilityFields[i] = true
+ } else if capabilities[i] == "n" {
+ *capabilityFields[i] = false
+ } else {
+ return fmt.Errorf("unable to parse capability block for protocol: position %d", i)
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go
index f91ef5523..e36f4872d 100644
--- a/vendor/github.com/prometheus/procfs/net_sockstat.go
+++ b/vendor/github.com/prometheus/procfs/net_sockstat.go
@@ -70,7 +70,7 @@ func readSockstat(name string) (*NetSockstat, error) {
stat, err := parseSockstat(bytes.NewReader(b))
if err != nil {
- return nil, fmt.Errorf("failed to read sockstats from %q: %v", name, err)
+ return nil, fmt.Errorf("failed to read sockstats from %q: %w", name, err)
}
return stat, nil
@@ -90,7 +90,7 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) {
// The remaining fields are key/value pairs.
kvs, err := parseSockstatKVs(fields[1:])
if err != nil {
- return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %v", s.Text(), err)
+ return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %w", s.Text(), err)
}
// The first field is the protocol. We must trim its colon suffix.
diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go
index db5debdf4..46f12c61d 100644
--- a/vendor/github.com/prometheus/procfs/net_softnet.go
+++ b/vendor/github.com/prometheus/procfs/net_softnet.go
@@ -51,7 +51,7 @@ func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) {
entries, err := parseSoftnet(bytes.NewReader(b))
if err != nil {
- return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %v", err)
+ return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %w", err)
}
return entries, nil
diff --git a/vendor/github.com/prometheus/procfs/net_tcp.go b/vendor/github.com/prometheus/procfs/net_tcp.go
new file mode 100644
index 000000000..527762955
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_tcp.go
@@ -0,0 +1,64 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+type (
+ // NetTCP represents the contents of /proc/net/tcp{,6} file without the header.
+ NetTCP []*netIPSocketLine
+
+ // NetTCPSummary provides already computed values like the total queue lengths or
+ // the total number of used sockets. In contrast to NetTCP it does not collect
+ // the parsed lines into a slice.
+ NetTCPSummary NetIPSocketSummary
+)
+
+// NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams
+// read from /proc/net/tcp.
+func (fs FS) NetTCP() (NetTCP, error) {
+ return newNetTCP(fs.proc.Path("net/tcp"))
+}
+
+// NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams
+// read from /proc/net/tcp6.
+func (fs FS) NetTCP6() (NetTCP, error) {
+ return newNetTCP(fs.proc.Path("net/tcp6"))
+}
+
+// NetTCPSummary returns already computed statistics like the total queue lengths
+// for TCP datagrams read from /proc/net/tcp.
+func (fs FS) NetTCPSummary() (*NetTCPSummary, error) {
+ return newNetTCPSummary(fs.proc.Path("net/tcp"))
+}
+
+// NetTCP6Summary returns already computed statistics like the total queue lengths
+// for TCP datagrams read from /proc/net/tcp6.
+func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) {
+ return newNetTCPSummary(fs.proc.Path("net/tcp6"))
+}
+
+// newNetTCP creates a new NetTCP{,6} from the contents of the given file.
+func newNetTCP(file string) (NetTCP, error) {
+ n, err := newNetIPSocket(file)
+ n1 := NetTCP(n)
+ return n1, err
+}
+
+func newNetTCPSummary(file string) (*NetTCPSummary, error) {
+ n, err := newNetIPSocketSummary(file)
+ if n == nil {
+ return nil, err
+ }
+ n1 := NetTCPSummary(*n)
+ return &n1, err
+}
diff --git a/vendor/github.com/prometheus/procfs/net_udp.go b/vendor/github.com/prometheus/procfs/net_udp.go
index d017e3f18..9ac3daf2d 100644
--- a/vendor/github.com/prometheus/procfs/net_udp.go
+++ b/vendor/github.com/prometheus/procfs/net_udp.go
@@ -13,58 +13,14 @@
package procfs
-import (
- "bufio"
- "encoding/hex"
- "fmt"
- "io"
- "net"
- "os"
- "strconv"
- "strings"
-)
-
-const (
- // readLimit is used by io.LimitReader while reading the content of the
- // /proc/net/udp{,6} files. The number of lines inside such a file is dynamic
- // as each line represents a single used socket.
- // In theory, the number of available sockets is 65535 (2^16 - 1) per IP.
- // With e.g. 150 Byte per line and the maximum number of 65535,
- // the reader needs to handle 150 Byte * 65535 =~ 10 MB for a single IP.
- readLimit = 4294967296 // Byte -> 4 GiB
-)
-
type (
// NetUDP represents the contents of /proc/net/udp{,6} file without the header.
- NetUDP []*netUDPLine
+ NetUDP []*netIPSocketLine
// NetUDPSummary provides already computed values like the total queue lengths or
// the total number of used sockets. In contrast to NetUDP it does not collect
// the parsed lines into a slice.
- NetUDPSummary struct {
- // TxQueueLength shows the total queue length of all parsed tx_queue lengths.
- TxQueueLength uint64
- // RxQueueLength shows the total queue length of all parsed rx_queue lengths.
- RxQueueLength uint64
- // UsedSockets shows the total number of parsed lines representing the
- // number of used sockets.
- UsedSockets uint64
- }
-
- // netUDPLine represents the fields parsed from a single line
- // in /proc/net/udp{,6}. Fields which are not used by UDP are skipped.
- // For the proc file format details, see https://linux.die.net/man/5/proc.
- netUDPLine struct {
- Sl uint64
- LocalAddr net.IP
- LocalPort uint64
- RemAddr net.IP
- RemPort uint64
- St uint64
- TxQueue uint64
- RxQueue uint64
- UID uint64
- }
+ NetUDPSummary NetIPSocketSummary
)
// NetUDP returns the IPv4 kernel/networking statistics for UDP datagrams
@@ -93,137 +49,16 @@ func (fs FS) NetUDP6Summary() (*NetUDPSummary, error) {
// newNetUDP creates a new NetUDP{,6} from the contents of the given file.
func newNetUDP(file string) (NetUDP, error) {
- f, err := os.Open(file)
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- netUDP := NetUDP{}
-
- lr := io.LimitReader(f, readLimit)
- s := bufio.NewScanner(lr)
- s.Scan() // skip first line with headers
- for s.Scan() {
- fields := strings.Fields(s.Text())
- line, err := parseNetUDPLine(fields)
- if err != nil {
- return nil, err
- }
- netUDP = append(netUDP, line)
- }
- if err := s.Err(); err != nil {
- return nil, err
- }
- return netUDP, nil
+ n, err := newNetIPSocket(file)
+ n1 := NetUDP(n)
+ return n1, err
}
-// newNetUDPSummary creates a new NetUDP{,6} from the contents of the given file.
func newNetUDPSummary(file string) (*NetUDPSummary, error) {
- f, err := os.Open(file)
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- netUDPSummary := &NetUDPSummary{}
-
- lr := io.LimitReader(f, readLimit)
- s := bufio.NewScanner(lr)
- s.Scan() // skip first line with headers
- for s.Scan() {
- fields := strings.Fields(s.Text())
- line, err := parseNetUDPLine(fields)
- if err != nil {
- return nil, err
- }
- netUDPSummary.TxQueueLength += line.TxQueue
- netUDPSummary.RxQueueLength += line.RxQueue
- netUDPSummary.UsedSockets++
- }
- if err := s.Err(); err != nil {
+ n, err := newNetIPSocketSummary(file)
+ if n == nil {
return nil, err
}
- return netUDPSummary, nil
-}
-
-// parseNetUDPLine parses a single line, represented by a list of fields.
-func parseNetUDPLine(fields []string) (*netUDPLine, error) {
- line := &netUDPLine{}
- if len(fields) < 8 {
- return nil, fmt.Errorf(
- "cannot parse net udp socket line as it has less then 8 columns: %s",
- strings.Join(fields, " "),
- )
- }
- var err error // parse error
-
- // sl
- s := strings.Split(fields[0], ":")
- if len(s) != 2 {
- return nil, fmt.Errorf(
- "cannot parse sl field in udp socket line: %s", fields[0])
- }
-
- if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
- return nil, fmt.Errorf("cannot parse sl value in udp socket line: %s", err)
- }
- // local_address
- l := strings.Split(fields[1], ":")
- if len(l) != 2 {
- return nil, fmt.Errorf(
- "cannot parse local_address field in udp socket line: %s", fields[1])
- }
- if line.LocalAddr, err = hex.DecodeString(l[0]); err != nil {
- return nil, fmt.Errorf(
- "cannot parse local_address value in udp socket line: %s", err)
- }
- if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
- return nil, fmt.Errorf(
- "cannot parse local_address port value in udp socket line: %s", err)
- }
-
- // remote_address
- r := strings.Split(fields[2], ":")
- if len(r) != 2 {
- return nil, fmt.Errorf(
- "cannot parse rem_address field in udp socket line: %s", fields[1])
- }
- if line.RemAddr, err = hex.DecodeString(r[0]); err != nil {
- return nil, fmt.Errorf(
- "cannot parse rem_address value in udp socket line: %s", err)
- }
- if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
- return nil, fmt.Errorf(
- "cannot parse rem_address port value in udp socket line: %s", err)
- }
-
- // st
- if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
- return nil, fmt.Errorf(
- "cannot parse st value in udp socket line: %s", err)
- }
-
- // tx_queue and rx_queue
- q := strings.Split(fields[4], ":")
- if len(q) != 2 {
- return nil, fmt.Errorf(
- "cannot parse tx/rx queues in udp socket line as it has a missing colon: %s",
- fields[4],
- )
- }
- if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
- return nil, fmt.Errorf("cannot parse tx_queue value in udp socket line: %s", err)
- }
- if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
- return nil, fmt.Errorf("cannot parse rx_queue value in udp socket line: %s", err)
- }
-
- // uid
- if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
- return nil, fmt.Errorf(
- "cannot parse uid value in udp socket line: %s", err)
- }
-
- return line, nil
+ n1 := NetUDPSummary(*n)
+ return &n1, err
}
diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go
index c55b4b18e..98aa8e1c3 100644
--- a/vendor/github.com/prometheus/procfs/net_unix.go
+++ b/vendor/github.com/prometheus/procfs/net_unix.go
@@ -108,14 +108,14 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
line := s.Text()
item, err := nu.parseLine(line, hasInode, minFields)
if err != nil {
- return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %v", line, err)
+ return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %w", line, err)
}
nu.Rows = append(nu.Rows, item)
}
if err := s.Err(); err != nil {
- return nil, fmt.Errorf("failed to scan /proc/net/unix data: %v", err)
+ return nil, fmt.Errorf("failed to scan /proc/net/unix data: %w", err)
}
return &nu, nil
@@ -136,29 +136,29 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine,
users, err := u.parseUsers(fields[1])
if err != nil {
- return nil, fmt.Errorf("failed to parse ref count(%s): %v", fields[1], err)
+ return nil, fmt.Errorf("failed to parse ref count %q: %w", fields[1], err)
}
flags, err := u.parseFlags(fields[3])
if err != nil {
- return nil, fmt.Errorf("failed to parse flags(%s): %v", fields[3], err)
+ return nil, fmt.Errorf("failed to parse flags %q: %w", fields[3], err)
}
typ, err := u.parseType(fields[4])
if err != nil {
- return nil, fmt.Errorf("failed to parse type(%s): %v", fields[4], err)
+ return nil, fmt.Errorf("failed to parse type %q: %w", fields[4], err)
}
state, err := u.parseState(fields[5])
if err != nil {
- return nil, fmt.Errorf("failed to parse state(%s): %v", fields[5], err)
+ return nil, fmt.Errorf("failed to parse state %q: %w", fields[5], err)
}
var inode uint64
if hasInode {
inode, err = u.parseInode(fields[6])
if err != nil {
- return nil, fmt.Errorf("failed to parse inode(%s): %v", fields[6], err)
+ return nil, fmt.Errorf("failed to parse inode %q: %w", fields[6], err)
}
}
diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go
index 9f97b6e52..28f696803 100644
--- a/vendor/github.com/prometheus/procfs/proc.go
+++ b/vendor/github.com/prometheus/procfs/proc.go
@@ -105,7 +105,7 @@ func (fs FS) AllProcs() (Procs, error) {
names, err := d.Readdirnames(-1)
if err != nil {
- return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err)
+ return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err)
}
p := Procs{}
@@ -206,7 +206,7 @@ func (p Proc) FileDescriptors() ([]uintptr, error) {
for i, n := range names {
fd, err := strconv.ParseInt(n, 10, 32)
if err != nil {
- return nil, fmt.Errorf("could not parse fd %s: %s", n, err)
+ return nil, fmt.Errorf("could not parse fd %q: %w", n, err)
}
fds[i] = uintptr(fd)
}
@@ -278,7 +278,7 @@ func (p Proc) fileDescriptors() ([]string, error) {
names, err := d.Readdirnames(-1)
if err != nil {
- return nil, fmt.Errorf("could not read %s: %s", d.Name(), err)
+ return nil, fmt.Errorf("could not read %q: %w", d.Name(), err)
}
return names, nil
diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go
index 4abd46451..0094a13c0 100644
--- a/vendor/github.com/prometheus/procfs/proc_cgroup.go
+++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go
@@ -49,7 +49,7 @@ type Cgroup struct {
func parseCgroupString(cgroupStr string) (*Cgroup, error) {
var err error
- fields := strings.Split(cgroupStr, ":")
+ fields := strings.SplitN(cgroupStr, ":", 3)
if len(fields) < 3 {
return nil, fmt.Errorf("at least 3 fields required, found %d fields in cgroup string: %s", len(fields), cgroupStr)
}
diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go
index a76ca7079..cf63227f0 100644
--- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go
+++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go
@@ -16,7 +16,7 @@ package procfs
import (
"bufio"
"bytes"
- "errors"
+ "fmt"
"regexp"
"github.com/prometheus/procfs/internal/util"
@@ -112,7 +112,7 @@ func parseInotifyInfo(line string) (*InotifyInfo, error) {
}
return i, nil
}
- return nil, errors.New("invalid inode entry: " + line)
+ return nil, fmt.Errorf("invalid inode entry: %q", line)
}
// ProcFDInfos represents a list of ProcFDInfo structs.
diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go
index 91ee24df8..dd20f198a 100644
--- a/vendor/github.com/prometheus/procfs/proc_limits.go
+++ b/vendor/github.com/prometheus/procfs/proc_limits.go
@@ -26,55 +26,55 @@ import (
// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
type ProcLimits struct {
// CPU time limit in seconds.
- CPUTime int64
+ CPUTime uint64
// Maximum size of files that the process may create.
- FileSize int64
+ FileSize uint64
// Maximum size of the process's data segment (initialized data,
// uninitialized data, and heap).
- DataSize int64
+ DataSize uint64
// Maximum size of the process stack in bytes.
- StackSize int64
+ StackSize uint64
// Maximum size of a core file.
- CoreFileSize int64
+ CoreFileSize uint64
// Limit of the process's resident set in pages.
- ResidentSet int64
+ ResidentSet uint64
// Maximum number of processes that can be created for the real user ID of
// the calling process.
- Processes int64
+ Processes uint64
// Value one greater than the maximum file descriptor number that can be
// opened by this process.
- OpenFiles int64
+ OpenFiles uint64
// Maximum number of bytes of memory that may be locked into RAM.
- LockedMemory int64
+ LockedMemory uint64
// Maximum size of the process's virtual memory address space in bytes.
- AddressSpace int64
+ AddressSpace uint64
// Limit on the combined number of flock(2) locks and fcntl(2) leases that
// this process may establish.
- FileLocks int64
+ FileLocks uint64
// Limit of signals that may be queued for the real user ID of the calling
// process.
- PendingSignals int64
+ PendingSignals uint64
// Limit on the number of bytes that can be allocated for POSIX message
// queues for the real user ID of the calling process.
- MsqqueueSize int64
+ MsqqueueSize uint64
// Limit of the nice priority set using setpriority(2) or nice(2).
- NicePriority int64
+ NicePriority uint64
// Limit of the real-time priority set using sched_setscheduler(2) or
// sched_setparam(2).
- RealtimePriority int64
+ RealtimePriority uint64
// Limit (in microseconds) on the amount of CPU time that a process
// scheduled under a real-time scheduling policy may consume without making
// a blocking system call.
- RealtimeTimeout int64
+ RealtimeTimeout uint64
}
const (
- limitsFields = 3
+ limitsFields = 4
limitsUnlimited = "unlimited"
)
var (
- limitsDelimiter = regexp.MustCompile(" +")
+ limitsMatch = regexp.MustCompile(`(Max \w+\s{0,1}?\w*\s{0,1}\w*)\s{2,}(\w+)\s+(\w+)`)
)
// NewLimits returns the current soft limits of the process.
@@ -96,46 +96,49 @@ func (p Proc) Limits() (ProcLimits, error) {
l = ProcLimits{}
s = bufio.NewScanner(f)
)
+
+ s.Scan() // Skip limits header
+
for s.Scan() {
- fields := limitsDelimiter.Split(s.Text(), limitsFields)
+ //fields := limitsMatch.Split(s.Text(), limitsFields)
+ fields := limitsMatch.FindStringSubmatch(s.Text())
if len(fields) != limitsFields {
- return ProcLimits{}, fmt.Errorf(
- "couldn't parse %s line %s", f.Name(), s.Text())
+ return ProcLimits{}, fmt.Errorf("couldn't parse %q line %q", f.Name(), s.Text())
}
- switch fields[0] {
+ switch fields[1] {
case "Max cpu time":
- l.CPUTime, err = parseInt(fields[1])
+ l.CPUTime, err = parseUint(fields[2])
case "Max file size":
- l.FileSize, err = parseInt(fields[1])
+ l.FileSize, err = parseUint(fields[2])
case "Max data size":
- l.DataSize, err = parseInt(fields[1])
+ l.DataSize, err = parseUint(fields[2])
case "Max stack size":
- l.StackSize, err = parseInt(fields[1])
+ l.StackSize, err = parseUint(fields[2])
case "Max core file size":
- l.CoreFileSize, err = parseInt(fields[1])
+ l.CoreFileSize, err = parseUint(fields[2])
case "Max resident set":
- l.ResidentSet, err = parseInt(fields[1])
+ l.ResidentSet, err = parseUint(fields[2])
case "Max processes":
- l.Processes, err = parseInt(fields[1])
+ l.Processes, err = parseUint(fields[2])
case "Max open files":
- l.OpenFiles, err = parseInt(fields[1])
+ l.OpenFiles, err = parseUint(fields[2])
case "Max locked memory":
- l.LockedMemory, err = parseInt(fields[1])
+ l.LockedMemory, err = parseUint(fields[2])
case "Max address space":
- l.AddressSpace, err = parseInt(fields[1])
+ l.AddressSpace, err = parseUint(fields[2])
case "Max file locks":
- l.FileLocks, err = parseInt(fields[1])
+ l.FileLocks, err = parseUint(fields[2])
case "Max pending signals":
- l.PendingSignals, err = parseInt(fields[1])
+ l.PendingSignals, err = parseUint(fields[2])
case "Max msgqueue size":
- l.MsqqueueSize, err = parseInt(fields[1])
+ l.MsqqueueSize, err = parseUint(fields[2])
case "Max nice priority":
- l.NicePriority, err = parseInt(fields[1])
+ l.NicePriority, err = parseUint(fields[2])
case "Max realtime priority":
- l.RealtimePriority, err = parseInt(fields[1])
+ l.RealtimePriority, err = parseUint(fields[2])
case "Max realtime timeout":
- l.RealtimeTimeout, err = parseInt(fields[1])
+ l.RealtimeTimeout, err = parseUint(fields[2])
}
if err != nil {
return ProcLimits{}, err
@@ -145,13 +148,13 @@ func (p Proc) Limits() (ProcLimits, error) {
return l, s.Err()
}
-func parseInt(s string) (int64, error) {
+func parseUint(s string) (uint64, error) {
if s == limitsUnlimited {
- return -1, nil
+ return 18446744073709551615, nil
}
- i, err := strconv.ParseInt(s, 10, 64)
+ i, err := strconv.ParseUint(s, 10, 64)
if err != nil {
- return 0, fmt.Errorf("couldn't parse value %s: %s", s, err)
+ return 0, fmt.Errorf("couldn't parse value %q: %w", s, err)
}
return i, nil
}
diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go
index c66740ff7..391b4cbd1 100644
--- a/vendor/github.com/prometheus/procfs/proc_ns.go
+++ b/vendor/github.com/prometheus/procfs/proc_ns.go
@@ -40,7 +40,7 @@ func (p Proc) Namespaces() (Namespaces, error) {
names, err := d.Readdirnames(-1)
if err != nil {
- return nil, fmt.Errorf("failed to read contents of ns dir: %v", err)
+ return nil, fmt.Errorf("failed to read contents of ns dir: %w", err)
}
ns := make(Namespaces, len(names))
@@ -52,13 +52,13 @@ func (p Proc) Namespaces() (Namespaces, error) {
fields := strings.SplitN(target, ":", 2)
if len(fields) != 2 {
- return nil, fmt.Errorf("failed to parse namespace type and inode from '%v'", target)
+ return nil, fmt.Errorf("failed to parse namespace type and inode from %q", target)
}
typ := fields[0]
inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
if err != nil {
- return nil, fmt.Errorf("failed to parse inode from '%v': %v", fields[1], err)
+ return nil, fmt.Errorf("failed to parse inode from %q: %w", fields[1], err)
}
ns[name] = Namespace{typ, uint32(inode)}
diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go
index 0d7bee54c..dc6c14f0a 100644
--- a/vendor/github.com/prometheus/procfs/proc_psi.go
+++ b/vendor/github.com/prometheus/procfs/proc_psi.go
@@ -59,7 +59,7 @@ type PSIStats struct {
func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) {
data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
if err != nil {
- return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %s", resource)
+ return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %q: %w", resource, err)
}
return parsePSIStats(resource, bytes.NewReader(data))
diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go
index 4517d2e9d..67ca0e9fb 100644
--- a/vendor/github.com/prometheus/procfs/proc_stat.go
+++ b/vendor/github.com/prometheus/procfs/proc_stat.go
@@ -127,10 +127,7 @@ func (p Proc) Stat() (ProcStat, error) {
)
if l < 0 || r < 0 {
- return ProcStat{}, fmt.Errorf(
- "unexpected format, couldn't extract comm: %s",
- data,
- )
+ return ProcStat{}, fmt.Errorf("unexpected format, couldn't extract comm %q", data)
}
s.Comm = string(data[l+1 : r])
diff --git a/vendor/github.com/prometheus/procfs/schedstat.go b/vendor/github.com/prometheus/procfs/schedstat.go
index a4c4089ac..28228164e 100644
--- a/vendor/github.com/prometheus/procfs/schedstat.go
+++ b/vendor/github.com/prometheus/procfs/schedstat.go
@@ -95,24 +95,27 @@ func (fs FS) Schedstat() (*Schedstat, error) {
return stats, nil
}
-func parseProcSchedstat(contents string) (stats ProcSchedstat, err error) {
+func parseProcSchedstat(contents string) (ProcSchedstat, error) {
+ var (
+ stats ProcSchedstat
+ err error
+ )
match := procLineRE.FindStringSubmatch(contents)
if match != nil {
stats.RunningNanoseconds, err = strconv.ParseUint(match[1], 10, 64)
if err != nil {
- return
+ return stats, err
}
stats.WaitingNanoseconds, err = strconv.ParseUint(match[2], 10, 64)
if err != nil {
- return
+ return stats, err
}
stats.RunTimeslices, err = strconv.ParseUint(match[3], 10, 64)
- return
+ return stats, err
}
- err = errors.New("could not parse schedstat")
- return
+ return stats, errors.New("could not parse schedstat")
}
diff --git a/vendor/github.com/prometheus/procfs/slab.go b/vendor/github.com/prometheus/procfs/slab.go
new file mode 100644
index 000000000..7896fd724
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/slab.go
@@ -0,0 +1,151 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+var (
+ slabSpace = regexp.MustCompile(`\s+`)
+ slabVer = regexp.MustCompile(`slabinfo -`)
+ slabHeader = regexp.MustCompile(`# name`)
+)
+
+// Slab represents a slab pool in the kernel.
+type Slab struct {
+ Name string
+ ObjActive int64
+ ObjNum int64
+ ObjSize int64
+ ObjPerSlab int64
+ PagesPerSlab int64
+ // tunables
+ Limit int64
+ Batch int64
+ SharedFactor int64
+ SlabActive int64
+ SlabNum int64
+ SharedAvail int64
+}
+
+// SlabInfo represents info for all slabs.
+type SlabInfo struct {
+ Slabs []*Slab
+}
+
+func shouldParseSlab(line string) bool {
+ if slabVer.MatchString(line) {
+ return false
+ }
+ if slabHeader.MatchString(line) {
+ return false
+ }
+ return true
+}
+
+// parseV21SlabEntry is used to parse a line from /proc/slabinfo version 2.1.
+func parseV21SlabEntry(line string) (*Slab, error) {
+ // First cleanup whitespace.
+ l := slabSpace.ReplaceAllString(line, " ")
+ s := strings.Split(l, " ")
+ if len(s) != 16 {
+ return nil, fmt.Errorf("unable to parse: %q", line)
+ }
+ var err error
+ i := &Slab{Name: s[0]}
+ i.ObjActive, err = strconv.ParseInt(s[1], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ i.ObjNum, err = strconv.ParseInt(s[2], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ i.ObjSize, err = strconv.ParseInt(s[3], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ i.ObjPerSlab, err = strconv.ParseInt(s[4], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ i.PagesPerSlab, err = strconv.ParseInt(s[5], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ i.Limit, err = strconv.ParseInt(s[8], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ i.Batch, err = strconv.ParseInt(s[9], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ i.SharedFactor, err = strconv.ParseInt(s[10], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ i.SlabActive, err = strconv.ParseInt(s[13], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ i.SlabNum, err = strconv.ParseInt(s[14], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ i.SharedAvail, err = strconv.ParseInt(s[15], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ return i, nil
+}
+
+// parseSlabInfo21 is used to parse a slabinfo 2.1 file.
+func parseSlabInfo21(r *bytes.Reader) (SlabInfo, error) {
+ scanner := bufio.NewScanner(r)
+ s := SlabInfo{Slabs: []*Slab{}}
+ for scanner.Scan() {
+ line := scanner.Text()
+ if !shouldParseSlab(line) {
+ continue
+ }
+ slab, err := parseV21SlabEntry(line)
+ if err != nil {
+ return s, err
+ }
+ s.Slabs = append(s.Slabs, slab)
+ }
+ return s, nil
+}
+
+// SlabInfo reads data from /proc/slabinfo
+func (fs FS) SlabInfo() (SlabInfo, error) {
+ // TODO: Consider passing options to allow for parsing different
+ // slabinfo versions. However, slabinfo 2.1 has been stable since
+ // kernel 2.6.10 and later.
+ data, err := util.ReadFileNoStat(fs.proc.Path("slabinfo"))
+ if err != nil {
+ return SlabInfo{}, err
+ }
+
+ return parseSlabInfo21(bytes.NewReader(data))
+}
diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go
index b2a6fc994..6d8727541 100644
--- a/vendor/github.com/prometheus/procfs/stat.go
+++ b/vendor/github.com/prometheus/procfs/stat.go
@@ -93,10 +93,10 @@ func parseCPUStat(line string) (CPUStat, int64, error) {
&cpuStat.Guest, &cpuStat.GuestNice)
if err != nil && err != io.EOF {
- return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): %s", line, err)
+ return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): %w", line, err)
}
if count == 0 {
- return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): 0 elements parsed", line)
+ return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): 0 elements parsed", line)
}
cpuStat.User /= userHZ
@@ -116,7 +116,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) {
cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
if err != nil {
- return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu/cpuid): %s", line, err)
+ return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu/cpuid): %w", line, err)
}
return cpuStat, cpuID, nil
@@ -136,7 +136,7 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
&softIRQStat.Hrtimer, &softIRQStat.Rcu)
if err != nil {
- return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %s (softirq): %s", line, err)
+ return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %q (softirq): %w", line, err)
}
return softIRQStat, total, nil
@@ -184,34 +184,34 @@ func (fs FS) Stat() (Stat, error) {
switch {
case parts[0] == "btime":
if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %s (btime): %s", parts[1], err)
+ return Stat{}, fmt.Errorf("couldn't parse %q (btime): %w", parts[1], err)
}
case parts[0] == "intr":
if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %s (intr): %s", parts[1], err)
+ return Stat{}, fmt.Errorf("couldn't parse %q (intr): %w", parts[1], err)
}
numberedIRQs := parts[2:]
stat.IRQ = make([]uint64, len(numberedIRQs))
for i, count := range numberedIRQs {
if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %s (intr%d): %s", count, i, err)
+ return Stat{}, fmt.Errorf("couldn't parse %q (intr%d): %w", count, i, err)
}
}
case parts[0] == "ctxt":
if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %s (ctxt): %s", parts[1], err)
+ return Stat{}, fmt.Errorf("couldn't parse %q (ctxt): %w", parts[1], err)
}
case parts[0] == "processes":
if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %s (processes): %s", parts[1], err)
+ return Stat{}, fmt.Errorf("couldn't parse %q (processes): %w", parts[1], err)
}
case parts[0] == "procs_running":
if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %s (procs_running): %s", parts[1], err)
+ return Stat{}, fmt.Errorf("couldn't parse %q (procs_running): %w", parts[1], err)
}
case parts[0] == "procs_blocked":
if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %s (procs_blocked): %s", parts[1], err)
+ return Stat{}, fmt.Errorf("couldn't parse %q (procs_blocked): %w", parts[1], err)
}
case parts[0] == "softirq":
softIRQStats, total, err := parseSoftIRQStat(line)
@@ -237,7 +237,7 @@ func (fs FS) Stat() (Stat, error) {
}
if err := scanner.Err(); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %s: %s", fileName, err)
+ return Stat{}, fmt.Errorf("couldn't parse %q: %w", fileName, err)
}
return stat, nil
diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/xfrm.go
index 30aa417d5..eed07c7d7 100644
--- a/vendor/github.com/prometheus/procfs/xfrm.go
+++ b/vendor/github.com/prometheus/procfs/xfrm.go
@@ -112,8 +112,7 @@ func (fs FS) NewXfrmStat() (XfrmStat, error) {
fields := strings.Fields(s.Text())
if len(fields) != 2 {
- return XfrmStat{}, fmt.Errorf(
- "couldn't parse %s line %s", file.Name(), s.Text())
+ return XfrmStat{}, fmt.Errorf("couldn't parse %q line %q", file.Name(), s.Text())
}
name := fields[0]
diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go
index e941503d5..0b9bb6796 100644
--- a/vendor/github.com/prometheus/procfs/zoneinfo.go
+++ b/vendor/github.com/prometheus/procfs/zoneinfo.go
@@ -74,11 +74,11 @@ var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`)
func (fs FS) Zoneinfo() ([]Zoneinfo, error) {
data, err := ioutil.ReadFile(fs.proc.Path("zoneinfo"))
if err != nil {
- return nil, fmt.Errorf("error reading zoneinfo %s: %s", fs.proc.Path("zoneinfo"), err)
+ return nil, fmt.Errorf("error reading zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err)
}
zoneinfo, err := parseZoneinfo(data)
if err != nil {
- return nil, fmt.Errorf("error parsing zoneinfo %s: %s", fs.proc.Path("zoneinfo"), err)
+ return nil, fmt.Errorf("error parsing zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err)
}
return zoneinfo, nil
}
diff --git a/vendor/github.com/prometheus/prometheus/NOTICE b/vendor/github.com/prometheus/prometheus/NOTICE
index 5e4f50989..33f226d9f 100644
--- a/vendor/github.com/prometheus/prometheus/NOTICE
+++ b/vendor/github.com/prometheus/prometheus/NOTICE
@@ -91,8 +91,13 @@ https://github.com/dgryski/go-tsz
Copyright (c) 2015,2016 Damian Gryski
See https://github.com/dgryski/go-tsz/blob/master/LICENSE for license details.
+The Codicon icon font from Microsoft
+https://github.com/microsoft/vscode-codicons
+Copyright (c) Microsoft Corporation and other contributors
+See https://github.com/microsoft/vscode-codicons/blob/main/LICENSE for license details.
+
We also use code from a large number of npm packages. For details, see:
-- https://github.com/prometheus/prometheus/blob/master/web/ui/react-app/package.json
-- https://github.com/prometheus/prometheus/blob/master/web/ui/react-app/package-lock.json
+- https://github.com/prometheus/prometheus/blob/main/web/ui/react-app/package.json
+- https://github.com/prometheus/prometheus/blob/main/web/ui/react-app/package-lock.json
- The individual package licenses as copied from the node_modules directory can be found in
the npm_licenses.tar.bz2 archive in release tarballs and Docker images.
diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go
index 75904f763..dc2ed19a2 100644
--- a/vendor/github.com/prometheus/prometheus/config/config.go
+++ b/vendor/github.com/prometheus/prometheus/config/config.go
@@ -17,14 +17,19 @@ import (
"fmt"
"io/ioutil"
"net/url"
+ "os"
"path/filepath"
"regexp"
"strings"
"time"
+ "github.com/alecthomas/units"
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
"github.com/pkg/errors"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
+ "github.com/prometheus/common/sigv4"
yaml "gopkg.in/yaml.v2"
"github.com/prometheus/prometheus/discovery"
@@ -33,11 +38,34 @@ import (
)
var (
- patRulePath = regexp.MustCompile(`^[^*]*(\*[^/]*)?$`)
+ patRulePath = regexp.MustCompile(`^[^*]*(\*[^/]*)?$`)
+ reservedHeaders = map[string]struct{}{
+ // NOTE: authorization is checked specially,
+ // see RemoteWriteConfig.UnmarshalYAML.
+ // "authorization": {},
+ "host": {},
+ "content-encoding": {},
+ "content-length": {},
+ "content-type": {},
+ "user-agent": {},
+ "connection": {},
+ "keep-alive": {},
+ "proxy-authenticate": {},
+ "proxy-authorization": {},
+ "www-authenticate": {},
+ "accept-encoding": {},
+ "x-prometheus-remote-write-version": {},
+ "x-prometheus-remote-read-version": {},
+
+ // Added by SigV4.
+ "x-amz-date": {},
+ "x-amz-security-token": {},
+ "x-amz-content-sha256": {},
+ }
)
// Load parses the YAML input s into a Config.
-func Load(s string) (*Config, error) {
+func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, error) {
cfg := &Config{}
// If the entire config body is empty the UnmarshalYAML method is
// never called. We thus have to set the DefaultConfig at the entry
@@ -48,16 +76,35 @@ func Load(s string) (*Config, error) {
if err != nil {
return nil, err
}
+
+ if !expandExternalLabels {
+ return cfg, nil
+ }
+
+ for i, v := range cfg.GlobalConfig.ExternalLabels {
+ newV := os.Expand(v.Value, func(s string) string {
+ if v := os.Getenv(s); v != "" {
+ return v
+ }
+ level.Warn(logger).Log("msg", "Empty environment variable", "name", s)
+ return ""
+ })
+ if newV != v.Value {
+ level.Debug(logger).Log("msg", "External label replaced", "label", v.Name, "input", v.Value, "output", newV)
+ v.Value = newV
+ cfg.GlobalConfig.ExternalLabels[i] = v
+ }
+ }
return cfg, nil
}
// LoadFile parses the given YAML file into a Config.
-func LoadFile(filename string) (*Config, error) {
+func LoadFile(filename string, expandExternalLabels bool, logger log.Logger) (*Config, error) {
content, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
- cfg, err := Load(string(content))
+ cfg, err := Load(string(content), expandExternalLabels, logger)
if err != nil {
return nil, errors.Wrapf(err, "parsing YAML file %s", filename)
}
@@ -83,24 +130,27 @@ var (
DefaultScrapeConfig = ScrapeConfig{
// ScrapeTimeout and ScrapeInterval default to the
// configured globals.
- MetricsPath: "/metrics",
- Scheme: "http",
- HonorLabels: false,
- HonorTimestamps: true,
+ MetricsPath: "/metrics",
+ Scheme: "http",
+ HonorLabels: false,
+ HonorTimestamps: true,
+ HTTPClientConfig: config.DefaultHTTPClientConfig,
}
// DefaultAlertmanagerConfig is the default alertmanager configuration.
DefaultAlertmanagerConfig = AlertmanagerConfig{
- Scheme: "http",
- Timeout: model.Duration(10 * time.Second),
- APIVersion: AlertmanagerAPIVersionV1,
+ Scheme: "http",
+ Timeout: model.Duration(10 * time.Second),
+ APIVersion: AlertmanagerAPIVersionV2,
+ HTTPClientConfig: config.DefaultHTTPClientConfig,
}
// DefaultRemoteWriteConfig is the default remote write configuration.
DefaultRemoteWriteConfig = RemoteWriteConfig{
- RemoteTimeout: model.Duration(30 * time.Second),
- QueueConfig: DefaultQueueConfig,
- MetadataConfig: DefaultMetadataConfig,
+ RemoteTimeout: model.Duration(30 * time.Second),
+ QueueConfig: DefaultQueueConfig,
+ MetadataConfig: DefaultMetadataConfig,
+ HTTPClientConfig: config.DefaultHTTPClientConfig,
}
// DefaultQueueConfig is the default remote queue configuration.
@@ -124,13 +174,24 @@ var (
// DefaultMetadataConfig is the default metadata configuration for a remote write endpoint.
DefaultMetadataConfig = MetadataConfig{
- Send: true,
- SendInterval: model.Duration(1 * time.Minute),
+ Send: true,
+ SendInterval: model.Duration(1 * time.Minute),
+ MaxSamplesPerSend: 500,
}
// DefaultRemoteReadConfig is the default remote read configuration.
DefaultRemoteReadConfig = RemoteReadConfig{
- RemoteTimeout: model.Duration(1 * time.Minute),
+ RemoteTimeout: model.Duration(1 * time.Minute),
+ HTTPClientConfig: config.DefaultHTTPClientConfig,
+ }
+
+ // DefaultStorageConfig is the default TSDB/Exemplar storage configuration.
+ DefaultStorageConfig = StorageConfig{
+ ExemplarsConfig: &DefaultExemplarsConfig,
+ }
+
+ DefaultExemplarsConfig = ExemplarsConfig{
+ MaxExemplars: 100000,
}
)
@@ -140,6 +201,7 @@ type Config struct {
AlertingConfig AlertingConfig `yaml:"alerting,omitempty"`
RuleFiles []string `yaml:"rule_files,omitempty"`
ScrapeConfigs []*ScrapeConfig `yaml:"scrape_configs,omitempty"`
+ StorageConfig StorageConfig `yaml:"storage,omitempty"`
RemoteWriteConfigs []*RemoteWriteConfig `yaml:"remote_write,omitempty"`
RemoteReadConfigs []*RemoteReadConfig `yaml:"remote_read,omitempty"`
@@ -333,11 +395,24 @@ type ScrapeConfig struct {
MetricsPath string `yaml:"metrics_path,omitempty"`
// The URL scheme with which to fetch metrics from targets.
Scheme string `yaml:"scheme,omitempty"`
- // More than this many samples post metric-relabeling will cause the scrape to fail.
+ // An uncompressed response body larger than this many bytes will cause the
+ // scrape to fail. 0 means no limit.
+ BodySizeLimit units.Base2Bytes `yaml:"body_size_limit,omitempty"`
+ // More than this many samples post metric-relabeling will cause the scrape to
+ // fail.
SampleLimit uint `yaml:"sample_limit,omitempty"`
// More than this many targets after the target relabeling will cause the
// scrapes to fail.
TargetLimit uint `yaml:"target_limit,omitempty"`
+ // More than this many labels post metric-relabeling will cause the scrape to
+ // fail.
+ LabelLimit uint `yaml:"label_limit,omitempty"`
+ // More than this label name length post metric-relabeling will cause the
+ // scrape to fail.
+ LabelNameLengthLimit uint `yaml:"label_name_length_limit,omitempty"`
+ // More than this label value length post metric-relabeling will cause the
+ // scrape to fail.
+ LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"`
// We cannot do proper Go type embedding below as the parser will then parse
// values arbitrarily into the overflow maps of further-down types.
@@ -400,6 +475,18 @@ func (c *ScrapeConfig) MarshalYAML() (interface{}, error) {
return discovery.MarshalYAMLWithInlineConfigs(c)
}
+// StorageConfig configures runtime reloadable configuration options.
+type StorageConfig struct {
+ ExemplarsConfig *ExemplarsConfig `yaml:"exemplars,omitempty"`
+}
+
+// ExemplarsConfig configures runtime reloadable configuration options.
+type ExemplarsConfig struct {
+ // MaxExemplars sets the size, in # of exemplars stored, of the single circular buffer used to store exemplars in memory.
+ // Use a value of 0 or less than 0 to disable the storage without having to restart Prometheus.
+ MaxExemplars int64 `yaml:"max_exemplars,omitempty"`
+}
+
// AlertingConfig configures alerting and alertmanager related configs.
type AlertingConfig struct {
AlertRelabelConfigs []*relabel.Config `yaml:"alert_relabel_configs,omitempty"`
@@ -570,14 +657,17 @@ func CheckTargetAddress(address model.LabelValue) error {
type RemoteWriteConfig struct {
URL *config.URL `yaml:"url"`
RemoteTimeout model.Duration `yaml:"remote_timeout,omitempty"`
+ Headers map[string]string `yaml:"headers,omitempty"`
WriteRelabelConfigs []*relabel.Config `yaml:"write_relabel_configs,omitempty"`
Name string `yaml:"name,omitempty"`
+ SendExemplars bool `yaml:"send_exemplars,omitempty"`
// We cannot do proper Go type embedding below as the parser will then parse
// values arbitrarily into the overflow maps of further-down types.
HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
QueueConfig QueueConfig `yaml:"queue_config,omitempty"`
MetadataConfig MetadataConfig `yaml:"metadata_config,omitempty"`
+ SigV4Config *sigv4.SigV4Config `yaml:"sigv4,omitempty"`
}
// SetDirectory joins any relative file paths with dir.
@@ -600,11 +690,37 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err
return errors.New("empty or null relabeling rule in remote write config")
}
}
+ if err := validateHeaders(c.Headers); err != nil {
+ return err
+ }
// The UnmarshalYAML method of HTTPClientConfig is not being called because it's not a pointer.
// We cannot make it a pointer as the parser panics for inlined pointer structs.
// Thus we just do its validation here.
- return c.HTTPClientConfig.Validate()
+ if err := c.HTTPClientConfig.Validate(); err != nil {
+ return err
+ }
+
+ httpClientConfigAuthEnabled := c.HTTPClientConfig.BasicAuth != nil ||
+ c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil
+
+ if httpClientConfigAuthEnabled && c.SigV4Config != nil {
+ return fmt.Errorf("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured")
+ }
+
+ return nil
+}
+
+func validateHeaders(headers map[string]string) error {
+ for header := range headers {
+ if strings.ToLower(header) == "authorization" {
+ return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, or sigv4 parameter")
+ }
+ if _, ok := reservedHeaders[strings.ToLower(header)]; ok {
+ return errors.Errorf("%s is a reserved header. It must not be changed", header)
+ }
+ }
+ return nil
}
// QueueConfig is the configuration for the queue used to write to remote
@@ -627,8 +743,9 @@ type QueueConfig struct {
BatchSendDeadline model.Duration `yaml:"batch_send_deadline,omitempty"`
// On recoverable errors, backoff exponentially.
- MinBackoff model.Duration `yaml:"min_backoff,omitempty"`
- MaxBackoff model.Duration `yaml:"max_backoff,omitempty"`
+ MinBackoff model.Duration `yaml:"min_backoff,omitempty"`
+ MaxBackoff model.Duration `yaml:"max_backoff,omitempty"`
+ RetryOnRateLimit bool `yaml:"retry_on_http_429,omitempty"`
}
// MetadataConfig is the configuration for sending metadata to remote
@@ -638,14 +755,17 @@ type MetadataConfig struct {
Send bool `yaml:"send"`
// SendInterval controls how frequently we send metric metadata.
SendInterval model.Duration `yaml:"send_interval"`
+ // Maximum number of samples per send.
+ MaxSamplesPerSend int `yaml:"max_samples_per_send,omitempty"`
}
// RemoteReadConfig is the configuration for reading from remote storage.
type RemoteReadConfig struct {
- URL *config.URL `yaml:"url"`
- RemoteTimeout model.Duration `yaml:"remote_timeout,omitempty"`
- ReadRecent bool `yaml:"read_recent,omitempty"`
- Name string `yaml:"name,omitempty"`
+ URL *config.URL `yaml:"url"`
+ RemoteTimeout model.Duration `yaml:"remote_timeout,omitempty"`
+ Headers map[string]string `yaml:"headers,omitempty"`
+ ReadRecent bool `yaml:"read_recent,omitempty"`
+ Name string `yaml:"name,omitempty"`
// We cannot do proper Go type embedding below as the parser will then parse
// values arbitrarily into the overflow maps of further-down types.
@@ -671,6 +791,9 @@ func (c *RemoteReadConfig) UnmarshalYAML(unmarshal func(interface{}) error) erro
if c.URL == nil {
return errors.New("url for remote_read is empty")
}
+ if err := validateHeaders(c.Headers); err != nil {
+ return err
+ }
// The UnmarshalYAML method of HTTPClientConfig is not being called because it's not a pointer.
// We cannot make it a pointer as the parser panics for inlined pointer structs.
// Thus we just do its validation here.
diff --git a/vendor/github.com/prometheus/prometheus/discovery/README.md b/vendor/github.com/prometheus/prometheus/discovery/README.md
index f7d7120dd..19b579b39 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/README.md
+++ b/vendor/github.com/prometheus/prometheus/discovery/README.md
@@ -131,7 +131,7 @@ the Prometheus server will be able to see them.
### The SD interface
-A Service Discovery (SD) mechanism has to discover targets and provide them to Prometheus. We expect similar targets to be grouped together, in the form of a [target group](https://godoc.org/github.com/prometheus/prometheus/discovery/targetgroup#Group). The SD mechanism sends the targets down to prometheus as list of target groups.
+A Service Discovery (SD) mechanism has to discover targets and provide them to Prometheus. We expect similar targets to be grouped together, in the form of a [target group](https://pkg.go.dev/github.com/prometheus/prometheus/discovery/targetgroup#Group). The SD mechanism sends the targets down to prometheus as a list of target groups.
An SD mechanism has to implement the `Discoverer` Interface:
```go
@@ -259,3 +259,9 @@ Here are some non-obvious parts of adding service discoveries that need to be ve
`` in `docs/configuration/configuration.md`.
+
+### Examples of Service Discovery pull requests
+
+The examples given might become out of date but should give a good impression of the areas touched by a new service discovery.
+
+- [Eureka](https://github.com/prometheus/prometheus/pull/3369)
diff --git a/vendor/github.com/prometheus/prometheus/discovery/aws/ec2.go b/vendor/github.com/prometheus/prometheus/discovery/aws/ec2.go
new file mode 100644
index 000000000..0bcfd0547
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/discovery/aws/ec2.go
@@ -0,0 +1,339 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aws
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
+ "github.com/aws/aws-sdk-go/aws/ec2metadata"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/ec2"
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
+ "github.com/pkg/errors"
+ "github.com/prometheus/common/config"
+ "github.com/prometheus/common/model"
+
+ "github.com/prometheus/prometheus/discovery"
+ "github.com/prometheus/prometheus/discovery/refresh"
+ "github.com/prometheus/prometheus/discovery/targetgroup"
+ "github.com/prometheus/prometheus/util/strutil"
+)
+
+const (
+ ec2Label = model.MetaLabelPrefix + "ec2_"
+ ec2LabelAMI = ec2Label + "ami"
+ ec2LabelAZ = ec2Label + "availability_zone"
+ ec2LabelAZID = ec2Label + "availability_zone_id"
+ ec2LabelArch = ec2Label + "architecture"
+ ec2LabelIPv6Addresses = ec2Label + "ipv6_addresses"
+ ec2LabelInstanceID = ec2Label + "instance_id"
+ ec2LabelInstanceLifecycle = ec2Label + "instance_lifecycle"
+ ec2LabelInstanceState = ec2Label + "instance_state"
+ ec2LabelInstanceType = ec2Label + "instance_type"
+ ec2LabelOwnerID = ec2Label + "owner_id"
+ ec2LabelPlatform = ec2Label + "platform"
+ ec2LabelPrimarySubnetID = ec2Label + "primary_subnet_id"
+ ec2LabelPrivateDNS = ec2Label + "private_dns_name"
+ ec2LabelPrivateIP = ec2Label + "private_ip"
+ ec2LabelPublicDNS = ec2Label + "public_dns_name"
+ ec2LabelPublicIP = ec2Label + "public_ip"
+ ec2LabelSubnetID = ec2Label + "subnet_id"
+ ec2LabelTag = ec2Label + "tag_"
+ ec2LabelVPCID = ec2Label + "vpc_id"
+ ec2LabelSeparator = ","
+)
+
+var (
+ // DefaultEC2SDConfig is the default EC2 SD configuration.
+ DefaultEC2SDConfig = EC2SDConfig{
+ Port: 80,
+ RefreshInterval: model.Duration(60 * time.Second),
+ }
+)
+
+func init() {
+ discovery.RegisterConfig(&EC2SDConfig{})
+}
+
+// EC2Filter is the configuration for filtering EC2 instances.
+type EC2Filter struct {
+ Name string `yaml:"name"`
+ Values []string `yaml:"values"`
+}
+
+// EC2SDConfig is the configuration for EC2 based service discovery.
+type EC2SDConfig struct {
+ Endpoint string `yaml:"endpoint"`
+ Region string `yaml:"region"`
+ AccessKey string `yaml:"access_key,omitempty"`
+ SecretKey config.Secret `yaml:"secret_key,omitempty"`
+ Profile string `yaml:"profile,omitempty"`
+ RoleARN string `yaml:"role_arn,omitempty"`
+ RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
+ Port int `yaml:"port"`
+ Filters []*EC2Filter `yaml:"filters"`
+}
+
+// Name returns the name of the EC2 Config.
+func (*EC2SDConfig) Name() string { return "ec2" }
+
+// NewDiscoverer returns a Discoverer for the EC2 Config.
+func (c *EC2SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
+ return NewEC2Discovery(c, opts.Logger), nil
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface for the EC2 Config.
+func (c *EC2SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ *c = DefaultEC2SDConfig
+ type plain EC2SDConfig
+ err := unmarshal((*plain)(c))
+ if err != nil {
+ return err
+ }
+ if c.Region == "" {
+ sess, err := session.NewSession()
+ if err != nil {
+ return err
+ }
+ metadata := ec2metadata.New(sess)
+ region, err := metadata.Region()
+ if err != nil {
+ return errors.New("EC2 SD configuration requires a region")
+ }
+ c.Region = region
+ }
+ for _, f := range c.Filters {
+ if len(f.Values) == 0 {
+ return errors.New("EC2 SD configuration filter values cannot be empty")
+ }
+ }
+ return nil
+}
+
+// EC2Discovery periodically performs EC2-SD requests. It implements
+// the Discoverer interface.
+type EC2Discovery struct {
+ *refresh.Discovery
+ logger log.Logger
+ cfg *EC2SDConfig
+ ec2 *ec2.EC2
+
+ // azToAZID maps this account's availability zones to their underlying AZ
+ // ID, e.g. eu-west-2a -> euw2-az2. Refreshes are performed sequentially, so
+ // no locking is required.
+ azToAZID map[string]string
+}
+
+// NewEC2Discovery returns a new EC2Discovery which periodically refreshes its targets.
+func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger) *EC2Discovery {
+ if logger == nil {
+ logger = log.NewNopLogger()
+ }
+ d := &EC2Discovery{
+ logger: logger,
+ cfg: conf,
+ }
+ d.Discovery = refresh.NewDiscovery(
+ logger,
+ "ec2",
+ time.Duration(d.cfg.RefreshInterval),
+ d.refresh,
+ )
+ return d
+}
+
+func (d *EC2Discovery) ec2Client(ctx context.Context) (*ec2.EC2, error) {
+ if d.ec2 != nil {
+ return d.ec2, nil
+ }
+
+ creds := credentials.NewStaticCredentials(d.cfg.AccessKey, string(d.cfg.SecretKey), "")
+ if d.cfg.AccessKey == "" && d.cfg.SecretKey == "" {
+ creds = nil
+ }
+
+ sess, err := session.NewSessionWithOptions(session.Options{
+ Config: aws.Config{
+ Endpoint: &d.cfg.Endpoint,
+ Region: &d.cfg.Region,
+ Credentials: creds,
+ },
+ Profile: d.cfg.Profile,
+ })
+ if err != nil {
+ return nil, errors.Wrap(err, "could not create aws session")
+ }
+
+ if d.cfg.RoleARN != "" {
+ creds := stscreds.NewCredentials(sess, d.cfg.RoleARN)
+ d.ec2 = ec2.New(sess, &aws.Config{Credentials: creds})
+ } else {
+ d.ec2 = ec2.New(sess)
+ }
+
+ return d.ec2, nil
+}
+
+func (d *EC2Discovery) refreshAZIDs(ctx context.Context) error {
+ azs, err := d.ec2.DescribeAvailabilityZonesWithContext(ctx, &ec2.DescribeAvailabilityZonesInput{})
+ if err != nil {
+ return err
+ }
+ d.azToAZID = make(map[string]string, len(azs.AvailabilityZones))
+ for _, az := range azs.AvailabilityZones {
+ d.azToAZID[*az.ZoneName] = *az.ZoneId
+ }
+ return nil
+}
+
+func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
+ ec2Client, err := d.ec2Client(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ tg := &targetgroup.Group{
+ Source: d.cfg.Region,
+ }
+
+ var filters []*ec2.Filter
+ for _, f := range d.cfg.Filters {
+ filters = append(filters, &ec2.Filter{
+ Name: aws.String(f.Name),
+ Values: aws.StringSlice(f.Values),
+ })
+ }
+
+ // Only refresh the AZ ID map if we have never been able to build one.
+ // Prometheus requires a reload if AWS adds a new AZ to the region.
+ if d.azToAZID == nil {
+ if err := d.refreshAZIDs(ctx); err != nil {
+ level.Debug(d.logger).Log(
+ "msg", "Unable to describe availability zones",
+ "err", err)
+ }
+ }
+
+ input := &ec2.DescribeInstancesInput{Filters: filters}
+ if err := ec2Client.DescribeInstancesPagesWithContext(ctx, input, func(p *ec2.DescribeInstancesOutput, lastPage bool) bool {
+ for _, r := range p.Reservations {
+ for _, inst := range r.Instances {
+ if inst.PrivateIpAddress == nil {
+ continue
+ }
+
+ labels := model.LabelSet{
+ ec2LabelInstanceID: model.LabelValue(*inst.InstanceId),
+ }
+
+ if r.OwnerId != nil {
+ labels[ec2LabelOwnerID] = model.LabelValue(*r.OwnerId)
+ }
+
+ labels[ec2LabelPrivateIP] = model.LabelValue(*inst.PrivateIpAddress)
+ if inst.PrivateDnsName != nil {
+ labels[ec2LabelPrivateDNS] = model.LabelValue(*inst.PrivateDnsName)
+ }
+ addr := net.JoinHostPort(*inst.PrivateIpAddress, fmt.Sprintf("%d", d.cfg.Port))
+ labels[model.AddressLabel] = model.LabelValue(addr)
+
+ if inst.Platform != nil {
+ labels[ec2LabelPlatform] = model.LabelValue(*inst.Platform)
+ }
+
+ if inst.PublicIpAddress != nil {
+ labels[ec2LabelPublicIP] = model.LabelValue(*inst.PublicIpAddress)
+ labels[ec2LabelPublicDNS] = model.LabelValue(*inst.PublicDnsName)
+ }
+ labels[ec2LabelAMI] = model.LabelValue(*inst.ImageId)
+ labels[ec2LabelAZ] = model.LabelValue(*inst.Placement.AvailabilityZone)
+ azID, ok := d.azToAZID[*inst.Placement.AvailabilityZone]
+ if !ok && d.azToAZID != nil {
+ level.Debug(d.logger).Log(
+ "msg", "Availability zone ID not found",
+ "az", *inst.Placement.AvailabilityZone)
+ }
+ labels[ec2LabelAZID] = model.LabelValue(azID)
+ labels[ec2LabelInstanceState] = model.LabelValue(*inst.State.Name)
+ labels[ec2LabelInstanceType] = model.LabelValue(*inst.InstanceType)
+
+ if inst.InstanceLifecycle != nil {
+ labels[ec2LabelInstanceLifecycle] = model.LabelValue(*inst.InstanceLifecycle)
+ }
+
+ if inst.Architecture != nil {
+ labels[ec2LabelArch] = model.LabelValue(*inst.Architecture)
+ }
+
+ if inst.VpcId != nil {
+ labels[ec2LabelVPCID] = model.LabelValue(*inst.VpcId)
+ labels[ec2LabelPrimarySubnetID] = model.LabelValue(*inst.SubnetId)
+
+ var subnets []string
+ var ipv6addrs []string
+ subnetsMap := make(map[string]struct{})
+ for _, eni := range inst.NetworkInterfaces {
+ if eni.SubnetId == nil {
+ continue
+ }
+ // Deduplicate VPC Subnet IDs maintaining the order of the subnets returned by EC2.
+ if _, ok := subnetsMap[*eni.SubnetId]; !ok {
+ subnetsMap[*eni.SubnetId] = struct{}{}
+ subnets = append(subnets, *eni.SubnetId)
+ }
+
+ for _, ipv6addr := range eni.Ipv6Addresses {
+ ipv6addrs = append(ipv6addrs, *ipv6addr.Ipv6Address)
+ }
+ }
+ labels[ec2LabelSubnetID] = model.LabelValue(
+ ec2LabelSeparator +
+ strings.Join(subnets, ec2LabelSeparator) +
+ ec2LabelSeparator)
+ if len(ipv6addrs) > 0 {
+ labels[ec2LabelIPv6Addresses] = model.LabelValue(
+ ec2LabelSeparator +
+ strings.Join(ipv6addrs, ec2LabelSeparator) +
+ ec2LabelSeparator)
+ }
+ }
+
+ for _, t := range inst.Tags {
+ if t == nil || t.Key == nil || t.Value == nil {
+ continue
+ }
+ name := strutil.SanitizeLabelName(*t.Key)
+ labels[ec2LabelTag+model.LabelName(name)] = model.LabelValue(*t.Value)
+ }
+ tg.Targets = append(tg.Targets, labels)
+ }
+ }
+ return true
+ }); err != nil {
+ if awsErr, ok := err.(awserr.Error); ok && (awsErr.Code() == "AuthFailure" || awsErr.Code() == "UnauthorizedOperation") {
+ d.ec2 = nil
+ }
+ return nil, errors.Wrap(err, "could not describe instances")
+ }
+ return []*targetgroup.Group{tg}, nil
+}
diff --git a/vendor/github.com/prometheus/prometheus/discovery/aws/lightsail.go b/vendor/github.com/prometheus/prometheus/discovery/aws/lightsail.go
new file mode 100644
index 000000000..e3dc65b5d
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/discovery/aws/lightsail.go
@@ -0,0 +1,234 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aws
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
+ "github.com/aws/aws-sdk-go/aws/ec2metadata"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/lightsail"
+ "github.com/go-kit/log"
+ "github.com/pkg/errors"
+ "github.com/prometheus/common/config"
+ "github.com/prometheus/common/model"
+
+ "github.com/prometheus/prometheus/discovery"
+ "github.com/prometheus/prometheus/discovery/refresh"
+ "github.com/prometheus/prometheus/discovery/targetgroup"
+ "github.com/prometheus/prometheus/util/strutil"
+)
+
+const (
+ lightsailLabel = model.MetaLabelPrefix + "lightsail_"
+ lightsailLabelAZ = lightsailLabel + "availability_zone"
+ lightsailLabelBlueprintID = lightsailLabel + "blueprint_id"
+ lightsailLabelBundleID = lightsailLabel + "bundle_id"
+ lightsailLabelInstanceName = lightsailLabel + "instance_name"
+ lightsailLabelInstanceState = lightsailLabel + "instance_state"
+ lightsailLabelInstanceSupportCode = lightsailLabel + "instance_support_code"
+ lightsailLabelIPv6Addresses = lightsailLabel + "ipv6_addresses"
+ lightsailLabelPrivateIP = lightsailLabel + "private_ip"
+ lightsailLabelPublicIP = lightsailLabel + "public_ip"
+ lightsailLabelTag = lightsailLabel + "tag_"
+ lightsailLabelSeparator = ","
+)
+
+var (
+ // DefaultLightsailSDConfig is the default Lightsail SD configuration.
+ DefaultLightsailSDConfig = LightsailSDConfig{
+ Port: 80,
+ RefreshInterval: model.Duration(60 * time.Second),
+ }
+)
+
+func init() {
+ discovery.RegisterConfig(&LightsailSDConfig{})
+}
+
+// LightsailSDConfig is the configuration for Lightsail based service discovery.
+type LightsailSDConfig struct {
+ Endpoint string `yaml:"endpoint"`
+ Region string `yaml:"region"`
+ AccessKey string `yaml:"access_key,omitempty"`
+ SecretKey config.Secret `yaml:"secret_key,omitempty"`
+ Profile string `yaml:"profile,omitempty"`
+ RoleARN string `yaml:"role_arn,omitempty"`
+ RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
+ Port int `yaml:"port"`
+}
+
+// Name returns the name of the Lightsail Config.
+func (*LightsailSDConfig) Name() string { return "lightsail" }
+
+// NewDiscoverer returns a Discoverer for the Lightsail Config.
+func (c *LightsailSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
+ return NewLightsailDiscovery(c, opts.Logger), nil
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface for the Lightsail Config.
+func (c *LightsailSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ *c = DefaultLightsailSDConfig
+ type plain LightsailSDConfig
+ err := unmarshal((*plain)(c))
+ if err != nil {
+ return err
+ }
+ if c.Region == "" {
+ sess, err := session.NewSession()
+ if err != nil {
+ return err
+ }
+
+ metadata := ec2metadata.New(sess)
+
+ region, err := metadata.Region()
+ if err != nil {
+ return errors.New("Lightsail SD configuration requires a region")
+ }
+ c.Region = region
+ }
+ return nil
+}
+
+// LightsailDiscovery periodically performs Lightsail-SD requests. It implements
+// the Discoverer interface.
+type LightsailDiscovery struct {
+ *refresh.Discovery
+ cfg *LightsailSDConfig
+ lightsail *lightsail.Lightsail
+}
+
+// NewLightsailDiscovery returns a new LightsailDiscovery which periodically refreshes its targets.
+func NewLightsailDiscovery(conf *LightsailSDConfig, logger log.Logger) *LightsailDiscovery {
+ if logger == nil {
+ logger = log.NewNopLogger()
+ }
+ d := &LightsailDiscovery{
+ cfg: conf,
+ }
+ d.Discovery = refresh.NewDiscovery(
+ logger,
+ "lightsail",
+ time.Duration(d.cfg.RefreshInterval),
+ d.refresh,
+ )
+ return d
+}
+
+func (d *LightsailDiscovery) lightsailClient() (*lightsail.Lightsail, error) {
+ if d.lightsail != nil {
+ return d.lightsail, nil
+ }
+
+ creds := credentials.NewStaticCredentials(d.cfg.AccessKey, string(d.cfg.SecretKey), "")
+ if d.cfg.AccessKey == "" && d.cfg.SecretKey == "" {
+ creds = nil
+ }
+
+ sess, err := session.NewSessionWithOptions(session.Options{
+ Config: aws.Config{
+ Endpoint: &d.cfg.Endpoint,
+ Region: &d.cfg.Region,
+ Credentials: creds,
+ },
+ Profile: d.cfg.Profile,
+ })
+ if err != nil {
+ return nil, errors.Wrap(err, "could not create aws session")
+ }
+
+ if d.cfg.RoleARN != "" {
+ creds := stscreds.NewCredentials(sess, d.cfg.RoleARN)
+ d.lightsail = lightsail.New(sess, &aws.Config{Credentials: creds})
+ } else {
+ d.lightsail = lightsail.New(sess)
+ }
+
+ return d.lightsail, nil
+}
+
+func (d *LightsailDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
+ lightsailClient, err := d.lightsailClient()
+ if err != nil {
+ return nil, err
+ }
+
+ tg := &targetgroup.Group{
+ Source: d.cfg.Region,
+ }
+
+ input := &lightsail.GetInstancesInput{}
+
+ output, err := lightsailClient.GetInstancesWithContext(ctx, input)
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok && (awsErr.Code() == "AuthFailure" || awsErr.Code() == "UnauthorizedOperation") {
+ d.lightsail = nil
+ }
+ return nil, errors.Wrap(err, "could not get instances")
+ }
+
+ for _, inst := range output.Instances {
+ if inst.PrivateIpAddress == nil {
+ continue
+ }
+
+ labels := model.LabelSet{
+ lightsailLabelAZ: model.LabelValue(*inst.Location.AvailabilityZone),
+ lightsailLabelBlueprintID: model.LabelValue(*inst.BlueprintId),
+ lightsailLabelBundleID: model.LabelValue(*inst.BundleId),
+ lightsailLabelInstanceName: model.LabelValue(*inst.Name),
+ lightsailLabelInstanceState: model.LabelValue(*inst.State.Name),
+ lightsailLabelInstanceSupportCode: model.LabelValue(*inst.SupportCode),
+ lightsailLabelPrivateIP: model.LabelValue(*inst.PrivateIpAddress),
+ }
+
+ addr := net.JoinHostPort(*inst.PrivateIpAddress, fmt.Sprintf("%d", d.cfg.Port))
+ labels[model.AddressLabel] = model.LabelValue(addr)
+
+ if inst.PublicIpAddress != nil {
+ labels[lightsailLabelPublicIP] = model.LabelValue(*inst.PublicIpAddress)
+ }
+
+ if len(inst.Ipv6Addresses) > 0 {
+ var ipv6addrs []string
+ for _, ipv6addr := range inst.Ipv6Addresses {
+ ipv6addrs = append(ipv6addrs, *ipv6addr)
+ }
+ labels[lightsailLabelIPv6Addresses] = model.LabelValue(
+ lightsailLabelSeparator +
+ strings.Join(ipv6addrs, lightsailLabelSeparator) +
+ lightsailLabelSeparator)
+ }
+
+ for _, t := range inst.Tags {
+ if t == nil || t.Key == nil || t.Value == nil {
+ continue
+ }
+ name := strutil.SanitizeLabelName(*t.Key)
+ labels[lightsailLabelTag+model.LabelName(name)] = model.LabelValue(*t.Value)
+ }
+
+ tg.Targets = append(tg.Targets, labels)
+ }
+ return []*targetgroup.Group{tg}, nil
+}
diff --git a/vendor/github.com/prometheus/prometheus/discovery/azure/azure.go b/vendor/github.com/prometheus/prometheus/discovery/azure/azure.go
index d3397861b..6de0995ef 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/azure/azure.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/azure/azure.go
@@ -27,8 +27,8 @@ import (
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/go-autorest/autorest/azure"
- "github.com/go-kit/kit/log"
- "github.com/go-kit/kit/log/level"
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
"github.com/pkg/errors"
config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/model"
@@ -46,6 +46,7 @@ const (
azureLabelMachineID = azureLabel + "machine_id"
azureLabelMachineResourceGroup = azureLabel + "machine_resource_group"
azureLabelMachineName = azureLabel + "machine_name"
+ azureLabelMachineComputerName = azureLabel + "machine_computer_name"
azureLabelMachineOSType = azureLabel + "machine_os_type"
azureLabelMachineLocation = azureLabel + "machine_location"
azureLabelMachinePrivateIP = azureLabel + "machine_private_ip"
@@ -226,6 +227,7 @@ type azureResource struct {
type virtualMachine struct {
ID string
Name string
+ ComputerName string
Type string
Location string
OsType string
@@ -306,6 +308,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
azureLabelTenantID: model.LabelValue(d.cfg.TenantID),
azureLabelMachineID: model.LabelValue(vm.ID),
azureLabelMachineName: model.LabelValue(vm.Name),
+ azureLabelMachineComputerName: model.LabelValue(vm.ComputerName),
azureLabelMachineOSType: model.LabelValue(vm.OsType),
azureLabelMachineLocation: model.LabelValue(vm.Location),
azureLabelMachineResourceGroup: model.LabelValue(r.ResourceGroup),
@@ -373,7 +376,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
var tg targetgroup.Group
for tgt := range ch {
if tgt.err != nil {
- return nil, errors.Wrap(err, "unable to complete Azure service discovery")
+ return nil, errors.Wrap(tgt.err, "unable to complete Azure service discovery")
}
if tgt.labelSet != nil {
tg.Targets = append(tg.Targets, tgt.labelSet)
@@ -449,6 +452,7 @@ func mapFromVM(vm compute.VirtualMachine) virtualMachine {
osType := string(vm.StorageProfile.OsDisk.OsType)
tags := map[string]*string{}
networkInterfaces := []string{}
+ var computerName string
if vm.Tags != nil {
tags = vm.Tags
@@ -460,9 +464,14 @@ func mapFromVM(vm compute.VirtualMachine) virtualMachine {
}
}
+ if vm.VirtualMachineProperties != nil && vm.VirtualMachineProperties.OsProfile != nil {
+ computerName = *(vm.VirtualMachineProperties.OsProfile.ComputerName)
+ }
+
return virtualMachine{
ID: *(vm.ID),
Name: *(vm.Name),
+ ComputerName: computerName,
Type: *(vm.Type),
Location: *(vm.Location),
OsType: osType,
@@ -476,6 +485,7 @@ func mapFromVMScaleSetVM(vm compute.VirtualMachineScaleSetVM, scaleSetName strin
osType := string(vm.StorageProfile.OsDisk.OsType)
tags := map[string]*string{}
networkInterfaces := []string{}
+ var computerName string
if vm.Tags != nil {
tags = vm.Tags
@@ -487,9 +497,14 @@ func mapFromVMScaleSetVM(vm compute.VirtualMachineScaleSetVM, scaleSetName strin
}
}
+ if vm.VirtualMachineScaleSetVMProperties != nil && vm.VirtualMachineScaleSetVMProperties.OsProfile != nil {
+ computerName = *(vm.VirtualMachineScaleSetVMProperties.OsProfile.ComputerName)
+ }
+
return virtualMachine{
ID: *(vm.ID),
Name: *(vm.Name),
+ ComputerName: computerName,
Type: *(vm.Type),
Location: *(vm.Location),
OsType: osType,
diff --git a/vendor/github.com/prometheus/prometheus/discovery/consul/consul.go b/vendor/github.com/prometheus/prometheus/discovery/consul/consul.go
index ee2734def..19dc9ecca 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/consul/consul.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/consul/consul.go
@@ -17,15 +17,13 @@ import (
"context"
"fmt"
"net"
- "net/http"
"strconv"
"strings"
"time"
- "github.com/go-kit/kit/log"
- "github.com/go-kit/kit/log/level"
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
consul "github.com/hashicorp/consul/api"
- conntrack "github.com/mwitkow/go-conntrack"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config"
@@ -60,6 +58,8 @@ const (
servicePortLabel = model.MetaLabelPrefix + "consul_service_port"
// datacenterLabel is the name of the label containing the datacenter ID.
datacenterLabel = model.MetaLabelPrefix + "consul_dc"
+ // namespaceLabel is the name of the label containing the namespace (Consul Enterprise only).
+ namespaceLabel = model.MetaLabelPrefix + "consul_namespace"
// taggedAddressesLabel is the prefix for the labels mapping to a target's tagged addresses.
taggedAddressesLabel = model.MetaLabelPrefix + "consul_tagged_address_"
// serviceIDLabel is the name of the label containing the service ID.
@@ -92,18 +92,18 @@ var (
// DefaultSDConfig is the default Consul SD configuration.
DefaultSDConfig = SDConfig{
- TagSeparator: ",",
- Scheme: "http",
- Server: "localhost:8500",
- AllowStale: true,
- RefreshInterval: model.Duration(30 * time.Second),
+ TagSeparator: ",",
+ Scheme: "http",
+ Server: "localhost:8500",
+ AllowStale: true,
+ RefreshInterval: model.Duration(30 * time.Second),
+ HTTPClientConfig: config.DefaultHTTPClientConfig,
}
)
func init() {
discovery.RegisterConfig(&SDConfig{})
- prometheus.MustRegister(rpcFailuresCount)
- prometheus.MustRegister(rpcDuration)
+ prometheus.MustRegister(rpcFailuresCount, rpcDuration)
}
// SDConfig is the configuration for Consul service discovery.
@@ -111,6 +111,7 @@ type SDConfig struct {
Server string `yaml:"server,omitempty"`
Token config.Secret `yaml:"token,omitempty"`
Datacenter string `yaml:"datacenter,omitempty"`
+ Namespace string `yaml:"namespace,omitempty"`
TagSeparator string `yaml:"tag_separator,omitempty"`
Scheme string `yaml:"scheme,omitempty"`
Username string `yaml:"username,omitempty"`
@@ -134,7 +135,7 @@ type SDConfig struct {
// Desired node metadata.
NodeMeta map[string]string `yaml:"node_meta,omitempty"`
- TLSConfig config.TLSConfig `yaml:"tls_config,omitempty"`
+ HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
}
// Name returns the name of the Config.
@@ -147,7 +148,7 @@ func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Di
// SetDirectory joins any relative file paths with dir.
func (c *SDConfig) SetDirectory(dir string) {
- c.TLSConfig.SetDirectory(dir)
+ c.HTTPClientConfig.SetDirectory(dir)
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
@@ -161,7 +162,19 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
if strings.TrimSpace(c.Server) == "" {
return errors.New("consul SD configuration requires a server address")
}
- return nil
+ if c.Username != "" || c.Password != "" {
+ if c.HTTPClientConfig.BasicAuth != nil {
+ return errors.New("at most one of consul SD configuration username and password and basic auth can be configured")
+ }
+ c.HTTPClientConfig.BasicAuth = &config.BasicAuth{
+ Username: c.Username,
+ Password: c.Password,
+ }
+ }
+ if c.Token != "" && (c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil) {
+ return errors.New("at most one of consul SD token, authorization, or oauth2 can be configured")
+ }
+ return c.HTTPClientConfig.Validate()
}
// Discovery retrieves target information from a Consul server
@@ -169,6 +182,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
type Discovery struct {
client *consul.Client
clientDatacenter string
+ clientNamespace string
tagSeparator string
watchedServices []string // Set of services which will be discovered.
watchedTags []string // Tags used to filter instances of a service.
@@ -185,32 +199,18 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
logger = log.NewNopLogger()
}
- tls, err := config.NewTLSConfig(&conf.TLSConfig)
+ wrapper, err := config.NewClientFromConfig(conf.HTTPClientConfig, "consul_sd", config.WithHTTP2Disabled(), config.WithIdleConnTimeout(2*watchTimeout))
if err != nil {
return nil, err
}
- transport := &http.Transport{
- IdleConnTimeout: 2 * time.Duration(watchTimeout),
- TLSClientConfig: tls,
- DialContext: conntrack.NewDialContextFunc(
- conntrack.DialWithTracing(),
- conntrack.DialWithName("consul_sd"),
- ),
- }
- wrapper := &http.Client{
- Transport: transport,
- Timeout: time.Duration(watchTimeout) + 15*time.Second,
- }
+ wrapper.Timeout = watchTimeout + 15*time.Second
clientConf := &consul.Config{
Address: conf.Server,
Scheme: conf.Scheme,
Datacenter: conf.Datacenter,
+ Namespace: conf.Namespace,
Token: string(conf.Token),
- HttpAuth: &consul.HttpBasicAuth{
- Username: conf.Username,
- Password: string(conf.Password),
- },
HttpClient: wrapper,
}
client, err := consul.NewClient(clientConf)
@@ -226,7 +226,8 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
allowStale: conf.AllowStale,
refreshInterval: time.Duration(conf.RefreshInterval),
clientDatacenter: conf.Datacenter,
- finalizer: transport.CloseIdleConnections,
+ clientNamespace: conf.Namespace,
+ finalizer: wrapper.CloseIdleConnections,
logger: logger,
}
return cd, nil
@@ -426,6 +427,15 @@ func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup.
}
}
}
+
+ // Send targetgroup with no targets if nothing was discovered.
+ if len(services) == 0 {
+ select {
+ case <-ctx.Done():
+ return
+ case ch <- []*targetgroup.Group{{}}:
+ }
+ }
}
// consulService contains data belonging to the same service.
@@ -535,6 +545,7 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr
model.AddressLabel: model.LabelValue(addr),
addressLabel: model.LabelValue(serviceNode.Node.Address),
nodeLabel: model.LabelValue(serviceNode.Node.Node),
+ namespaceLabel: model.LabelValue(serviceNode.Service.Namespace),
tagsLabel: model.LabelValue(tags),
serviceAddressLabel: model.LabelValue(serviceNode.Service.Address),
servicePortLabel: model.LabelValue(strconv.Itoa(serviceNode.Service.Port)),
diff --git a/vendor/github.com/prometheus/prometheus/discovery/digitalocean/digitalocean.go b/vendor/github.com/prometheus/prometheus/discovery/digitalocean/digitalocean.go
index 25436e2e4..b887ada04 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/digitalocean/digitalocean.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/digitalocean/digitalocean.go
@@ -23,7 +23,7 @@ import (
"time"
"github.com/digitalocean/godo"
- "github.com/go-kit/kit/log"
+ "github.com/go-kit/log"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/common/version"
@@ -38,6 +38,7 @@ const (
doLabelID = doLabel + "droplet_id"
doLabelName = doLabel + "droplet_name"
doLabelImage = doLabel + "image"
+ doLabelImageName = doLabel + "image_name"
doLabelPrivateIPv4 = doLabel + "private_ipv4"
doLabelPublicIPv4 = doLabel + "public_ipv4"
doLabelPublicIPv6 = doLabel + "public_ipv6"
@@ -46,13 +47,15 @@ const (
doLabelStatus = doLabel + "status"
doLabelFeatures = doLabel + "features"
doLabelTags = doLabel + "tags"
+ doLabelVPC = doLabel + "vpc"
separator = ","
)
// DefaultSDConfig is the default DigitalOcean SD configuration.
var DefaultSDConfig = SDConfig{
- Port: 80,
- RefreshInterval: model.Duration(60 * time.Second),
+ Port: 80,
+ RefreshInterval: model.Duration(60 * time.Second),
+ HTTPClientConfig: config.DefaultHTTPClientConfig,
}
func init() {
@@ -88,7 +91,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
if err != nil {
return err
}
- return nil
+ return c.HTTPClientConfig.Validate()
}
// Discovery periodically performs DigitalOcean requests. It implements
@@ -105,7 +108,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
port: conf.Port,
}
- rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "digitalocean_sd", false, false)
+ rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "digitalocean_sd", config.WithHTTP2Disabled())
if err != nil {
return nil, err
}
@@ -161,12 +164,14 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
doLabelID: model.LabelValue(fmt.Sprintf("%d", droplet.ID)),
doLabelName: model.LabelValue(droplet.Name),
doLabelImage: model.LabelValue(droplet.Image.Slug),
+ doLabelImageName: model.LabelValue(droplet.Image.Name),
doLabelPrivateIPv4: model.LabelValue(privateIPv4),
doLabelPublicIPv4: model.LabelValue(publicIPv4),
doLabelPublicIPv6: model.LabelValue(publicIPv6),
doLabelRegion: model.LabelValue(droplet.Region.Slug),
doLabelSize: model.LabelValue(droplet.SizeSlug),
doLabelStatus: model.LabelValue(droplet.Status),
+ doLabelVPC: model.LabelValue(droplet.VPCUUID),
}
addr := net.JoinHostPort(publicIPv4, strconv.FormatUint(uint64(d.port), 10))
diff --git a/vendor/github.com/prometheus/prometheus/discovery/discovery.go b/vendor/github.com/prometheus/prometheus/discovery/discovery.go
index 5b0402bdb..f2b87d992 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/discovery.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/discovery.go
@@ -17,7 +17,7 @@ import (
"context"
"reflect"
- "github.com/go-kit/kit/log"
+ "github.com/go-kit/log"
"github.com/prometheus/common/config"
"github.com/prometheus/prometheus/discovery/targetgroup"
diff --git a/vendor/github.com/prometheus/prometheus/discovery/dns/dns.go b/vendor/github.com/prometheus/prometheus/discovery/dns/dns.go
index 7b388ca71..6b9860d62 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/dns/dns.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/dns/dns.go
@@ -21,8 +21,8 @@ import (
"sync"
"time"
- "github.com/go-kit/kit/log"
- "github.com/go-kit/kit/log/level"
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
"github.com/miekg/dns"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
@@ -68,8 +68,7 @@ var (
func init() {
discovery.RegisterConfig(&SDConfig{})
- prometheus.MustRegister(dnsSDLookupFailuresCount)
- prometheus.MustRegister(dnsSDLookupsCount)
+ prometheus.MustRegister(dnsSDLookupFailuresCount, dnsSDLookupsCount)
}
// SDConfig is the configuration for DNS based service discovery.
diff --git a/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/network.go b/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/network.go
deleted file mode 100644
index 7d70169a6..000000000
--- a/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/network.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package dockerswarm
-
-import (
- "context"
- "fmt"
-
- "github.com/docker/docker/api/types"
-
- "github.com/prometheus/prometheus/util/strutil"
-)
-
-const (
- swarmLabelNetworkPrefix = swarmLabel + "network_"
- swarmLabelNetworkID = swarmLabelNetworkPrefix + "id"
- swarmLabelNetworkName = swarmLabelNetworkPrefix + "name"
- swarmLabelNetworkScope = swarmLabelNetworkPrefix + "scope"
- swarmLabelNetworkInternal = swarmLabelNetworkPrefix + "internal"
- swarmLabelNetworkIngress = swarmLabelNetworkPrefix + "ingress"
- swarmLabelNetworkLabelPrefix = swarmLabelNetworkPrefix + "label_"
-)
-
-func (d *Discovery) getNetworksLabels(ctx context.Context) (map[string]map[string]string, error) {
- networks, err := d.client.NetworkList(ctx, types.NetworkListOptions{})
- if err != nil {
- return nil, err
- }
- labels := make(map[string]map[string]string, len(networks))
- for _, network := range networks {
- labels[network.ID] = map[string]string{
- swarmLabelNetworkID: network.ID,
- swarmLabelNetworkName: network.Name,
- swarmLabelNetworkScope: network.Scope,
- swarmLabelNetworkInternal: fmt.Sprintf("%t", network.Internal),
- swarmLabelNetworkIngress: fmt.Sprintf("%t", network.Ingress),
- }
- for k, v := range network.Labels {
- ln := strutil.SanitizeLabelName(k)
- labels[network.ID][swarmLabelNetworkLabelPrefix+ln] = v
- }
- }
-
- return labels, nil
-}
diff --git a/vendor/github.com/prometheus/prometheus/discovery/ec2/ec2.go b/vendor/github.com/prometheus/prometheus/discovery/ec2/ec2.go
deleted file mode 100644
index aca83fbba..000000000
--- a/vendor/github.com/prometheus/prometheus/discovery/ec2/ec2.go
+++ /dev/null
@@ -1,291 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ec2
-
-import (
- "context"
- "fmt"
- "net"
- "strings"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
- "github.com/aws/aws-sdk-go/aws/ec2metadata"
- "github.com/aws/aws-sdk-go/aws/session"
- "github.com/aws/aws-sdk-go/service/ec2"
- "github.com/go-kit/kit/log"
- "github.com/pkg/errors"
- "github.com/prometheus/common/config"
- "github.com/prometheus/common/model"
-
- "github.com/prometheus/prometheus/discovery"
- "github.com/prometheus/prometheus/discovery/refresh"
- "github.com/prometheus/prometheus/discovery/targetgroup"
- "github.com/prometheus/prometheus/util/strutil"
-)
-
-const (
- ec2Label = model.MetaLabelPrefix + "ec2_"
- ec2LabelAMI = ec2Label + "ami"
- ec2LabelAZ = ec2Label + "availability_zone"
- ec2LabelArch = ec2Label + "architecture"
- ec2LabelIPv6Addresses = ec2Label + "ipv6_addresses"
- ec2LabelInstanceID = ec2Label + "instance_id"
- ec2LabelInstanceLifecycle = ec2Label + "instance_lifecycle"
- ec2LabelInstanceState = ec2Label + "instance_state"
- ec2LabelInstanceType = ec2Label + "instance_type"
- ec2LabelOwnerID = ec2Label + "owner_id"
- ec2LabelPlatform = ec2Label + "platform"
- ec2LabelPrimarySubnetID = ec2Label + "primary_subnet_id"
- ec2LabelPrivateDNS = ec2Label + "private_dns_name"
- ec2LabelPrivateIP = ec2Label + "private_ip"
- ec2LabelPublicDNS = ec2Label + "public_dns_name"
- ec2LabelPublicIP = ec2Label + "public_ip"
- ec2LabelSubnetID = ec2Label + "subnet_id"
- ec2LabelTag = ec2Label + "tag_"
- ec2LabelVPCID = ec2Label + "vpc_id"
- ec2LabelSeparator = ","
-)
-
-// DefaultSDConfig is the default EC2 SD configuration.
-var DefaultSDConfig = SDConfig{
- Port: 80,
- RefreshInterval: model.Duration(60 * time.Second),
-}
-
-func init() {
- discovery.RegisterConfig(&SDConfig{})
-}
-
-// Filter is the configuration for filtering EC2 instances.
-type Filter struct {
- Name string `yaml:"name"`
- Values []string `yaml:"values"`
-}
-
-// SDConfig is the configuration for EC2 based service discovery.
-type SDConfig struct {
- Endpoint string `yaml:"endpoint"`
- Region string `yaml:"region"`
- AccessKey string `yaml:"access_key,omitempty"`
- SecretKey config.Secret `yaml:"secret_key,omitempty"`
- Profile string `yaml:"profile,omitempty"`
- RoleARN string `yaml:"role_arn,omitempty"`
- RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
- Port int `yaml:"port"`
- Filters []*Filter `yaml:"filters"`
-}
-
-// Name returns the name of the Config.
-func (*SDConfig) Name() string { return "ec2" }
-
-// NewDiscoverer returns a Discoverer for the Config.
-func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
- return NewDiscovery(c, opts.Logger), nil
-}
-
-// UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
- *c = DefaultSDConfig
- type plain SDConfig
- err := unmarshal((*plain)(c))
- if err != nil {
- return err
- }
- if c.Region == "" {
- sess, err := session.NewSession()
- if err != nil {
- return err
- }
- metadata := ec2metadata.New(sess)
- region, err := metadata.Region()
- if err != nil {
- return errors.New("EC2 SD configuration requires a region")
- }
- c.Region = region
- }
- for _, f := range c.Filters {
- if len(f.Values) == 0 {
- return errors.New("EC2 SD configuration filter values cannot be empty")
- }
- }
- return nil
-}
-
-// Discovery periodically performs EC2-SD requests. It implements
-// the Discoverer interface.
-type Discovery struct {
- *refresh.Discovery
- region string
- interval time.Duration
- port int
- filters []*Filter
- ec2 *ec2.EC2
-}
-
-// NewDiscovery returns a new EC2Discovery which periodically refreshes its targets.
-func NewDiscovery(conf *SDConfig, logger log.Logger) *Discovery {
- creds := credentials.NewStaticCredentials(conf.AccessKey, string(conf.SecretKey), "")
- if conf.AccessKey == "" && conf.SecretKey == "" {
- creds = nil
- }
- if logger == nil {
- logger = log.NewNopLogger()
- }
-
- sess, err := session.NewSessionWithOptions(session.Options{
- Config: aws.Config{
- Endpoint: &conf.Endpoint,
- Region: &conf.Region,
- Credentials: creds,
- },
- Profile: conf.Profile,
- })
- if err != nil {
- return nil
- }
-
- var ec2s *ec2.EC2
- if conf.RoleARN != "" {
- creds := stscreds.NewCredentials(sess, conf.RoleARN)
- ec2s = ec2.New(sess, &aws.Config{Credentials: creds})
- } else {
- ec2s = ec2.New(sess)
- }
-
- d := &Discovery{
- region: conf.Region,
- filters: conf.Filters,
- interval: time.Duration(conf.RefreshInterval),
- port: conf.Port,
- ec2: ec2s,
- }
- d.Discovery = refresh.NewDiscovery(
- logger,
- "ec2",
- time.Duration(conf.RefreshInterval),
- d.refresh,
- )
- return d
-}
-
-func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
- tg := &targetgroup.Group{
- Source: d.region,
- }
-
- var filters []*ec2.Filter
- for _, f := range d.filters {
- filters = append(filters, &ec2.Filter{
- Name: aws.String(f.Name),
- Values: aws.StringSlice(f.Values),
- })
- }
-
- input := &ec2.DescribeInstancesInput{Filters: filters}
-
- if err := d.ec2.DescribeInstancesPagesWithContext(ctx, input, func(p *ec2.DescribeInstancesOutput, lastPage bool) bool {
- for _, r := range p.Reservations {
- for _, inst := range r.Instances {
- if inst.PrivateIpAddress == nil {
- continue
- }
- labels := model.LabelSet{
- ec2LabelInstanceID: model.LabelValue(*inst.InstanceId),
- }
-
- if r.OwnerId != nil {
- labels[ec2LabelOwnerID] = model.LabelValue(*r.OwnerId)
- }
-
- labels[ec2LabelPrivateIP] = model.LabelValue(*inst.PrivateIpAddress)
- if inst.PrivateDnsName != nil {
- labels[ec2LabelPrivateDNS] = model.LabelValue(*inst.PrivateDnsName)
- }
- addr := net.JoinHostPort(*inst.PrivateIpAddress, fmt.Sprintf("%d", d.port))
- labels[model.AddressLabel] = model.LabelValue(addr)
-
- if inst.Platform != nil {
- labels[ec2LabelPlatform] = model.LabelValue(*inst.Platform)
- }
-
- if inst.PublicIpAddress != nil {
- labels[ec2LabelPublicIP] = model.LabelValue(*inst.PublicIpAddress)
- labels[ec2LabelPublicDNS] = model.LabelValue(*inst.PublicDnsName)
- }
-
- labels[ec2LabelAMI] = model.LabelValue(*inst.ImageId)
- labels[ec2LabelAZ] = model.LabelValue(*inst.Placement.AvailabilityZone)
- labels[ec2LabelInstanceState] = model.LabelValue(*inst.State.Name)
- labels[ec2LabelInstanceType] = model.LabelValue(*inst.InstanceType)
-
- if inst.InstanceLifecycle != nil {
- labels[ec2LabelInstanceLifecycle] = model.LabelValue(*inst.InstanceLifecycle)
- }
-
- if inst.Architecture != nil {
- labels[ec2LabelArch] = model.LabelValue(*inst.Architecture)
- }
-
- if inst.VpcId != nil {
- labels[ec2LabelVPCID] = model.LabelValue(*inst.VpcId)
- labels[ec2LabelPrimarySubnetID] = model.LabelValue(*inst.SubnetId)
-
- var subnets []string
- var ipv6addrs []string
- subnetsMap := make(map[string]struct{})
- for _, eni := range inst.NetworkInterfaces {
- if eni.SubnetId == nil {
- continue
- }
- // Deduplicate VPC Subnet IDs maintaining the order of the subnets returned by EC2.
- if _, ok := subnetsMap[*eni.SubnetId]; !ok {
- subnetsMap[*eni.SubnetId] = struct{}{}
- subnets = append(subnets, *eni.SubnetId)
- }
-
- for _, ipv6addr := range eni.Ipv6Addresses {
- ipv6addrs = append(ipv6addrs, *ipv6addr.Ipv6Address)
- }
- }
- labels[ec2LabelSubnetID] = model.LabelValue(
- ec2LabelSeparator +
- strings.Join(subnets, ec2LabelSeparator) +
- ec2LabelSeparator)
- if len(ipv6addrs) > 0 {
- labels[ec2LabelIPv6Addresses] = model.LabelValue(
- ec2LabelSeparator +
- strings.Join(ipv6addrs, ec2LabelSeparator) +
- ec2LabelSeparator)
- }
- }
-
- for _, t := range inst.Tags {
- if t == nil || t.Key == nil || t.Value == nil {
- continue
- }
- name := strutil.SanitizeLabelName(*t.Key)
- labels[ec2LabelTag+model.LabelName(name)] = model.LabelValue(*t.Value)
- }
- tg.Targets = append(tg.Targets, labels)
- }
- }
- return true
- }); err != nil {
- return nil, errors.Wrap(err, "could not describe instances")
- }
- return []*targetgroup.Group{tg}, nil
-}
diff --git a/vendor/github.com/prometheus/prometheus/discovery/eureka/eureka.go b/vendor/github.com/prometheus/prometheus/discovery/eureka/eureka.go
index 5cc3b5b50..dcfc2be10 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/eureka/eureka.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/eureka/eureka.go
@@ -21,7 +21,7 @@ import (
"strconv"
"time"
- "github.com/go-kit/kit/log"
+ "github.com/go-kit/log"
"github.com/pkg/errors"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
@@ -60,7 +60,8 @@ const (
// DefaultSDConfig is the default Eureka SD configuration.
var DefaultSDConfig = SDConfig{
- RefreshInterval: model.Duration(30 * time.Second),
+ RefreshInterval: model.Duration(30 * time.Second),
+ HTTPClientConfig: config.DefaultHTTPClientConfig,
}
func init() {
@@ -115,9 +116,9 @@ type Discovery struct {
server string
}
-// New creates a new Eureka discovery for the given role.
+// NewDiscovery creates a new Eureka discovery for the given role.
func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
- rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "eureka_sd", false, false)
+ rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "eureka_sd", config.WithHTTP2Disabled())
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/prometheus/prometheus/discovery/file/file.go b/vendor/github.com/prometheus/prometheus/discovery/file/file.go
index be5dee5c5..088fa12d4 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/file/file.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/file/file.go
@@ -25,8 +25,8 @@ import (
"sync"
"time"
- "github.com/go-kit/kit/log"
- "github.com/go-kit/kit/log/level"
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config"
@@ -39,6 +39,19 @@ import (
)
var (
+ fileSDScanDuration = prometheus.NewSummary(
+ prometheus.SummaryOpts{
+ Name: "prometheus_sd_file_scan_duration_seconds",
+ Help: "The duration of the File-SD scan in seconds.",
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+ })
+ fileSDReadErrorsCount = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Name: "prometheus_sd_file_read_errors_total",
+ Help: "The number of File-SD read errors.",
+ })
+ fileSDTimeStamp = NewTimestampCollector()
+
patFileSDName = regexp.MustCompile(`^[^*]*(\*[^/]*)?\.(json|yml|yaml|JSON|YML|YAML)$`)
// DefaultSDConfig is the default file SD configuration.
@@ -49,6 +62,7 @@ var (
func init() {
discovery.RegisterConfig(&SDConfig{})
+ prometheus.MustRegister(fileSDScanDuration, fileSDReadErrorsCount, fileSDTimeStamp)
}
// SDConfig is the configuration for file based discovery.
@@ -153,27 +167,6 @@ func NewTimestampCollector() *TimestampCollector {
}
}
-var (
- fileSDScanDuration = prometheus.NewSummary(
- prometheus.SummaryOpts{
- Name: "prometheus_sd_file_scan_duration_seconds",
- Help: "The duration of the File-SD scan in seconds.",
- Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
- })
- fileSDReadErrorsCount = prometheus.NewCounter(
- prometheus.CounterOpts{
- Name: "prometheus_sd_file_read_errors_total",
- Help: "The number of File-SD read errors.",
- })
- fileSDTimeStamp = NewTimestampCollector()
-)
-
-func init() {
- prometheus.MustRegister(fileSDScanDuration)
- prometheus.MustRegister(fileSDReadErrorsCount)
- prometheus.MustRegister(fileSDTimeStamp)
-}
-
// Discovery provides service discovery functionality based
// on files that contain target groups in JSON or YAML format. Refreshing
// happens using file watches and periodic refreshes.
diff --git a/vendor/github.com/prometheus/prometheus/discovery/gce/gce.go b/vendor/github.com/prometheus/prometheus/discovery/gce/gce.go
index 231c8fdd3..e17c60f71 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/gce/gce.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/gce/gce.go
@@ -21,7 +21,7 @@ import (
"strings"
"time"
- "github.com/go-kit/kit/log"
+ "github.com/go-kit/log"
"github.com/pkg/errors"
"github.com/prometheus/common/model"
"golang.org/x/oauth2/google"
@@ -178,6 +178,12 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
addr := fmt.Sprintf("%s:%d", priIface.NetworkIP, d.port)
labels[model.AddressLabel] = model.LabelValue(addr)
+ // Append named interface metadata for all interfaces
+ for _, iface := range inst.NetworkInterfaces {
+ gceLabelNetAddress := model.LabelName(fmt.Sprintf("%sinterface_ipv4_%s", gceLabel, strutil.SanitizeLabelName(iface.Name)))
+ labels[gceLabelNetAddress] = model.LabelValue(iface.NetworkIP)
+ }
+
// Tags in GCE are usually only used for networking rules.
if inst.Tags != nil && len(inst.Tags.Items) > 0 {
// We surround the separated list with the separator as well. This way regular expressions
diff --git a/vendor/github.com/prometheus/prometheus/discovery/hetzner/hcloud.go b/vendor/github.com/prometheus/prometheus/discovery/hetzner/hcloud.go
index 20cfd1e58..494321fd5 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/hetzner/hcloud.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/hetzner/hcloud.go
@@ -21,7 +21,7 @@ import (
"strconv"
"time"
- "github.com/go-kit/kit/log"
+ "github.com/go-kit/log"
"github.com/hetznercloud/hcloud-go/hcloud"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
@@ -47,6 +47,7 @@ const (
hetznerLabelHcloudDiskGB = hetznerHcloudLabelPrefix + "disk_size_gb"
hetznerLabelHcloudType = hetznerHcloudLabelPrefix + "server_type"
hetznerLabelHcloudLabel = hetznerHcloudLabelPrefix + "label_"
+ hetznerLabelHcloudLabelPresent = hetznerHcloudLabelPrefix + "labelpresent_"
)
// Discovery periodically performs Hetzner Cloud requests. It implements
@@ -63,7 +64,7 @@ func newHcloudDiscovery(conf *SDConfig, logger log.Logger) (*hcloudDiscovery, er
port: conf.Port,
}
- rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "hetzner_sd", false, false)
+ rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "hetzner_sd", config.WithHTTP2Disabled())
if err != nil {
return nil, err
}
@@ -124,6 +125,9 @@ func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
}
}
for labelKey, labelValue := range server.Labels {
+ presentLabel := model.LabelName(hetznerLabelHcloudLabelPresent + strutil.SanitizeLabelName(labelKey))
+ labels[presentLabel] = model.LabelValue("true")
+
label := model.LabelName(hetznerLabelHcloudLabel + strutil.SanitizeLabelName(labelKey))
labels[label] = model.LabelValue(labelValue)
}
diff --git a/vendor/github.com/prometheus/prometheus/discovery/hetzner/hetzner.go b/vendor/github.com/prometheus/prometheus/discovery/hetzner/hetzner.go
index 9d1e3b264..b7c7578e2 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/hetzner/hetzner.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/hetzner/hetzner.go
@@ -17,7 +17,7 @@ import (
"context"
"time"
- "github.com/go-kit/kit/log"
+ "github.com/go-kit/log"
"github.com/hetznercloud/hcloud-go/hcloud"
"github.com/pkg/errors"
"github.com/prometheus/common/config"
@@ -41,8 +41,9 @@ const (
// DefaultSDConfig is the default Hetzner SD configuration.
var DefaultSDConfig = SDConfig{
- Port: 80,
- RefreshInterval: model.Duration(60 * time.Second),
+ Port: 80,
+ RefreshInterval: model.Duration(60 * time.Second),
+ HTTPClientConfig: config.DefaultHTTPClientConfig,
}
func init() {
@@ -110,7 +111,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
if c.Role == "" {
return errors.New("role missing (one of: robot, hcloud)")
}
- return nil
+ return c.HTTPClientConfig.Validate()
}
// Discovery periodically performs Hetzner requests. It implements
diff --git a/vendor/github.com/prometheus/prometheus/discovery/hetzner/robot.go b/vendor/github.com/prometheus/prometheus/discovery/hetzner/robot.go
index f3041d2c6..f7079d909 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/hetzner/robot.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/hetzner/robot.go
@@ -25,7 +25,7 @@ import (
"strings"
"time"
- "github.com/go-kit/kit/log"
+ "github.com/go-kit/log"
"github.com/pkg/errors"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
@@ -59,7 +59,7 @@ func newRobotDiscovery(conf *SDConfig, logger log.Logger) (*robotDiscovery, erro
endpoint: conf.robotEndpoint,
}
- rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "hetzner_sd", false, false)
+ rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "hetzner_sd", config.WithHTTP2Disabled())
if err != nil {
return nil, err
}
@@ -92,8 +92,13 @@ func (d *robotDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, err
return nil, errors.Errorf("non 2xx status '%d' response during hetzner service discovery with role robot", resp.StatusCode)
}
+ b, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
var servers serversList
- err = json.NewDecoder(resp.Body).Decode(&servers)
+ err = json.Unmarshal(b, &servers)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/prometheus/prometheus/discovery/http/http.go b/vendor/github.com/prometheus/prometheus/discovery/http/http.go
new file mode 100644
index 000000000..af2e99939
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/discovery/http/http.go
@@ -0,0 +1,200 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package http
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/go-kit/log"
+ "github.com/pkg/errors"
+ "github.com/prometheus/common/config"
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/common/version"
+
+ "github.com/prometheus/prometheus/discovery"
+ "github.com/prometheus/prometheus/discovery/refresh"
+ "github.com/prometheus/prometheus/discovery/targetgroup"
+)
+
+var (
+ // DefaultSDConfig is the default HTTP SD configuration.
+ DefaultSDConfig = SDConfig{
+ RefreshInterval: model.Duration(60 * time.Second),
+ HTTPClientConfig: config.DefaultHTTPClientConfig,
+ }
+ userAgent = fmt.Sprintf("Prometheus/%s", version.Version)
+ matchContentType = regexp.MustCompile(`^(?i:application\/json(;\s*charset=("utf-8"|utf-8))?)$`)
+)
+
+func init() {
+ discovery.RegisterConfig(&SDConfig{})
+}
+
+// SDConfig is the configuration for HTTP based discovery.
+type SDConfig struct {
+ HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
+ RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
+ URL string `yaml:"url"`
+}
+
+// Name returns the name of the Config.
+func (*SDConfig) Name() string { return "http" }
+
+// NewDiscoverer returns a Discoverer for the Config.
+func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
+ return NewDiscovery(c, opts.Logger)
+}
+
+// SetDirectory joins any relative file paths with dir.
+func (c *SDConfig) SetDirectory(dir string) {
+ c.HTTPClientConfig.SetDirectory(dir)
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ *c = DefaultSDConfig
+ type plain SDConfig
+ err := unmarshal((*plain)(c))
+ if err != nil {
+ return err
+ }
+ if c.URL == "" {
+ return fmt.Errorf("URL is missing")
+ }
+ parsedURL, err := url.Parse(c.URL)
+ if err != nil {
+ return err
+ }
+ if parsedURL.Scheme != "http" && parsedURL.Scheme != "https" {
+ return fmt.Errorf("URL scheme must be 'http' or 'https'")
+ }
+ if parsedURL.Host == "" {
+ return fmt.Errorf("host is missing in URL")
+ }
+ return nil
+}
+
+const httpSDURLLabel = model.MetaLabelPrefix + "url"
+
+// Discovery provides service discovery functionality based
+// on HTTP endpoints that return target groups in JSON format.
+type Discovery struct {
+ *refresh.Discovery
+ url string
+ client *http.Client
+ refreshInterval time.Duration
+ tgLastLength int
+}
+
+// NewDiscovery returns a new HTTP discovery for the given config.
+func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
+ if logger == nil {
+ logger = log.NewNopLogger()
+ }
+
+ client, err := config.NewClientFromConfig(conf.HTTPClientConfig, "http", config.WithHTTP2Disabled())
+ if err != nil {
+ return nil, err
+ }
+ client.Timeout = time.Duration(conf.RefreshInterval)
+
+ d := &Discovery{
+ url: conf.URL,
+ client: client,
+ refreshInterval: time.Duration(conf.RefreshInterval), // Stored to be sent as headers.
+ }
+
+ d.Discovery = refresh.NewDiscovery(
+ logger,
+ "http",
+ time.Duration(conf.RefreshInterval),
+ d.refresh,
+ )
+ return d, nil
+}
+
+func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
+ req, err := http.NewRequest("GET", d.url, nil)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("User-Agent", userAgent)
+ req.Header.Set("Accept", "application/json")
+ req.Header.Set("X-Prometheus-Refresh-Interval-Seconds", strconv.FormatFloat(d.refreshInterval.Seconds(), 'f', -1, 64))
+
+ resp, err := d.client.Do(req.WithContext(ctx))
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ io.Copy(ioutil.Discard, resp.Body)
+ resp.Body.Close()
+ }()
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, errors.Errorf("server returned HTTP status %s", resp.Status)
+ }
+
+ if !matchContentType.MatchString(strings.TrimSpace(resp.Header.Get("Content-Type"))) {
+ return nil, errors.Errorf("unsupported content type %q", resp.Header.Get("Content-Type"))
+ }
+
+ b, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ var targetGroups []*targetgroup.Group
+
+ if err := json.Unmarshal(b, &targetGroups); err != nil {
+ return nil, err
+ }
+
+ for i, tg := range targetGroups {
+ if tg == nil {
+ err = errors.New("nil target group item found")
+ return nil, err
+ }
+
+ tg.Source = urlSource(d.url, i)
+ if tg.Labels == nil {
+ tg.Labels = model.LabelSet{}
+ }
+ tg.Labels[httpSDURLLabel] = model.LabelValue(d.url)
+ }
+
+ // Generate empty updates for sources that disappeared.
+ l := len(targetGroups)
+ for i := l; i < d.tgLastLength; i++ {
+ targetGroups = append(targetGroups, &targetgroup.Group{Source: urlSource(d.url, i)})
+ }
+ d.tgLastLength = l
+
+ return targetGroups, nil
+}
+
+// urlSource returns a source ID for the i-th target group per URL.
+func urlSource(url string, i int) string {
+ return fmt.Sprintf("%s:%d", url, i)
+}
diff --git a/vendor/github.com/prometheus/prometheus/discovery/install/install.go b/vendor/github.com/prometheus/prometheus/discovery/install/install.go
index d9394f270..34ccf3d0f 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/install/install.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/install/install.go
@@ -16,19 +16,23 @@
package install
import (
+ _ "github.com/prometheus/prometheus/discovery/aws" // register aws
_ "github.com/prometheus/prometheus/discovery/azure" // register azure
_ "github.com/prometheus/prometheus/discovery/consul" // register consul
_ "github.com/prometheus/prometheus/discovery/digitalocean" // register digitalocean
_ "github.com/prometheus/prometheus/discovery/dns" // register dns
- _ "github.com/prometheus/prometheus/discovery/dockerswarm" // register dockerswarm
- _ "github.com/prometheus/prometheus/discovery/ec2" // register ec2
_ "github.com/prometheus/prometheus/discovery/eureka" // register eureka
_ "github.com/prometheus/prometheus/discovery/file" // register file
_ "github.com/prometheus/prometheus/discovery/gce" // register gce
_ "github.com/prometheus/prometheus/discovery/hetzner" // register hetzner
+ _ "github.com/prometheus/prometheus/discovery/http" // register http
_ "github.com/prometheus/prometheus/discovery/kubernetes" // register kubernetes
+ _ "github.com/prometheus/prometheus/discovery/linode" // register linode
_ "github.com/prometheus/prometheus/discovery/marathon" // register marathon
+ _ "github.com/prometheus/prometheus/discovery/moby" // register moby
_ "github.com/prometheus/prometheus/discovery/openstack" // register openstack
+ _ "github.com/prometheus/prometheus/discovery/scaleway" // register scaleway
_ "github.com/prometheus/prometheus/discovery/triton" // register triton
+ _ "github.com/prometheus/prometheus/discovery/xds" // register xds
_ "github.com/prometheus/prometheus/discovery/zookeeper" // register zookeeper
)
diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/client_metrics.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/client_metrics.go
index c44ef21aa..45e249be2 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/client_metrics.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/client_metrics.go
@@ -14,6 +14,7 @@
package kubernetes
import (
+ "context"
"net/url"
"time"
@@ -120,10 +121,10 @@ func (f *clientGoRequestMetricAdapter) Register(registerer prometheus.Registerer
clientGoRequestLatencyMetricVec,
)
}
-func (clientGoRequestMetricAdapter) Increment(code string, method string, host string) {
+func (clientGoRequestMetricAdapter) Increment(ctx context.Context, code string, method string, host string) {
clientGoRequestResultMetricVec.WithLabelValues(code).Inc()
}
-func (clientGoRequestMetricAdapter) Observe(verb string, u url.URL, latency time.Duration) {
+func (clientGoRequestMetricAdapter) Observe(ctx context.Context, verb string, u url.URL, latency time.Duration) {
clientGoRequestLatencyMetricVec.WithLabelValues(u.EscapedPath()).Observe(latency.Seconds())
}
diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpoints.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpoints.go
index f905ae92c..8fc158dd4 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpoints.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpoints.go
@@ -18,8 +18,10 @@ import (
"net"
"strconv"
- "github.com/go-kit/kit/log"
- "github.com/go-kit/kit/log/level"
+ "github.com/prometheus/prometheus/util/strutil"
+
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
"github.com/pkg/errors"
"github.com/prometheus/common/model"
apiv1 "k8s.io/api/core/v1"
@@ -199,6 +201,8 @@ func endpointsSourceFromNamespaceAndName(namespace, name string) string {
}
const (
+ endpointsLabelPrefix = metaLabelPrefix + "endpoints_label_"
+ endpointsLabelPresentPrefix = metaLabelPrefix + "endpoints_labelpresent_"
endpointsNameLabel = metaLabelPrefix + "endpoints_name"
endpointNodeName = metaLabelPrefix + "endpoint_node_name"
endpointHostname = metaLabelPrefix + "endpoint_hostname"
@@ -218,6 +222,12 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
endpointsNameLabel: lv(eps.Name),
}
e.addServiceLabels(eps.Namespace, eps.Name, tg)
+ // Add endpoints labels metadata.
+ for k, v := range eps.Labels {
+ ln := strutil.SanitizeLabelName(k)
+ tg.Labels[model.LabelName(endpointsLabelPrefix+ln)] = lv(v)
+ tg.Labels[model.LabelName(endpointsLabelPresentPrefix+ln)] = presentValue
+ }
type podEntry struct {
pod *apiv1.Pod
diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice.go
index 4bb4bd5b6..71d4e3776 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice.go
@@ -18,8 +18,8 @@ import (
"net"
"strconv"
- "github.com/go-kit/kit/log"
- "github.com/go-kit/kit/log/level"
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
"github.com/pkg/errors"
"github.com/prometheus/common/model"
apiv1 "k8s.io/api/core/v1"
diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress.go
index a6ef746e4..070785f29 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress.go
@@ -15,11 +15,13 @@ package kubernetes
import (
"context"
+ "strings"
- "github.com/go-kit/kit/log"
- "github.com/go-kit/kit/log/level"
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
"github.com/pkg/errors"
"github.com/prometheus/common/model"
+ v1 "k8s.io/api/networking/v1"
"k8s.io/api/networking/v1beta1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
@@ -112,26 +114,24 @@ func (i *Ingress) process(ctx context.Context, ch chan<- []*targetgroup.Group) b
send(ctx, ch, &targetgroup.Group{Source: ingressSourceFromNamespaceAndName(namespace, name)})
return true
}
- eps, err := convertToIngress(o)
- if err != nil {
- level.Error(i.logger).Log("msg", "converting to Ingress object failed", "err", err)
+
+ var ia ingressAdaptor
+ switch ingress := o.(type) {
+ case *v1.Ingress:
+ ia = newIngressAdaptorFromV1(ingress)
+ case *v1beta1.Ingress:
+ ia = newIngressAdaptorFromV1beta1(ingress)
+ default:
+ level.Error(i.logger).Log("msg", "converting to Ingress object failed", "err",
+ errors.Errorf("received unexpected object: %v", o))
return true
}
- send(ctx, ch, i.buildIngress(eps))
+ send(ctx, ch, i.buildIngress(ia))
return true
}
-func convertToIngress(o interface{}) (*v1beta1.Ingress, error) {
- ingress, ok := o.(*v1beta1.Ingress)
- if ok {
- return ingress, nil
- }
-
- return nil, errors.Errorf("received unexpected object: %v", o)
-}
-
-func ingressSource(s *v1beta1.Ingress) string {
- return ingressSourceFromNamespaceAndName(s.Namespace, s.Name)
+func ingressSource(s ingressAdaptor) string {
+ return ingressSourceFromNamespaceAndName(s.namespace(), s.name())
}
func ingressSourceFromNamespaceAndName(namespace, name string) string {
@@ -147,21 +147,25 @@ const (
ingressSchemeLabel = metaLabelPrefix + "ingress_scheme"
ingressHostLabel = metaLabelPrefix + "ingress_host"
ingressPathLabel = metaLabelPrefix + "ingress_path"
+ ingressClassNameLabel = metaLabelPrefix + "ingress_class_name"
)
-func ingressLabels(ingress *v1beta1.Ingress) model.LabelSet {
+func ingressLabels(ingress ingressAdaptor) model.LabelSet {
// Each label and annotation will create two key-value pairs in the map.
- ls := make(model.LabelSet, 2*(len(ingress.Labels)+len(ingress.Annotations))+2)
- ls[ingressNameLabel] = lv(ingress.Name)
- ls[namespaceLabel] = lv(ingress.Namespace)
+ ls := make(model.LabelSet, 2*(len(ingress.labels())+len(ingress.annotations()))+2)
+ ls[ingressNameLabel] = lv(ingress.name())
+ ls[namespaceLabel] = lv(ingress.namespace())
+ if cls := ingress.ingressClassName(); cls != nil {
+ ls[ingressClassNameLabel] = lv(*cls)
+ }
- for k, v := range ingress.Labels {
+ for k, v := range ingress.labels() {
ln := strutil.SanitizeLabelName(k)
ls[model.LabelName(ingressLabelPrefix+ln)] = lv(v)
ls[model.LabelName(ingressLabelPresentPrefix+ln)] = presentValue
}
- for k, v := range ingress.Annotations {
+ for k, v := range ingress.annotations() {
ln := strutil.SanitizeLabelName(k)
ls[model.LabelName(ingressAnnotationPrefix+ln)] = lv(v)
ls[model.LabelName(ingressAnnotationPresentPrefix+ln)] = presentValue
@@ -169,14 +173,14 @@ func ingressLabels(ingress *v1beta1.Ingress) model.LabelSet {
return ls
}
-func pathsFromIngressRule(rv *v1beta1.IngressRuleValue) []string {
- if rv.HTTP == nil {
+func pathsFromIngressPaths(ingressPaths []string) []string {
+ if ingressPaths == nil {
return []string{"/"}
}
- paths := make([]string, len(rv.HTTP.Paths))
- for n, p := range rv.HTTP.Paths {
- path := p.Path
- if path == "" {
+ paths := make([]string, len(ingressPaths))
+ for n, p := range ingressPaths {
+ path := p
+ if p == "" {
path = "/"
}
paths[n] = path
@@ -184,33 +188,29 @@ func pathsFromIngressRule(rv *v1beta1.IngressRuleValue) []string {
return paths
}
-func (i *Ingress) buildIngress(ingress *v1beta1.Ingress) *targetgroup.Group {
+func (i *Ingress) buildIngress(ingress ingressAdaptor) *targetgroup.Group {
tg := &targetgroup.Group{
Source: ingressSource(ingress),
}
tg.Labels = ingressLabels(ingress)
- tlsHosts := make(map[string]struct{})
- for _, tls := range ingress.Spec.TLS {
- for _, host := range tls.Hosts {
- tlsHosts[host] = struct{}{}
- }
- }
-
- for _, rule := range ingress.Spec.Rules {
- paths := pathsFromIngressRule(&rule.IngressRuleValue)
-
+ for _, rule := range ingress.rules() {
scheme := "http"
- _, isTLS := tlsHosts[rule.Host]
- if isTLS {
- scheme = "https"
+ paths := pathsFromIngressPaths(rule.paths())
+
+ out:
+ for _, pattern := range ingress.tlsHosts() {
+ if matchesHostnamePattern(pattern, rule.host()) {
+ scheme = "https"
+ break out
+ }
}
for _, path := range paths {
tg.Targets = append(tg.Targets, model.LabelSet{
- model.AddressLabel: lv(rule.Host),
+ model.AddressLabel: lv(rule.host()),
ingressSchemeLabel: lv(scheme),
- ingressHostLabel: lv(rule.Host),
+ ingressHostLabel: lv(rule.host()),
ingressPathLabel: lv(path),
})
}
@@ -218,3 +218,33 @@ func (i *Ingress) buildIngress(ingress *v1beta1.Ingress) *targetgroup.Group {
return tg
}
+
+// matchesHostnamePattern returns true if the host matches a wildcard DNS
+// pattern or pattern and host are equal.
+func matchesHostnamePattern(pattern, host string) bool {
+ if pattern == host {
+ return true
+ }
+
+ patternParts := strings.Split(pattern, ".")
+ hostParts := strings.Split(host, ".")
+
+ // If the first element of the pattern is not a wildcard, give up.
+ if len(patternParts) == 0 || patternParts[0] != "*" {
+ return false
+ }
+
+	// A wildcard match requires the pattern to have the same length as the host
+	// path.
+ if len(patternParts) != len(hostParts) {
+ return false
+ }
+
+ for i := 1; i < len(patternParts); i++ {
+ if patternParts[i] != hostParts[i] {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress_adaptor.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress_adaptor.go
new file mode 100644
index 000000000..113a067ca
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress_adaptor.go
@@ -0,0 +1,141 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kubernetes
+
+import (
+ v1 "k8s.io/api/networking/v1"
+ "k8s.io/api/networking/v1beta1"
+)
+
+// ingressAdaptor is an adaptor for the different Ingress versions
+type ingressAdaptor interface {
+ name() string
+ namespace() string
+ labels() map[string]string
+ annotations() map[string]string
+ tlsHosts() []string
+ ingressClassName() *string
+ rules() []ingressRuleAdaptor
+}
+
+type ingressRuleAdaptor interface {
+ paths() []string
+ host() string
+}
+
+// Adaptor for networking.k8s.io/v1
+type ingressAdaptorV1 struct {
+ ingress *v1.Ingress
+}
+
+func newIngressAdaptorFromV1(ingress *v1.Ingress) ingressAdaptor {
+ return &ingressAdaptorV1{ingress: ingress}
+}
+
+func (i *ingressAdaptorV1) name() string { return i.ingress.Name }
+func (i *ingressAdaptorV1) namespace() string { return i.ingress.Namespace }
+func (i *ingressAdaptorV1) labels() map[string]string { return i.ingress.Labels }
+func (i *ingressAdaptorV1) annotations() map[string]string { return i.ingress.Annotations }
+func (i *ingressAdaptorV1) ingressClassName() *string { return i.ingress.Spec.IngressClassName }
+
+func (i *ingressAdaptorV1) tlsHosts() []string {
+ var hosts []string
+ for _, tls := range i.ingress.Spec.TLS {
+ hosts = append(hosts, tls.Hosts...)
+ }
+ return hosts
+}
+
+func (i *ingressAdaptorV1) rules() []ingressRuleAdaptor {
+ var rules []ingressRuleAdaptor
+ for _, rule := range i.ingress.Spec.Rules {
+ rules = append(rules, newIngressRuleAdaptorFromV1(rule))
+ }
+ return rules
+}
+
+type ingressRuleAdaptorV1 struct {
+ rule v1.IngressRule
+}
+
+func newIngressRuleAdaptorFromV1(rule v1.IngressRule) ingressRuleAdaptor {
+ return &ingressRuleAdaptorV1{rule: rule}
+}
+
+func (i *ingressRuleAdaptorV1) paths() []string {
+ rv := i.rule.IngressRuleValue
+ if rv.HTTP == nil {
+ return nil
+ }
+ paths := make([]string, len(rv.HTTP.Paths))
+ for n, p := range rv.HTTP.Paths {
+ paths[n] = p.Path
+ }
+ return paths
+}
+
+func (i *ingressRuleAdaptorV1) host() string { return i.rule.Host }
+
+// Adaptor for networking.k8s.io/v1beta1
+type ingressAdaptorV1Beta1 struct {
+ ingress *v1beta1.Ingress
+}
+
+func newIngressAdaptorFromV1beta1(ingress *v1beta1.Ingress) ingressAdaptor {
+ return &ingressAdaptorV1Beta1{ingress: ingress}
+}
+
+func (i *ingressAdaptorV1Beta1) name() string { return i.ingress.Name }
+func (i *ingressAdaptorV1Beta1) namespace() string { return i.ingress.Namespace }
+func (i *ingressAdaptorV1Beta1) labels() map[string]string { return i.ingress.Labels }
+func (i *ingressAdaptorV1Beta1) annotations() map[string]string { return i.ingress.Annotations }
+func (i *ingressAdaptorV1Beta1) ingressClassName() *string { return i.ingress.Spec.IngressClassName }
+
+func (i *ingressAdaptorV1Beta1) tlsHosts() []string {
+ var hosts []string
+ for _, tls := range i.ingress.Spec.TLS {
+ hosts = append(hosts, tls.Hosts...)
+ }
+ return hosts
+}
+
+func (i *ingressAdaptorV1Beta1) rules() []ingressRuleAdaptor {
+ var rules []ingressRuleAdaptor
+ for _, rule := range i.ingress.Spec.Rules {
+ rules = append(rules, newIngressRuleAdaptorFromV1Beta1(rule))
+ }
+ return rules
+}
+
+type ingressRuleAdaptorV1Beta1 struct {
+ rule v1beta1.IngressRule
+}
+
+func newIngressRuleAdaptorFromV1Beta1(rule v1beta1.IngressRule) ingressRuleAdaptor {
+ return &ingressRuleAdaptorV1Beta1{rule: rule}
+}
+
+func (i *ingressRuleAdaptorV1Beta1) paths() []string {
+ rv := i.rule.IngressRuleValue
+ if rv.HTTP == nil {
+ return nil
+ }
+ paths := make([]string, len(rv.HTTP.Paths))
+ for n, p := range rv.HTTP.Paths {
+ paths[n] = p.Path
+ }
+ return paths
+}
+
+func (i *ingressRuleAdaptorV1Beta1) host() string { return i.rule.Host }
diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/kubernetes.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/kubernetes.go
index 0183d061f..3a02922a4 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/kubernetes.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/kubernetes.go
@@ -21,8 +21,8 @@ import (
"sync"
"time"
- "github.com/go-kit/kit/log"
- "github.com/go-kit/kit/log/level"
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config"
@@ -30,15 +30,18 @@ import (
"github.com/prometheus/common/version"
apiv1 "k8s.io/api/core/v1"
disv1beta1 "k8s.io/api/discovery/v1beta1"
+ networkv1 "k8s.io/api/networking/v1"
"k8s.io/api/networking/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
+ utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
+ "k8s.io/client-go/tools/clientcmd"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/targetgroup"
@@ -66,7 +69,9 @@ var (
[]string{"role", "event"},
)
// DefaultSDConfig is the default Kubernetes SD configuration
- DefaultSDConfig = SDConfig{}
+ DefaultSDConfig = SDConfig{
+ HTTPClientConfig: config.DefaultHTTPClientConfig,
+ }
)
func init() {
@@ -112,6 +117,7 @@ func (c *Role) UnmarshalYAML(unmarshal func(interface{}) error) error {
type SDConfig struct {
APIServer config.URL `yaml:"api_server,omitempty"`
Role Role `yaml:"role"`
+ KubeConfig string `yaml:"kubeconfig_file"`
HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
NamespaceDiscovery NamespaceDiscovery `yaml:"namespaces,omitempty"`
Selectors []SelectorConfig `yaml:"selectors,omitempty"`
@@ -128,6 +134,7 @@ func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Di
// SetDirectory joins any relative file paths with dir.
func (c *SDConfig) SetDirectory(dir string) {
c.HTTPClientConfig.SetDirectory(dir)
+ c.KubeConfig = config.JoinDir(dir, c.KubeConfig)
}
type roleSelector struct {
@@ -152,7 +159,7 @@ type resourceSelector struct {
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
- *c = SDConfig{}
+ *c = DefaultSDConfig
type plain SDConfig
err := unmarshal((*plain)(c))
if err != nil {
@@ -165,7 +172,15 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
if err != nil {
return err
}
- if c.APIServer.URL == nil && !reflect.DeepEqual(c.HTTPClientConfig, config.HTTPClientConfig{}) {
+ if c.APIServer.URL != nil && c.KubeConfig != "" {
+ // Api-server and kubeconfig_file are mutually exclusive
+ return errors.Errorf("cannot use 'kubeconfig_file' and 'api_server' simultaneously")
+ }
+ if c.KubeConfig != "" && !reflect.DeepEqual(c.HTTPClientConfig, config.DefaultHTTPClientConfig) {
+ // Kubeconfig_file and custom http config are mutually exclusive
+ return errors.Errorf("cannot use a custom HTTP client configuration together with 'kubeconfig_file'")
+ }
+ if c.APIServer.URL == nil && !reflect.DeepEqual(c.HTTPClientConfig, config.DefaultHTTPClientConfig) {
return errors.Errorf("to use custom HTTP client configuration please provide the 'api_server' URL explicitly")
}
@@ -254,7 +269,12 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) {
kcfg *rest.Config
err error
)
- if conf.APIServer.URL == nil {
+ if conf.KubeConfig != "" {
+ kcfg, err = clientcmd.BuildConfigFromFlags("", conf.KubeConfig)
+ if err != nil {
+ return nil, err
+ }
+ } else if conf.APIServer.URL == nil {
// Use the Kubernetes provided pod service account
// as described in https://kubernetes.io/docs/admin/service-accounts-admin/
kcfg, err = rest.InClusterConfig()
@@ -263,7 +283,7 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) {
}
level.Info(l).Log("msg", "Using pod service account via in-cluster config")
} else {
- rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "kubernetes_sd", false, false)
+ rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "kubernetes_sd", config.WithHTTP2Disabled())
if err != nil {
return nil, err
}
@@ -473,23 +493,58 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
go svc.informer.Run(ctx.Done())
}
case RoleIngress:
+ // Check "networking.k8s.io/v1" availability with retries.
+	// If "v1" is not available, use "networking.k8s.io/v1beta1" for backward compatibility
+ var v1Supported bool
+ if retryOnError(ctx, 10*time.Second,
+ func() (err error) {
+ v1Supported, err = checkNetworkingV1Supported(d.client)
+ if err != nil {
+ level.Error(d.logger).Log("msg", "Failed to check networking.k8s.io/v1 availability", "err", err)
+ }
+ return err
+ },
+ ) {
+ d.Unlock()
+ return
+ }
+
for _, namespace := range namespaces {
- i := d.client.NetworkingV1beta1().Ingresses(namespace)
- ilw := &cache.ListWatch{
- ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
- options.FieldSelector = d.selectors.ingress.field
- options.LabelSelector = d.selectors.ingress.label
- return i.List(ctx, options)
- },
- WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
- options.FieldSelector = d.selectors.ingress.field
- options.LabelSelector = d.selectors.ingress.label
- return i.Watch(ctx, options)
- },
+ var informer cache.SharedInformer
+ if v1Supported {
+ i := d.client.NetworkingV1().Ingresses(namespace)
+ ilw := &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ options.FieldSelector = d.selectors.ingress.field
+ options.LabelSelector = d.selectors.ingress.label
+ return i.List(ctx, options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ options.FieldSelector = d.selectors.ingress.field
+ options.LabelSelector = d.selectors.ingress.label
+ return i.Watch(ctx, options)
+ },
+ }
+ informer = cache.NewSharedInformer(ilw, &networkv1.Ingress{}, resyncPeriod)
+ } else {
+ i := d.client.NetworkingV1beta1().Ingresses(namespace)
+ ilw := &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ options.FieldSelector = d.selectors.ingress.field
+ options.LabelSelector = d.selectors.ingress.label
+ return i.List(ctx, options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ options.FieldSelector = d.selectors.ingress.field
+ options.LabelSelector = d.selectors.ingress.label
+ return i.Watch(ctx, options)
+ },
+ }
+ informer = cache.NewSharedInformer(ilw, &v1beta1.Ingress{}, resyncPeriod)
}
ingress := NewIngress(
log.With(d.logger, "role", "ingress"),
- cache.NewSharedInformer(ilw, &v1beta1.Ingress{}, resyncPeriod),
+ informer,
)
d.discoverers = append(d.discoverers, ingress)
go ingress.informer.Run(ctx.Done())
@@ -545,3 +600,33 @@ func send(ctx context.Context, ch chan<- []*targetgroup.Group, tg *targetgroup.G
case ch <- []*targetgroup.Group{tg}:
}
}
+
+func retryOnError(ctx context.Context, interval time.Duration, f func() error) (canceled bool) {
+ var err error
+ err = f()
+ for {
+ if err == nil {
+ return false
+ }
+ select {
+ case <-ctx.Done():
+ return true
+ case <-time.After(interval):
+ err = f()
+ }
+ }
+}
+
+func checkNetworkingV1Supported(client kubernetes.Interface) (bool, error) {
+ k8sVer, err := client.Discovery().ServerVersion()
+ if err != nil {
+ return false, err
+ }
+ semVer, err := utilversion.ParseSemantic(k8sVer.String())
+ if err != nil {
+ return false, err
+ }
+ // networking.k8s.io/v1 is available since Kubernetes v1.19
+ // https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.19.md
+ return semVer.Major() >= 1 && semVer.Minor() >= 19, nil
+}
diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/node.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/node.go
index d196358df..668686fba 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/node.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/node.go
@@ -18,8 +18,8 @@ import (
"net"
"strconv"
- "github.com/go-kit/kit/log"
- "github.com/go-kit/kit/log/level"
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
"github.com/pkg/errors"
"github.com/prometheus/common/model"
apiv1 "k8s.io/api/core/v1"
diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/pod.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/pod.go
index 86fa31f7f..b5c94862a 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/pod.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/pod.go
@@ -19,8 +19,8 @@ import (
"strconv"
"strings"
- "github.com/go-kit/kit/log"
- "github.com/go-kit/kit/log/level"
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
"github.com/pkg/errors"
"github.com/prometheus/common/model"
apiv1 "k8s.io/api/core/v1"
@@ -124,12 +124,12 @@ func (p *Pod) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool
send(ctx, ch, &targetgroup.Group{Source: podSourceFromNamespaceAndName(namespace, name)})
return true
}
- eps, err := convertToPod(o)
+ pod, err := convertToPod(o)
if err != nil {
level.Error(p.logger).Log("msg", "converting to Pod object failed", "err", err)
return true
}
- send(ctx, ch, p.buildPod(eps))
+ send(ctx, ch, p.buildPod(pod))
return true
}
diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/service.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/service.go
index 1b19c07b6..107534540 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/service.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/service.go
@@ -18,8 +18,8 @@ import (
"net"
"strconv"
- "github.com/go-kit/kit/log"
- "github.com/go-kit/kit/log/level"
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
"github.com/pkg/errors"
"github.com/prometheus/common/model"
apiv1 "k8s.io/api/core/v1"
diff --git a/vendor/github.com/prometheus/prometheus/discovery/linode/linode.go b/vendor/github.com/prometheus/prometheus/discovery/linode/linode.go
new file mode 100644
index 000000000..21c856857
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/discovery/linode/linode.go
@@ -0,0 +1,318 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package linode
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/go-kit/log"
+ "github.com/linode/linodego"
+ "github.com/prometheus/common/config"
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/common/version"
+
+ "github.com/prometheus/prometheus/discovery"
+ "github.com/prometheus/prometheus/discovery/refresh"
+ "github.com/prometheus/prometheus/discovery/targetgroup"
+)
+
+const (
+ linodeLabel = model.MetaLabelPrefix + "linode_"
+ linodeLabelID = linodeLabel + "instance_id"
+ linodeLabelName = linodeLabel + "instance_label"
+ linodeLabelImage = linodeLabel + "image"
+ linodeLabelPrivateIPv4 = linodeLabel + "private_ipv4"
+ linodeLabelPublicIPv4 = linodeLabel + "public_ipv4"
+ linodeLabelPublicIPv6 = linodeLabel + "public_ipv6"
+ linodeLabelPrivateIPv4RDNS = linodeLabel + "private_ipv4_rdns"
+ linodeLabelPublicIPv4RDNS = linodeLabel + "public_ipv4_rdns"
+ linodeLabelPublicIPv6RDNS = linodeLabel + "public_ipv6_rdns"
+ linodeLabelRegion = linodeLabel + "region"
+ linodeLabelType = linodeLabel + "type"
+ linodeLabelStatus = linodeLabel + "status"
+ linodeLabelTags = linodeLabel + "tags"
+ linodeLabelGroup = linodeLabel + "group"
+ linodeLabelHypervisor = linodeLabel + "hypervisor"
+ linodeLabelBackups = linodeLabel + "backups"
+ linodeLabelSpecsDiskBytes = linodeLabel + "specs_disk_bytes"
+ linodeLabelSpecsMemoryBytes = linodeLabel + "specs_memory_bytes"
+ linodeLabelSpecsVCPUs = linodeLabel + "specs_vcpus"
+ linodeLabelSpecsTransferBytes = linodeLabel + "specs_transfer_bytes"
+ linodeLabelExtraIPs = linodeLabel + "extra_ips"
+
+ // This is our events filter; when polling for changes, we care only about
+ // events since our last refresh.
+ // Docs: https://www.linode.com/docs/api/account/#events-list
+ filterTemplate = `{"created": {"+gte": "%s"}}`
+)
+
+// DefaultSDConfig is the default Linode SD configuration.
+var DefaultSDConfig = SDConfig{
+ TagSeparator: ",",
+ Port: 80,
+ RefreshInterval: model.Duration(60 * time.Second),
+ HTTPClientConfig: config.DefaultHTTPClientConfig,
+}
+
+func init() {
+ discovery.RegisterConfig(&SDConfig{})
+}
+
+// SDConfig is the configuration for Linode based service discovery.
+type SDConfig struct {
+ HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
+
+ RefreshInterval model.Duration `yaml:"refresh_interval"`
+ Port int `yaml:"port"`
+ TagSeparator string `yaml:"tag_separator,omitempty"`
+}
+
+// Name returns the name of the Config.
+func (*SDConfig) Name() string { return "linode" }
+
+// NewDiscoverer returns a Discoverer for the Config.
+func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
+ return NewDiscovery(c, opts.Logger)
+}
+
+// SetDirectory joins any relative file paths with dir.
+func (c *SDConfig) SetDirectory(dir string) {
+ c.HTTPClientConfig.SetDirectory(dir)
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ *c = DefaultSDConfig
+ type plain SDConfig
+ err := unmarshal((*plain)(c))
+ if err != nil {
+ return err
+ }
+ return c.HTTPClientConfig.Validate()
+}
+
+// Discovery periodically performs Linode requests. It implements
+// the Discoverer interface.
+type Discovery struct {
+ *refresh.Discovery
+ client *linodego.Client
+ port int
+ tagSeparator string
+ lastRefreshTimestamp time.Time
+ pollCount int
+ lastResults []*targetgroup.Group
+ eventPollingEnabled bool
+}
+
+// NewDiscovery returns a new Discovery which periodically refreshes its targets.
+func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
+ d := &Discovery{
+ port: conf.Port,
+ tagSeparator: conf.TagSeparator,
+ pollCount: 0,
+ lastRefreshTimestamp: time.Now().UTC(),
+ eventPollingEnabled: true,
+ }
+
+ rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "linode_sd", config.WithHTTP2Disabled())
+ if err != nil {
+ return nil, err
+ }
+
+ client := linodego.NewClient(
+ &http.Client{
+ Transport: rt,
+ Timeout: time.Duration(conf.RefreshInterval),
+ },
+ )
+ client.SetUserAgent(fmt.Sprintf("Prometheus/%s", version.Version))
+ d.client = &client
+
+ d.Discovery = refresh.NewDiscovery(
+ logger,
+ "linode",
+ time.Duration(conf.RefreshInterval),
+ d.refresh,
+ )
+ return d, nil
+}
+
+func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
+ needsRefresh := true
+ ts := time.Now().UTC()
+
+ if d.lastResults != nil && d.eventPollingEnabled {
+ // Check to see if there have been any events. If so, refresh our data.
+ opts := linodego.NewListOptions(1, fmt.Sprintf(filterTemplate, d.lastRefreshTimestamp.Format("2006-01-02T15:04:05")))
+ events, err := d.client.ListEvents(ctx, opts)
+ if err != nil {
+ var e *linodego.Error
+ if errors.As(err, &e) && e.Code == http.StatusUnauthorized {
+ // If we get a 401, the token doesn't have `events:read_only` scope.
+ // Disable event polling and fall back to doing a full refresh every interval.
+ d.eventPollingEnabled = false
+ } else {
+ return nil, err
+ }
+ } else {
+ // Event polling tells us changes the Linode API is aware of. Actions issued outside of the Linode API,
+ // such as issuing a `shutdown` at the VM's console instead of using the API to power off an instance,
+ // can potentially cause us to return stale data. Just in case, trigger a full refresh after ~10 polling
+ // intervals of no events.
+ d.pollCount++
+
+ if len(events) == 0 && d.pollCount < 10 {
+ needsRefresh = false
+ }
+ }
+ }
+
+ if needsRefresh {
+ newData, err := d.refreshData(ctx)
+ if err != nil {
+ return nil, err
+ }
+ d.pollCount = 0
+ d.lastResults = newData
+ }
+
+ d.lastRefreshTimestamp = ts
+
+ return d.lastResults, nil
+}
+
+func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, error) {
+ tg := &targetgroup.Group{
+ Source: "Linode",
+ }
+
+ // Gather all linode instances.
+ instances, err := d.client.ListInstances(ctx, &linodego.ListOptions{})
+ if err != nil {
+ return nil, err
+ }
+
+ // Gather detailed IP address info for all IPs on all linode instances.
+ detailedIPs, err := d.client.ListIPAddresses(ctx, &linodego.ListOptions{})
+ if err != nil {
+ return nil, err
+ }
+
+ for _, instance := range instances {
+ if len(instance.IPv4) == 0 {
+ continue
+ }
+
+ var (
+ privateIPv4, publicIPv4, publicIPv6 string
+ privateIPv4RDNS, publicIPv4RDNS, publicIPv6RDNS string
+ backupsStatus string
+ extraIPs []string
+ )
+
+ for _, ip := range instance.IPv4 {
+ for _, detailedIP := range detailedIPs {
+ if detailedIP.Address != ip.String() {
+ continue
+ }
+
+ if detailedIP.Public && publicIPv4 == "" {
+ publicIPv4 = detailedIP.Address
+
+ if detailedIP.RDNS != "" && detailedIP.RDNS != "null" {
+ publicIPv4RDNS = detailedIP.RDNS
+ }
+ } else if !detailedIP.Public && privateIPv4 == "" {
+ privateIPv4 = detailedIP.Address
+
+ if detailedIP.RDNS != "" && detailedIP.RDNS != "null" {
+ privateIPv4RDNS = detailedIP.RDNS
+ }
+ } else {
+ extraIPs = append(extraIPs, detailedIP.Address)
+ }
+ }
+ }
+
+ if instance.IPv6 != "" {
+ for _, detailedIP := range detailedIPs {
+ if detailedIP.Address != strings.Split(instance.IPv6, "/")[0] {
+ continue
+ }
+
+ publicIPv6 = detailedIP.Address
+
+ if detailedIP.RDNS != "" && detailedIP.RDNS != "null" {
+ publicIPv6RDNS = detailedIP.RDNS
+ }
+ }
+ }
+
+ if instance.Backups.Enabled {
+ backupsStatus = "enabled"
+ } else {
+ backupsStatus = "disabled"
+ }
+
+ labels := model.LabelSet{
+ linodeLabelID: model.LabelValue(fmt.Sprintf("%d", instance.ID)),
+ linodeLabelName: model.LabelValue(instance.Label),
+ linodeLabelImage: model.LabelValue(instance.Image),
+ linodeLabelPrivateIPv4: model.LabelValue(privateIPv4),
+ linodeLabelPublicIPv4: model.LabelValue(publicIPv4),
+ linodeLabelPublicIPv6: model.LabelValue(publicIPv6),
+ linodeLabelPrivateIPv4RDNS: model.LabelValue(privateIPv4RDNS),
+ linodeLabelPublicIPv4RDNS: model.LabelValue(publicIPv4RDNS),
+ linodeLabelPublicIPv6RDNS: model.LabelValue(publicIPv6RDNS),
+ linodeLabelRegion: model.LabelValue(instance.Region),
+ linodeLabelType: model.LabelValue(instance.Type),
+ linodeLabelStatus: model.LabelValue(instance.Status),
+ linodeLabelGroup: model.LabelValue(instance.Group),
+ linodeLabelHypervisor: model.LabelValue(instance.Hypervisor),
+ linodeLabelBackups: model.LabelValue(backupsStatus),
+ linodeLabelSpecsDiskBytes: model.LabelValue(fmt.Sprintf("%d", instance.Specs.Disk<<20)),
+ linodeLabelSpecsMemoryBytes: model.LabelValue(fmt.Sprintf("%d", instance.Specs.Memory<<20)),
+ linodeLabelSpecsVCPUs: model.LabelValue(fmt.Sprintf("%d", instance.Specs.VCPUs)),
+ linodeLabelSpecsTransferBytes: model.LabelValue(fmt.Sprintf("%d", instance.Specs.Transfer<<20)),
+ }
+
+ addr := net.JoinHostPort(publicIPv4, strconv.FormatUint(uint64(d.port), 10))
+ labels[model.AddressLabel] = model.LabelValue(addr)
+
+ if len(instance.Tags) > 0 {
+ // We surround the separated list with the separator as well. This way regular expressions
+ // in relabeling rules don't have to consider tag positions.
+ tags := d.tagSeparator + strings.Join(instance.Tags, d.tagSeparator) + d.tagSeparator
+ labels[linodeLabelTags] = model.LabelValue(tags)
+ }
+
+ if len(extraIPs) > 0 {
+ // This instance has more than one of at least one type of IP address (public, private,
+ // IPv4, IPv6, etc.). We provide those extra IPs found here just like we do for instance
+ // tags: we surround a separated list with the tagSeparator config.
+ ips := d.tagSeparator + strings.Join(extraIPs, d.tagSeparator) + d.tagSeparator
+ labels[linodeLabelExtraIPs] = model.LabelValue(ips)
+ }
+
+ tg.Targets = append(tg.Targets, labels)
+ }
+ return []*targetgroup.Group{tg}, nil
+}
diff --git a/vendor/github.com/prometheus/prometheus/discovery/manager.go b/vendor/github.com/prometheus/prometheus/discovery/manager.go
index e50d2ee1a..b3dae5c59 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/manager.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/manager.go
@@ -20,8 +20,8 @@ import (
"sync"
"time"
- "github.com/go-kit/kit/log"
- "github.com/go-kit/kit/log/level"
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/discovery/targetgroup"
@@ -279,15 +279,17 @@ func (m *Manager) allGroups() map[string][]*targetgroup.Group {
defer m.mtx.RUnlock()
tSets := map[string][]*targetgroup.Group{}
+ n := map[string]int{}
for pkey, tsets := range m.targets {
- var n int
for _, tg := range tsets {
// Even if the target group 'tg' is empty we still need to send it to the 'Scrape manager'
// to signal that it needs to stop all scrape loops for this target set.
tSets[pkey.setName] = append(tSets[pkey.setName], tg)
- n += len(tg.Targets)
+ n[pkey.setName] += len(tg.Targets)
}
- discoveredTargets.WithLabelValues(m.name, pkey.setName).Set(float64(n))
+ }
+ for setName, v := range n {
+ discoveredTargets.WithLabelValues(m.name, setName).Set(float64(v))
}
return tSets
}
diff --git a/vendor/github.com/prometheus/prometheus/discovery/marathon/marathon.go b/vendor/github.com/prometheus/prometheus/discovery/marathon/marathon.go
index efd4769e3..586245bc4 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/marathon/marathon.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/marathon/marathon.go
@@ -26,7 +26,7 @@ import (
"strings"
"time"
- "github.com/go-kit/kit/log"
+ "github.com/go-kit/log"
"github.com/pkg/errors"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
@@ -61,7 +61,8 @@ const (
// DefaultSDConfig is the default Marathon SD configuration.
var DefaultSDConfig = SDConfig{
- RefreshInterval: model.Duration(30 * time.Second),
+ RefreshInterval: model.Duration(30 * time.Second),
+ HTTPClientConfig: config.DefaultHTTPClientConfig,
}
func init() {
@@ -111,6 +112,9 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
if (len(c.HTTPClientConfig.BearerToken) > 0 || len(c.HTTPClientConfig.BearerTokenFile) > 0) && (len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0) {
return errors.New("marathon_sd: at most one of bearer_token, bearer_token_file, auth_token & auth_token_file must be configured")
}
+ if c.HTTPClientConfig.Authorization != nil && (len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0) {
+ return errors.New("marathon_sd: at most one of auth_token, auth_token_file & authorization must be configured")
+ }
return c.HTTPClientConfig.Validate()
}
@@ -127,7 +131,7 @@ type Discovery struct {
// NewDiscovery returns a new Marathon Discovery.
func NewDiscovery(conf SDConfig, logger log.Logger) (*Discovery, error) {
- rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "marathon_sd", false, false)
+ rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "marathon_sd", config.WithHTTP2Disabled())
if err != nil {
return nil, err
}
@@ -335,8 +339,13 @@ func fetchApps(ctx context.Context, client *http.Client, url string) (*appList,
return nil, errors.Errorf("non 2xx status '%v' response during marathon service discovery", resp.StatusCode)
}
+ b, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
var apps appList
- err = json.NewDecoder(resp.Body).Decode(&apps)
+ err = json.Unmarshal(b, &apps)
if err != nil {
return nil, errors.Wrapf(err, "%q", url)
}
diff --git a/vendor/github.com/prometheus/prometheus/discovery/moby/docker.go b/vendor/github.com/prometheus/prometheus/discovery/moby/docker.go
new file mode 100644
index 000000000..98dc42acb
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/discovery/moby/docker.go
@@ -0,0 +1,268 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package moby
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "net/http"
+ "net/url"
+ "strconv"
+ "time"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/client"
+ "github.com/go-kit/log"
+ "github.com/prometheus/common/config"
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/discovery"
+ "github.com/prometheus/prometheus/discovery/refresh"
+ "github.com/prometheus/prometheus/discovery/targetgroup"
+ "github.com/prometheus/prometheus/util/strutil"
+)
+
+const (
+ dockerLabel = model.MetaLabelPrefix + "docker_"
+ dockerLabelContainerPrefix = dockerLabel + "container_"
+ dockerLabelContainerID = dockerLabelContainerPrefix + "id"
+ dockerLabelContainerName = dockerLabelContainerPrefix + "name"
+ dockerLabelContainerNetworkMode = dockerLabelContainerPrefix + "network_mode"
+ dockerLabelContainerLabelPrefix = dockerLabelContainerPrefix + "label_"
+ dockerLabelNetworkPrefix = dockerLabel + "network_"
+ dockerLabelNetworkIP = dockerLabelNetworkPrefix + "ip"
+ dockerLabelPortPrefix = dockerLabel + "port_"
+ dockerLabelPortPrivate = dockerLabelPortPrefix + "private"
+ dockerLabelPortPublic = dockerLabelPortPrefix + "public"
+ dockerLabelPortPublicIP = dockerLabelPortPrefix + "public_ip"
+)
+
+// DefaultDockerSDConfig is the default Docker SD configuration.
+var DefaultDockerSDConfig = DockerSDConfig{
+ RefreshInterval: model.Duration(60 * time.Second),
+ Port: 80,
+ Filters: []Filter{},
+ HostNetworkingHost: "localhost",
+ HTTPClientConfig: config.DefaultHTTPClientConfig,
+}
+
+func init() {
+ discovery.RegisterConfig(&DockerSDConfig{})
+}
+
+// DockerSDConfig is the configuration for Docker (non-swarm) based service discovery.
+type DockerSDConfig struct {
+ HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
+
+ Host string `yaml:"host"`
+ Port int `yaml:"port"`
+ Filters []Filter `yaml:"filters"`
+ HostNetworkingHost string `yaml:"host_networking_host"`
+
+ RefreshInterval model.Duration `yaml:"refresh_interval"`
+}
+
+// Name returns the name of the Config.
+func (*DockerSDConfig) Name() string { return "docker" }
+
+// NewDiscoverer returns a Discoverer for the Config.
+func (c *DockerSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
+ return NewDockerDiscovery(c, opts.Logger)
+}
+
+// SetDirectory joins any relative file paths with dir.
+func (c *DockerSDConfig) SetDirectory(dir string) {
+ c.HTTPClientConfig.SetDirectory(dir)
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (c *DockerSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ *c = DefaultDockerSDConfig
+ type plain DockerSDConfig
+ err := unmarshal((*plain)(c))
+ if err != nil {
+ return err
+ }
+ if c.Host == "" {
+ return fmt.Errorf("host missing")
+ }
+ if _, err = url.Parse(c.Host); err != nil {
+ return err
+ }
+ return c.HTTPClientConfig.Validate()
+}
+
+type DockerDiscovery struct {
+ *refresh.Discovery
+ client *client.Client
+ port int
+ hostNetworkingHost string
+ filters filters.Args
+}
+
+// NewDockerDiscovery returns a new DockerDiscovery which periodically refreshes its targets.
+func NewDockerDiscovery(conf *DockerSDConfig, logger log.Logger) (*DockerDiscovery, error) {
+ var err error
+
+ d := &DockerDiscovery{
+ port: conf.Port,
+ hostNetworkingHost: conf.HostNetworkingHost,
+ }
+
+ hostURL, err := url.Parse(conf.Host)
+ if err != nil {
+ return nil, err
+ }
+
+ opts := []client.Opt{
+ client.WithHost(conf.Host),
+ client.WithAPIVersionNegotiation(),
+ }
+
+ d.filters = filters.NewArgs()
+ for _, f := range conf.Filters {
+ for _, v := range f.Values {
+ d.filters.Add(f.Name, v)
+ }
+ }
+
+ // There are protocols other than HTTP supported by the Docker daemon, like
+ // unix, which are not supported by the HTTP client. Passing HTTP client
+ // options to the Docker client makes those non-HTTP requests fail.
+ if hostURL.Scheme == "http" || hostURL.Scheme == "https" {
+ rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "docker_sd", config.WithHTTP2Disabled())
+ if err != nil {
+ return nil, err
+ }
+ opts = append(opts,
+ client.WithHTTPClient(&http.Client{
+ Transport: rt,
+ Timeout: time.Duration(conf.RefreshInterval),
+ }),
+ client.WithScheme(hostURL.Scheme),
+ client.WithHTTPHeaders(map[string]string{
+ "User-Agent": userAgent,
+ }),
+ )
+ }
+
+ d.client, err = client.NewClientWithOpts(opts...)
+ if err != nil {
+ return nil, fmt.Errorf("error setting up docker client: %w", err)
+ }
+
+ d.Discovery = refresh.NewDiscovery(
+ logger,
+ "docker",
+ time.Duration(conf.RefreshInterval),
+ d.refresh,
+ )
+ return d, nil
+}
+
+func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
+ tg := &targetgroup.Group{
+ Source: "Docker",
+ }
+
+ containers, err := d.client.ContainerList(ctx, types.ContainerListOptions{Filters: d.filters})
+ if err != nil {
+ return nil, fmt.Errorf("error while listing containers: %w", err)
+ }
+
+ networkLabels, err := getNetworksLabels(ctx, d.client, dockerLabel)
+ if err != nil {
+ return nil, fmt.Errorf("error while computing network labels: %w", err)
+ }
+
+ for _, c := range containers {
+ if len(c.Names) == 0 {
+ continue
+ }
+
+ commonLabels := map[string]string{
+ dockerLabelContainerID: c.ID,
+ dockerLabelContainerName: c.Names[0],
+ dockerLabelContainerNetworkMode: c.HostConfig.NetworkMode,
+ }
+
+ for k, v := range c.Labels {
+ ln := strutil.SanitizeLabelName(k)
+ commonLabels[dockerLabelContainerLabelPrefix+ln] = v
+ }
+
+ for _, n := range c.NetworkSettings.Networks {
+ var added bool
+
+ for _, p := range c.Ports {
+ if p.Type != "tcp" {
+ continue
+ }
+
+ labels := model.LabelSet{
+ dockerLabelNetworkIP: model.LabelValue(n.IPAddress),
+ dockerLabelPortPrivate: model.LabelValue(strconv.FormatUint(uint64(p.PrivatePort), 10)),
+ }
+
+ if p.PublicPort > 0 {
+ labels[dockerLabelPortPublic] = model.LabelValue(strconv.FormatUint(uint64(p.PublicPort), 10))
+ labels[dockerLabelPortPublicIP] = model.LabelValue(p.IP)
+ }
+
+ for k, v := range commonLabels {
+ labels[model.LabelName(k)] = model.LabelValue(v)
+ }
+
+ for k, v := range networkLabels[n.NetworkID] {
+ labels[model.LabelName(k)] = model.LabelValue(v)
+ }
+
+ addr := net.JoinHostPort(n.IPAddress, strconv.FormatUint(uint64(p.PrivatePort), 10))
+ labels[model.AddressLabel] = model.LabelValue(addr)
+ tg.Targets = append(tg.Targets, labels)
+ added = true
+ }
+
+ if !added {
+ // Use fallback port when no exposed ports are available or if all are non-TCP
+ labels := model.LabelSet{
+ dockerLabelNetworkIP: model.LabelValue(n.IPAddress),
+ }
+
+ for k, v := range commonLabels {
+ labels[model.LabelName(k)] = model.LabelValue(v)
+ }
+
+ for k, v := range networkLabels[n.NetworkID] {
+ labels[model.LabelName(k)] = model.LabelValue(v)
+ }
+
+ // Containers in host networking mode don't have ports,
+ // so they only end up here, not in the previous loop.
+ var addr string
+ if c.HostConfig.NetworkMode != "host" {
+ addr = net.JoinHostPort(n.IPAddress, strconv.FormatUint(uint64(d.port), 10))
+ } else {
+ addr = d.hostNetworkingHost
+ }
+
+ labels[model.AddressLabel] = model.LabelValue(addr)
+ tg.Targets = append(tg.Targets, labels)
+ }
+ }
+ }
+
+ return []*targetgroup.Group{tg}, nil
+}
diff --git a/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/dockerswarm.go b/vendor/github.com/prometheus/prometheus/discovery/moby/dockerswarm.go
similarity index 80%
rename from vendor/github.com/prometheus/prometheus/discovery/dockerswarm/dockerswarm.go
rename to vendor/github.com/prometheus/prometheus/discovery/moby/dockerswarm.go
index 2e0b477cd..a9eabfd72 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/dockerswarm.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/moby/dockerswarm.go
@@ -11,7 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package dockerswarm
+package moby
import (
"context"
@@ -22,7 +22,7 @@ import (
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
- "github.com/go-kit/kit/log"
+ "github.com/go-kit/log"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/common/version"
@@ -38,19 +38,20 @@ const (
var userAgent = fmt.Sprintf("Prometheus/%s", version.Version)
-// DefaultSDConfig is the default Docker Swarm SD configuration.
-var DefaultSDConfig = SDConfig{
- RefreshInterval: model.Duration(60 * time.Second),
- Port: 80,
- Filters: []Filter{},
+// DefaultDockerSwarmSDConfig is the default Docker Swarm SD configuration.
+var DefaultDockerSwarmSDConfig = DockerSwarmSDConfig{
+ RefreshInterval: model.Duration(60 * time.Second),
+ Port: 80,
+ Filters: []Filter{},
+ HTTPClientConfig: config.DefaultHTTPClientConfig,
}
func init() {
- discovery.RegisterConfig(&SDConfig{})
+ discovery.RegisterConfig(&DockerSwarmSDConfig{})
}
-// SDConfig is the configuration for Docker Swarm based service discovery.
-type SDConfig struct {
+// DockerSwarmSDConfig is the configuration for Docker Swarm based service discovery.
+type DockerSwarmSDConfig struct {
HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
Host string `yaml:"host"`
@@ -69,22 +70,22 @@ type Filter struct {
}
// Name returns the name of the Config.
-func (*SDConfig) Name() string { return "dockerswarm" }
+func (*DockerSwarmSDConfig) Name() string { return "dockerswarm" }
// NewDiscoverer returns a Discoverer for the Config.
-func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
+func (c *DockerSwarmSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
return NewDiscovery(c, opts.Logger)
}
// SetDirectory joins any relative file paths with dir.
-func (c *SDConfig) SetDirectory(dir string) {
+func (c *DockerSwarmSDConfig) SetDirectory(dir string) {
c.HTTPClientConfig.SetDirectory(dir)
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
- *c = DefaultSDConfig
- type plain SDConfig
+func (c *DockerSwarmSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ *c = DefaultDockerSwarmSDConfig
+ type plain DockerSwarmSDConfig
err := unmarshal((*plain)(c))
if err != nil {
return err
@@ -102,7 +103,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
default:
return fmt.Errorf("invalid role %s, expected tasks, services, or nodes", c.Role)
}
- return nil
+ return c.HTTPClientConfig.Validate()
}
// Discovery periodically performs Docker Swarm requests. It implements
@@ -116,7 +117,7 @@ type Discovery struct {
}
// NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
+func NewDiscovery(conf *DockerSwarmSDConfig, logger log.Logger) (*Discovery, error) {
var err error
d := &Discovery{
@@ -145,7 +146,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
// unix, which are not supported by the HTTP client. Passing HTTP client
// options to the Docker client makes those non-HTTP requests fail.
if hostURL.Scheme == "http" || hostURL.Scheme == "https" {
- rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "dockerswarm_sd", false, false)
+ rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "dockerswarm_sd", config.WithHTTP2Disabled())
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/prometheus/prometheus/discovery/moby/network.go b/vendor/github.com/prometheus/prometheus/discovery/moby/network.go
new file mode 100644
index 000000000..3982e5777
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/discovery/moby/network.go
@@ -0,0 +1,56 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package moby
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/client"
+ "github.com/prometheus/prometheus/util/strutil"
+)
+
+const (
+ labelNetworkPrefix = "network_"
+ labelNetworkID = labelNetworkPrefix + "id"
+ labelNetworkName = labelNetworkPrefix + "name"
+ labelNetworkScope = labelNetworkPrefix + "scope"
+ labelNetworkInternal = labelNetworkPrefix + "internal"
+ labelNetworkIngress = labelNetworkPrefix + "ingress"
+ labelNetworkLabelPrefix = labelNetworkPrefix + "label_"
+)
+
+func getNetworksLabels(ctx context.Context, client *client.Client, labelPrefix string) (map[string]map[string]string, error) {
+ networks, err := client.NetworkList(ctx, types.NetworkListOptions{})
+ if err != nil {
+ return nil, err
+ }
+ labels := make(map[string]map[string]string, len(networks))
+ for _, network := range networks {
+ labels[network.ID] = map[string]string{
+ labelPrefix + labelNetworkID: network.ID,
+ labelPrefix + labelNetworkName: network.Name,
+ labelPrefix + labelNetworkScope: network.Scope,
+ labelPrefix + labelNetworkInternal: fmt.Sprintf("%t", network.Internal),
+ labelPrefix + labelNetworkIngress: fmt.Sprintf("%t", network.Ingress),
+ }
+ for k, v := range network.Labels {
+ ln := strutil.SanitizeLabelName(k)
+ labels[network.ID][labelPrefix+labelNetworkLabelPrefix+ln] = v
+ }
+ }
+
+ return labels, nil
+}
diff --git a/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/nodes.go b/vendor/github.com/prometheus/prometheus/discovery/moby/nodes.go
similarity index 99%
rename from vendor/github.com/prometheus/prometheus/discovery/dockerswarm/nodes.go
rename to vendor/github.com/prometheus/prometheus/discovery/moby/nodes.go
index 79727a949..85092f907 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/nodes.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/moby/nodes.go
@@ -11,7 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package dockerswarm
+package moby
import (
"context"
diff --git a/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/services.go b/vendor/github.com/prometheus/prometheus/discovery/moby/services.go
similarity index 98%
rename from vendor/github.com/prometheus/prometheus/discovery/dockerswarm/services.go
rename to vendor/github.com/prometheus/prometheus/discovery/moby/services.go
index ae46bfd4d..1d472b5c0 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/services.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/moby/services.go
@@ -11,7 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package dockerswarm
+package moby
import (
"context"
@@ -51,7 +51,7 @@ func (d *Discovery) refreshServices(ctx context.Context) ([]*targetgroup.Group,
return nil, fmt.Errorf("error while listing swarm services: %w", err)
}
- networkLabels, err := d.getNetworksLabels(ctx)
+ networkLabels, err := getNetworksLabels(ctx, d.client, swarmLabel)
if err != nil {
return nil, fmt.Errorf("error while computing swarm network labels: %w", err)
}
diff --git a/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/tasks.go b/vendor/github.com/prometheus/prometheus/discovery/moby/tasks.go
similarity index 98%
rename from vendor/github.com/prometheus/prometheus/discovery/dockerswarm/tasks.go
rename to vendor/github.com/prometheus/prometheus/discovery/moby/tasks.go
index 04ced3d11..002f536d0 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/tasks.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/moby/tasks.go
@@ -11,7 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package dockerswarm
+package moby
import (
"context"
@@ -58,7 +58,7 @@ func (d *Discovery) refreshTasks(ctx context.Context) ([]*targetgroup.Group, err
return nil, fmt.Errorf("error while computing nodes labels and ports: %w", err)
}
- networkLabels, err := d.getNetworksLabels(ctx)
+ networkLabels, err := getNetworksLabels(ctx, d.client, swarmLabel)
if err != nil {
return nil, fmt.Errorf("error while computing swarm network labels: %w", err)
}
diff --git a/vendor/github.com/prometheus/prometheus/discovery/openstack/hypervisor.go b/vendor/github.com/prometheus/prometheus/discovery/openstack/hypervisor.go
index 81d0e10c9..594f4e433 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/openstack/hypervisor.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/openstack/hypervisor.go
@@ -18,7 +18,7 @@ import (
"fmt"
"net"
- "github.com/go-kit/kit/log"
+ "github.com/go-kit/log"
"github.com/gophercloud/gophercloud"
"github.com/gophercloud/gophercloud/openstack"
"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors"
@@ -74,7 +74,7 @@ func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group
}
// OpenStack API reference
// https://developer.openstack.org/api-ref/compute/#list-hypervisors-details
- pagerHypervisors := hypervisors.List(client)
+ pagerHypervisors := hypervisors.List(client, nil)
err = pagerHypervisors.EachPage(func(page pagination.Page) (bool, error) {
hypervisorList, err := hypervisors.ExtractHypervisors(page)
if err != nil {
diff --git a/vendor/github.com/prometheus/prometheus/discovery/openstack/instance.go b/vendor/github.com/prometheus/prometheus/discovery/openstack/instance.go
index a53d6274e..ab2221f4f 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/openstack/instance.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/openstack/instance.go
@@ -18,8 +18,8 @@ import (
"fmt"
"net"
- "github.com/go-kit/kit/log"
- "github.com/go-kit/kit/log/level"
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
"github.com/gophercloud/gophercloud"
"github.com/gophercloud/gophercloud/openstack"
"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips"
diff --git a/vendor/github.com/prometheus/prometheus/discovery/openstack/openstack.go b/vendor/github.com/prometheus/prometheus/discovery/openstack/openstack.go
index c0e49f61e..2a341976e 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/openstack/openstack.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/openstack/openstack.go
@@ -19,7 +19,7 @@ import (
"net/http"
"time"
- "github.com/go-kit/kit/log"
+ "github.com/go-kit/log"
"github.com/gophercloud/gophercloud"
"github.com/gophercloud/gophercloud/openstack"
conntrack "github.com/mwitkow/go-conntrack"
diff --git a/vendor/github.com/prometheus/prometheus/discovery/refresh/refresh.go b/vendor/github.com/prometheus/prometheus/discovery/refresh/refresh.go
index c48524c50..1b42a70c5 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/refresh/refresh.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/refresh/refresh.go
@@ -17,8 +17,8 @@ import (
"context"
"time"
- "github.com/go-kit/kit/log"
- "github.com/go-kit/kit/log/level"
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/discovery/targetgroup"
diff --git a/vendor/github.com/prometheus/prometheus/discovery/scaleway/baremetal.go b/vendor/github.com/prometheus/prometheus/discovery/scaleway/baremetal.go
new file mode 100644
index 000000000..066c0b604
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/discovery/scaleway/baremetal.go
@@ -0,0 +1,200 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package scaleway
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/prometheus/common/config"
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/common/version"
+ "github.com/prometheus/prometheus/discovery/refresh"
+ "github.com/prometheus/prometheus/discovery/targetgroup"
+ "github.com/scaleway/scaleway-sdk-go/api/baremetal/v1"
+ "github.com/scaleway/scaleway-sdk-go/scw"
+)
+
+type baremetalDiscovery struct {
+ *refresh.Discovery
+ client *scw.Client
+ port int
+ zone string
+ project string
+ accessKey string
+ secretKey string
+ nameFilter string
+ tagsFilter []string
+}
+
+const (
+ baremetalLabelPrefix = metaLabelPrefix + "baremetal_"
+
+ baremetalIDLabel = baremetalLabelPrefix + "id"
+ baremetalPublicIPv4Label = baremetalLabelPrefix + "public_ipv4"
+ baremetalPublicIPv6Label = baremetalLabelPrefix + "public_ipv6"
+ baremetalNameLabel = baremetalLabelPrefix + "name"
+ baremetalOSNameLabel = baremetalLabelPrefix + "os_name"
+ baremetalOSVersionLabel = baremetalLabelPrefix + "os_version"
+ baremetalProjectLabel = baremetalLabelPrefix + "project_id"
+ baremetalStatusLabel = baremetalLabelPrefix + "status"
+ baremetalTagsLabel = baremetalLabelPrefix + "tags"
+ baremetalTypeLabel = baremetalLabelPrefix + "type"
+ baremetalZoneLabel = baremetalLabelPrefix + "zone"
+)
+
+func newBaremetalDiscovery(conf *SDConfig) (*baremetalDiscovery, error) {
+ d := &baremetalDiscovery{
+ port: conf.Port,
+ zone: conf.Zone,
+ project: conf.Project,
+ accessKey: conf.AccessKey,
+ secretKey: string(conf.SecretKey),
+ nameFilter: conf.NameFilter,
+ tagsFilter: conf.TagsFilter,
+ }
+
+ rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "scaleway_sd", config.WithHTTP2Disabled())
+ if err != nil {
+ return nil, err
+ }
+
+ if conf.SecretKeyFile != "" {
+ rt, err = newAuthTokenFileRoundTripper(conf.SecretKeyFile, rt)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ profile, err := loadProfile(conf)
+ if err != nil {
+ return nil, err
+ }
+
+ d.client, err = scw.NewClient(
+ scw.WithHTTPClient(&http.Client{
+ Transport: rt,
+ Timeout: time.Duration(conf.RefreshInterval),
+ }),
+ scw.WithUserAgent(fmt.Sprintf("Prometheus/%s", version.Version)),
+ scw.WithProfile(profile),
+ )
+ if err != nil {
+ return nil, fmt.Errorf("error setting up scaleway client: %w", err)
+ }
+
+ return d, nil
+}
+
+func (d *baremetalDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
+ api := baremetal.NewAPI(d.client)
+
+ req := &baremetal.ListServersRequest{}
+
+ if d.nameFilter != "" {
+ req.Name = scw.StringPtr(d.nameFilter)
+ }
+
+ if d.tagsFilter != nil {
+ req.Tags = d.tagsFilter
+ }
+
+ servers, err := api.ListServers(req, scw.WithAllPages(), scw.WithContext(ctx))
+ if err != nil {
+ return nil, err
+ }
+
+ offers, err := api.ListOffers(&baremetal.ListOffersRequest{}, scw.WithAllPages())
+ if err != nil {
+ return nil, err
+ }
+
+ osFullList, err := api.ListOS(&baremetal.ListOSRequest{}, scw.WithAllPages())
+ if err != nil {
+ return nil, err
+ }
+
+ var targets []model.LabelSet
+ for _, server := range servers.Servers {
+ labels := model.LabelSet{
+ baremetalIDLabel: model.LabelValue(server.ID),
+ baremetalNameLabel: model.LabelValue(server.Name),
+ baremetalZoneLabel: model.LabelValue(server.Zone.String()),
+ baremetalStatusLabel: model.LabelValue(server.Status),
+ baremetalProjectLabel: model.LabelValue(server.ProjectID),
+ }
+
+ for _, offer := range offers.Offers {
+ if server.OfferID == offer.ID {
+ labels[baremetalTypeLabel] = model.LabelValue(offer.Name)
+ break
+ }
+ }
+
+ if server.Install != nil {
+ for _, os := range osFullList.Os {
+ if server.Install.OsID == os.ID {
+ labels[baremetalOSNameLabel] = model.LabelValue(os.Name)
+ labels[baremetalOSVersionLabel] = model.LabelValue(os.Version)
+ break
+ }
+ }
+ }
+
+ if len(server.Tags) > 0 {
+ // We surround the separated list with the separator as well. This way regular expressions
+ // in relabeling rules don't have to consider tag positions.
+ tags := separator + strings.Join(server.Tags, separator) + separator
+ labels[baremetalTagsLabel] = model.LabelValue(tags)
+ }
+
+ for _, ip := range server.IPs {
+ switch v := ip.Version.String(); v {
+ case "IPv4":
+ if _, ok := labels[baremetalPublicIPv4Label]; ok {
+ // If the server has multiple IPv4 addresses, we only take the first one.
+ // This should not happen.
+ continue
+ }
+ labels[baremetalPublicIPv4Label] = model.LabelValue(ip.Address.String())
+
+ // We always default the __address__ to IPv4.
+ addr := net.JoinHostPort(ip.Address.String(), strconv.FormatUint(uint64(d.port), 10))
+ labels[model.AddressLabel] = model.LabelValue(addr)
+ case "IPv6":
+ if _, ok := labels[baremetalPublicIPv6Label]; ok {
+ // If the server has multiple IPv6 addresses, we only take the first one.
+ // This should not happen.
+ continue
+ }
+ labels[baremetalPublicIPv6Label] = model.LabelValue(ip.Address.String())
+ if _, ok := labels[model.AddressLabel]; !ok {
+ // This server does not have an IPv4 or we have not parsed it
+ // yet.
+ addr := net.JoinHostPort(ip.Address.String(), strconv.FormatUint(uint64(d.port), 10))
+ labels[model.AddressLabel] = model.LabelValue(addr)
+ }
+ default:
+ return nil, fmt.Errorf("unknown IP version: %s", v)
+ }
+ }
+ targets = append(targets, labels)
+ }
+ return []*targetgroup.Group{{Source: "scaleway", Targets: targets}}, nil
+}
diff --git a/vendor/github.com/prometheus/prometheus/discovery/scaleway/instance.go b/vendor/github.com/prometheus/prometheus/discovery/scaleway/instance.go
new file mode 100644
index 000000000..8b2603165
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/discovery/scaleway/instance.go
@@ -0,0 +1,196 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package scaleway
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/prometheus/common/config"
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/common/version"
+ "github.com/prometheus/prometheus/discovery/refresh"
+ "github.com/prometheus/prometheus/discovery/targetgroup"
+ "github.com/scaleway/scaleway-sdk-go/api/instance/v1"
+ "github.com/scaleway/scaleway-sdk-go/scw"
+)
+
+const (
+ instanceLabelPrefix = metaLabelPrefix + "instance_"
+
+ instanceBootTypeLabel = instanceLabelPrefix + "boot_type"
+ instanceHostnameLabel = instanceLabelPrefix + "hostname"
+ instanceIDLabel = instanceLabelPrefix + "id"
+ instanceImageArchLabel = instanceLabelPrefix + "image_arch"
+ instanceImageIDLabel = instanceLabelPrefix + "image_id"
+ instanceImageNameLabel = instanceLabelPrefix + "image_name"
+ instanceLocationClusterID = instanceLabelPrefix + "location_cluster_id"
+ instanceLocationHypervisorID = instanceLabelPrefix + "location_hypervisor_id"
+ instanceLocationNodeID = instanceLabelPrefix + "location_node_id"
+ instanceNameLabel = instanceLabelPrefix + "name"
+ instanceOrganizationLabel = instanceLabelPrefix + "organization_id"
+ instancePrivateIPv4Label = instanceLabelPrefix + "private_ipv4"
+ instanceProjectLabel = instanceLabelPrefix + "project_id"
+ instancePublicIPv4Label = instanceLabelPrefix + "public_ipv4"
+ instancePublicIPv6Label = instanceLabelPrefix + "public_ipv6"
+ instanceSecurityGroupIDLabel = instanceLabelPrefix + "security_group_id"
+ instanceSecurityGroupNameLabel = instanceLabelPrefix + "security_group_name"
+ instanceStateLabel = instanceLabelPrefix + "status"
+ instanceTagsLabel = instanceLabelPrefix + "tags"
+ instanceTypeLabel = instanceLabelPrefix + "type"
+ instanceZoneLabel = instanceLabelPrefix + "zone"
+ instanceRegionLabel = instanceLabelPrefix + "region"
+)
+
+type instanceDiscovery struct {
+ *refresh.Discovery
+ client *scw.Client
+ port int
+ zone string
+ project string
+ accessKey string
+ secretKey string
+ nameFilter string
+ tagsFilter []string
+}
+
+func newInstanceDiscovery(conf *SDConfig) (*instanceDiscovery, error) {
+ d := &instanceDiscovery{
+ port: conf.Port,
+ zone: conf.Zone,
+ project: conf.Project,
+ accessKey: conf.AccessKey,
+ secretKey: conf.secretKeyForConfig(),
+ nameFilter: conf.NameFilter,
+ tagsFilter: conf.TagsFilter,
+ }
+
+ rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "scaleway_sd", config.WithHTTP2Disabled())
+ if err != nil {
+ return nil, err
+ }
+
+ if conf.SecretKeyFile != "" {
+ rt, err = newAuthTokenFileRoundTripper(conf.SecretKeyFile, rt)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ profile, err := loadProfile(conf)
+ if err != nil {
+ return nil, err
+ }
+
+ d.client, err = scw.NewClient(
+ scw.WithHTTPClient(&http.Client{
+ Transport: rt,
+ Timeout: time.Duration(conf.RefreshInterval),
+ }),
+ scw.WithUserAgent(fmt.Sprintf("Prometheus/%s", version.Version)),
+ scw.WithProfile(profile),
+ )
+ if err != nil {
+ return nil, fmt.Errorf("error setting up scaleway client: %w", err)
+ }
+
+ return d, nil
+}
+
+func (d *instanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
+ api := instance.NewAPI(d.client)
+
+ req := &instance.ListServersRequest{}
+
+ if d.nameFilter != "" {
+ req.Name = scw.StringPtr(d.nameFilter)
+ }
+
+ if d.tagsFilter != nil {
+ req.Tags = d.tagsFilter
+ }
+
+ servers, err := api.ListServers(req, scw.WithAllPages(), scw.WithContext(ctx))
+ if err != nil {
+ return nil, err
+ }
+
+ var targets []model.LabelSet
+ for _, server := range servers.Servers {
+ labels := model.LabelSet{
+ instanceBootTypeLabel: model.LabelValue(server.BootType),
+ instanceHostnameLabel: model.LabelValue(server.Hostname),
+ instanceIDLabel: model.LabelValue(server.ID),
+ instanceNameLabel: model.LabelValue(server.Name),
+ instanceOrganizationLabel: model.LabelValue(server.Organization),
+ instanceProjectLabel: model.LabelValue(server.Project),
+ instanceStateLabel: model.LabelValue(server.State),
+ instanceTypeLabel: model.LabelValue(server.CommercialType),
+ instanceZoneLabel: model.LabelValue(server.Zone.String()),
+ }
+
+ if server.Image != nil {
+ labels[instanceImageArchLabel] = model.LabelValue(server.Image.Arch)
+ labels[instanceImageIDLabel] = model.LabelValue(server.Image.ID)
+ labels[instanceImageNameLabel] = model.LabelValue(server.Image.Name)
+ }
+
+ if server.Location != nil {
+ labels[instanceLocationClusterID] = model.LabelValue(server.Location.ClusterID)
+ labels[instanceLocationHypervisorID] = model.LabelValue(server.Location.HypervisorID)
+ labels[instanceLocationNodeID] = model.LabelValue(server.Location.NodeID)
+ }
+
+ if server.SecurityGroup != nil {
+ labels[instanceSecurityGroupIDLabel] = model.LabelValue(server.SecurityGroup.ID)
+ labels[instanceSecurityGroupNameLabel] = model.LabelValue(server.SecurityGroup.Name)
+ }
+
+ if region, err := server.Zone.Region(); err == nil {
+ labels[instanceRegionLabel] = model.LabelValue(region.String())
+ }
+
+ if len(server.Tags) > 0 {
+ // We surround the separated list with the separator as well. This way regular expressions
+ // in relabeling rules don't have to consider tag positions.
+ tags := separator + strings.Join(server.Tags, separator) + separator
+ labels[instanceTagsLabel] = model.LabelValue(tags)
+ }
+
+ if server.IPv6 != nil {
+ labels[instancePublicIPv6Label] = model.LabelValue(server.IPv6.Address.String())
+ }
+
+ if server.PublicIP != nil {
+ labels[instancePublicIPv4Label] = model.LabelValue(server.PublicIP.Address.String())
+ }
+
+ if server.PrivateIP != nil {
+ labels[instancePrivateIPv4Label] = model.LabelValue(*server.PrivateIP)
+
+ addr := net.JoinHostPort(*server.PrivateIP, strconv.FormatUint(uint64(d.port), 10))
+ labels[model.AddressLabel] = model.LabelValue(addr)
+
+ targets = append(targets, labels)
+ }
+
+ }
+
+ return []*targetgroup.Group{{Source: "scaleway", Targets: targets}}, nil
+}
diff --git a/vendor/github.com/prometheus/prometheus/discovery/scaleway/scaleway.go b/vendor/github.com/prometheus/prometheus/discovery/scaleway/scaleway.go
new file mode 100644
index 000000000..c8689cb94
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/discovery/scaleway/scaleway.go
@@ -0,0 +1,245 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package scaleway
+
+import (
+ "context"
+ "io/ioutil"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/go-kit/log"
+ "github.com/pkg/errors"
+ "github.com/prometheus/common/config"
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/discovery"
+ "github.com/prometheus/prometheus/discovery/refresh"
+ "github.com/prometheus/prometheus/discovery/targetgroup"
+ "github.com/scaleway/scaleway-sdk-go/scw"
+)
+
+// metaLabelPrefix is the meta prefix used for all meta labels
+// in this discovery.
+const (
+ metaLabelPrefix = model.MetaLabelPrefix + "scaleway_"
+ separator = ","
+)
+
+// role is the role of the target within the Scaleway Ecosystem.
+type role string
+
+// The valid options for role.
+const (
+ // Scaleway Elements Baremetal
+ // https://www.scaleway.com/en/bare-metal-servers/
+ roleBaremetal role = "baremetal"
+
+ // Scaleway Elements Instance
+ // https://www.scaleway.com/en/virtual-instances/
+ roleInstance role = "instance"
+)
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (c *role) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ if err := unmarshal((*string)(c)); err != nil {
+ return err
+ }
+ switch *c {
+ case roleInstance, roleBaremetal:
+ return nil
+ default:
+ return errors.Errorf("unknown role %q", *c)
+ }
+}
+
+// DefaultSDConfig is the default Scaleway Service Discovery configuration.
+var DefaultSDConfig = SDConfig{
+ Port: 80,
+ RefreshInterval: model.Duration(60 * time.Second),
+ HTTPClientConfig: config.DefaultHTTPClientConfig,
+ Zone: scw.ZoneFrPar1.String(),
+ APIURL: "https://api.scaleway.com",
+}
+
+type SDConfig struct {
+ // Project: The Scaleway Project ID used to filter discovery on.
+ Project string `yaml:"project_id"`
+
+ // APIURL: URL of the Scaleway API to use.
+ APIURL string `yaml:"api_url,omitempty"`
+ // Zone: The zone of the scrape targets.
+ // If you need to configure multiple zones use multiple scaleway_sd_configs
+ Zone string `yaml:"zone"`
+ // AccessKey used to authenticate on Scaleway APIs.
+ AccessKey string `yaml:"access_key"`
+ // SecretKey used to authenticate on Scaleway APIs.
+ SecretKey config.Secret `yaml:"secret_key"`
+ // SecretKeyFile points to a file containing the secret key used to authenticate on Scaleway APIs.
+ SecretKeyFile string `yaml:"secret_key_file"`
+ // NameFilter to filter on during the ListServers.
+ NameFilter string `yaml:"name_filter,omitempty"`
+ // TagsFilter to filter on during the ListServers.
+ TagsFilter []string `yaml:"tags_filter,omitempty"`
+
+ HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
+
+ RefreshInterval model.Duration `yaml:"refresh_interval"`
+ Port int `yaml:"port"`
+ // Role can be either instance or baremetal
+ Role role `yaml:"role"`
+}
+
+func (c SDConfig) Name() string {
+ return "scaleway"
+}
+
+// secretKeyForConfig returns a secret key that looks like a UUID, even if we
+// take the actual secret from a file.
+func (c SDConfig) secretKeyForConfig() string {
+ if c.SecretKeyFile != "" {
+ return "00000000-0000-0000-0000-000000000000"
+ }
+ return string(c.SecretKey)
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ *c = DefaultSDConfig
+ type plain SDConfig
+ err := unmarshal((*plain)(c))
+ if err != nil {
+ return err
+ }
+
+ if c.Role == "" {
+ return errors.New("role missing (one of: instance, baremetal)")
+ }
+
+ if c.Project == "" {
+ return errors.New("project_id is mandatory")
+ }
+
+ if c.SecretKey == "" && c.SecretKeyFile == "" {
+ return errors.New("one of secret_key & secret_key_file must be configured")
+ }
+
+ if c.SecretKey != "" && c.SecretKeyFile != "" {
+ return errors.New("at most one of secret_key & secret_key_file must be configured")
+ }
+
+ if c.AccessKey == "" {
+ return errors.New("access_key is mandatory")
+ }
+
+ profile, err := loadProfile(c)
+ if err != nil {
+ return err
+ }
+ _, err = scw.NewClient(
+ scw.WithProfile(profile),
+ )
+ if err != nil {
+ return err
+ }
+
+ return c.HTTPClientConfig.Validate()
+}
+
+func (c SDConfig) NewDiscoverer(options discovery.DiscovererOptions) (discovery.Discoverer, error) {
+ return NewDiscovery(&c, options.Logger)
+}
+
+// SetDirectory joins any relative file paths with dir.
+func (c *SDConfig) SetDirectory(dir string) {
+ c.SecretKeyFile = config.JoinDir(dir, c.SecretKeyFile)
+ c.HTTPClientConfig.SetDirectory(dir)
+}
+
+func init() {
+ discovery.RegisterConfig(&SDConfig{})
+}
+
+// Discovery periodically performs Scaleway requests. It implements
+// the Discoverer interface.
+type Discovery struct {
+}
+
+func NewDiscovery(conf *SDConfig, logger log.Logger) (*refresh.Discovery, error) {
+ r, err := newRefresher(conf)
+ if err != nil {
+ return nil, err
+ }
+
+ return refresh.NewDiscovery(
+ logger,
+ "scaleway",
+ time.Duration(conf.RefreshInterval),
+ r.refresh,
+ ), nil
+}
+
+type refresher interface {
+ refresh(context.Context) ([]*targetgroup.Group, error)
+}
+
+func newRefresher(conf *SDConfig) (refresher, error) {
+ switch conf.Role {
+ case roleBaremetal:
+ return newBaremetalDiscovery(conf)
+ case roleInstance:
+ return newInstanceDiscovery(conf)
+ }
+ return nil, errors.New("unknown Scaleway discovery role")
+}
+
+func loadProfile(sdConfig *SDConfig) (*scw.Profile, error) {
+ // Profile coming from Prometheus Configuration file
+ prometheusConfigProfile := &scw.Profile{
+ DefaultZone: scw.StringPtr(sdConfig.Zone),
+ APIURL: scw.StringPtr(sdConfig.APIURL),
+ SecretKey: scw.StringPtr(sdConfig.secretKeyForConfig()),
+ AccessKey: scw.StringPtr(sdConfig.AccessKey),
+ DefaultProjectID: scw.StringPtr(sdConfig.Project),
+ SendTelemetry: scw.BoolPtr(false),
+ }
+
+ return prometheusConfigProfile, nil
+}
+
+type authTokenFileRoundTripper struct {
+ authTokenFile string
+ rt http.RoundTripper
+}
+
+// newAuthTokenFileRoundTripper adds the auth token read from the file to a request.
+func newAuthTokenFileRoundTripper(tokenFile string, rt http.RoundTripper) (http.RoundTripper, error) {
+ // fail-fast if we can't read the file.
+ _, err := ioutil.ReadFile(tokenFile)
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to read auth token file %s", tokenFile)
+ }
+ return &authTokenFileRoundTripper{tokenFile, rt}, nil
+}
+
+func (rt *authTokenFileRoundTripper) RoundTrip(request *http.Request) (*http.Response, error) {
+ b, err := ioutil.ReadFile(rt.authTokenFile)
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to read auth token file %s", rt.authTokenFile)
+ }
+ authToken := strings.TrimSpace(string(b))
+
+ request.Header.Set("X-Auth-Token", authToken)
+ return rt.rt.RoundTrip(request)
+}
diff --git a/vendor/github.com/prometheus/prometheus/discovery/triton/triton.go b/vendor/github.com/prometheus/prometheus/discovery/triton/triton.go
index 6cd5e0aad..187d31ab6 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/triton/triton.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/triton/triton.go
@@ -24,7 +24,7 @@ import (
"strings"
"time"
- "github.com/go-kit/kit/log"
+ "github.com/go-kit/log"
conntrack "github.com/mwitkow/go-conntrack"
"github.com/pkg/errors"
"github.com/prometheus/common/config"
diff --git a/vendor/github.com/prometheus/prometheus/discovery/xds/client.go b/vendor/github.com/prometheus/prometheus/discovery/xds/client.go
new file mode 100644
index 000000000..cd8ffb017
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/discovery/xds/client.go
@@ -0,0 +1,226 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package xds
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "path"
+ "time"
+
+ envoy_core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
+ v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
+ "github.com/prometheus/common/config"
+ "github.com/prometheus/common/version"
+)
+
+var userAgent = fmt.Sprintf("Prometheus/%s", version.Version)
+
+// ResourceClient exposes the xDS protocol for a single resource type.
+// See https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol#rest-json-polling-subscriptions .
+type ResourceClient interface {
+ // ResourceTypeURL is the type URL of the resource.
+ ResourceTypeURL() string
+
+ // Server is the xDS Management server.
+ Server() string
+
+ // Fetch requests the latest view of the entire resource state.
+ // If no updates have been made since the last request, the response will be nil.
+ Fetch(ctx context.Context) (*v3.DiscoveryResponse, error)
+
+ // ID returns the ID of the client that is sent to the xDS server.
+ ID() string
+
+ // Close releases currently held resources.
+ Close()
+}
+
+type HTTPResourceClient struct {
+ client *http.Client
+ config *HTTPResourceClientConfig
+
+ // endpoint is the fully-constructed xDS HTTP endpoint.
+ endpoint string
+ // Caching.
+ latestVersion string
+ latestNonce string
+}
+
+type HTTPResourceClientConfig struct {
+ // HTTP config.
+ config.HTTPClientConfig
+
+ Name string
+
+ // ExtraQueryParams are extra query parameters to attach to the request URL.
+ ExtraQueryParams url.Values
+
+ // General xDS config.
+
+ // The timeout for a single fetch request.
+ Timeout time.Duration
+
+ // ResourceType is the xds type, e.g., clusters
+ // which is used in the discovery POST request.
+ ResourceType string
+ // ResourceTypeURL is the Google type url for the resource, e.g., type.googleapis.com/envoy.api.v2.Cluster.
+ ResourceTypeURL string
+ // Server is the xDS management server.
+ Server string
+ // ClientID is used to identify the client with the management server.
+ ClientID string
+}
+
+func NewHTTPResourceClient(conf *HTTPResourceClientConfig, protocolVersion ProtocolVersion) (*HTTPResourceClient, error) {
+ if protocolVersion != ProtocolV3 {
+ return nil, errors.New("only the v3 protocol is supported")
+ }
+
+ if len(conf.Server) == 0 {
+ return nil, errors.New("empty xDS server")
+ }
+
+ serverURL, err := url.Parse(conf.Server)
+ if err != nil {
+ return nil, err
+ }
+
+ endpointURL, err := makeXDSResourceHTTPEndpointURL(protocolVersion, serverURL, conf.ResourceType)
+ if err != nil {
+ return nil, err
+ }
+
+ if conf.ExtraQueryParams != nil {
+ endpointURL.RawQuery = conf.ExtraQueryParams.Encode()
+ }
+
+ client, err := config.NewClientFromConfig(conf.HTTPClientConfig, conf.Name, config.WithHTTP2Disabled(), config.WithIdleConnTimeout(conf.Timeout))
+ if err != nil {
+ return nil, err
+ }
+
+ client.Timeout = conf.Timeout
+
+ return &HTTPResourceClient{
+ client: client,
+ config: conf,
+ endpoint: endpointURL.String(),
+ latestVersion: "",
+ latestNonce: "",
+ }, nil
+}
+
+func makeXDSResourceHTTPEndpointURL(protocolVersion ProtocolVersion, serverURL *url.URL, resourceType string) (*url.URL, error) {
+ if serverURL == nil {
+ return nil, errors.New("empty xDS server URL")
+ }
+
+ if len(serverURL.Scheme) == 0 || len(serverURL.Host) == 0 {
+ return nil, errors.New("invalid xDS server URL")
+ }
+
+ if serverURL.Scheme != "http" && serverURL.Scheme != "https" {
+ return nil, errors.New("invalid xDS server URL protocol. must be either 'http' or 'https'")
+ }
+
+ serverURL.Path = path.Join(serverURL.Path, string(protocolVersion), fmt.Sprintf("discovery:%s", resourceType))
+
+ return serverURL, nil
+}
+
+func (rc *HTTPResourceClient) Server() string {
+ return rc.config.Server
+}
+
+func (rc *HTTPResourceClient) ResourceTypeURL() string {
+ return rc.config.ResourceTypeURL
+}
+
+func (rc *HTTPResourceClient) ID() string {
+ return rc.config.ClientID
+}
+
+func (rc *HTTPResourceClient) Close() {
+ rc.client.CloseIdleConnections()
+}
+
+// Fetch requests the latest state of the resources from the xDS server and cache the version.
+// Returns a nil response if the current local version is up to date.
+func (rc *HTTPResourceClient) Fetch(ctx context.Context) (*v3.DiscoveryResponse, error) {
+ discoveryReq := &v3.DiscoveryRequest{
+ VersionInfo: rc.latestVersion,
+ ResponseNonce: rc.latestNonce,
+ TypeUrl: rc.ResourceTypeURL(),
+ ResourceNames: []string{},
+ Node: &envoy_core.Node{
+ Id: rc.ID(),
+ },
+ }
+
+ reqBody, err := protoJSONMarshalOptions.Marshal(discoveryReq)
+ if err != nil {
+ return nil, err
+ }
+
+ request, err := http.NewRequest("POST", rc.endpoint, bytes.NewBuffer(reqBody))
+ if err != nil {
+ return nil, err
+ }
+ request = request.WithContext(ctx)
+
+ request.Header.Add("User-Agent", userAgent)
+ request.Header.Add("Content-Type", "application/json")
+ request.Header.Add("Accept", "application/json")
+
+ resp, err := rc.client.Do(request)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ io.Copy(ioutil.Discard, resp.Body)
+ resp.Body.Close()
+ }()
+
+ if resp.StatusCode == http.StatusNotModified {
+ // Empty response, already have the latest.
+ return nil, nil
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("non 200 status '%d' response during xDS fetch", resp.StatusCode)
+ }
+
+ respBody, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ discoveryRes := &v3.DiscoveryResponse{}
+ if err = protoJSONUnmarshalOptions.Unmarshal(respBody, discoveryRes); err != nil {
+ return nil, err
+ }
+
+ // Cache the latest nonce + version info.
+ rc.latestNonce = discoveryRes.Nonce
+ rc.latestVersion = discoveryRes.VersionInfo
+
+ return discoveryRes, nil
+}
diff --git a/vendor/github.com/prometheus/prometheus/discovery/xds/kuma.go b/vendor/github.com/prometheus/prometheus/discovery/xds/kuma.go
new file mode 100644
index 000000000..77f9f0561
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/discovery/xds/kuma.go
@@ -0,0 +1,222 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package xds
+
+import (
+ "fmt"
+ "net/url"
+ "time"
+
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
+ "github.com/pkg/errors"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/common/config"
+ "github.com/prometheus/common/model"
+ "google.golang.org/protobuf/types/known/anypb"
+
+ "github.com/prometheus/prometheus/discovery"
+ "github.com/prometheus/prometheus/util/osutil"
+ "github.com/prometheus/prometheus/util/strutil"
+)
+
+var (
+ // DefaultKumaSDConfig is the default Kuma MADS SD configuration.
+ DefaultKumaSDConfig = KumaSDConfig{
+ HTTPClientConfig: config.DefaultHTTPClientConfig,
+ RefreshInterval: model.Duration(15 * time.Second),
+ FetchTimeout: model.Duration(2 * time.Minute),
+ }
+
+ kumaFetchFailuresCount = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: namespace,
+ Name: "sd_kuma_fetch_failures_total",
+ Help: "The number of Kuma MADS fetch call failures.",
+ })
+ kumaFetchSkipUpdateCount = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: namespace,
+ Name: "sd_kuma_fetch_skipped_updates_total",
+ Help: "The number of Kuma MADS fetch calls that result in no updates to the targets.",
+ })
+ kumaFetchDuration = prometheus.NewSummary(
+ prometheus.SummaryOpts{
+ Namespace: namespace,
+ Name: "sd_kuma_fetch_duration_seconds",
+ Help: "The duration of a Kuma MADS fetch call.",
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+ },
+ )
+)
+
+const (
+ // kumaMetaLabelPrefix is the meta prefix used for all kuma meta labels.
+ kumaMetaLabelPrefix = model.MetaLabelPrefix + "kuma_"
+
+ // kumaMeshLabel is the name of the label that holds the mesh name.
+ kumaMeshLabel = kumaMetaLabelPrefix + "mesh"
+ // kumaServiceLabel is the name of the label that holds the service name.
+ kumaServiceLabel = kumaMetaLabelPrefix + "service"
+ // kumaDataplaneLabel is the name of the label that holds the dataplane name.
+ kumaDataplaneLabel = kumaMetaLabelPrefix + "dataplane"
+ // kumaUserLabelPrefix is the name of the label that namespaces all user-defined labels.
+ kumaUserLabelPrefix = kumaMetaLabelPrefix + "label_"
+)
+
+const (
+ KumaMadsV1ResourceTypeURL = "type.googleapis.com/kuma.observability.v1.MonitoringAssignment"
+ KumaMadsV1ResourceType = "monitoringassignments"
+)
+
+type KumaSDConfig = SDConfig
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (c *KumaSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ *c = DefaultKumaSDConfig
+ type plainKumaConf KumaSDConfig
+ err := unmarshal((*plainKumaConf)(c))
+ if err != nil {
+ return err
+ }
+
+ if len(c.Server) == 0 {
+ return errors.Errorf("kuma SD server must not be empty: %s", c.Server)
+ }
+ parsedURL, err := url.Parse(c.Server)
+ if err != nil {
+ return err
+ }
+
+ if len(parsedURL.Scheme) == 0 || len(parsedURL.Host) == 0 {
+ return errors.Errorf("kuma SD server must not be empty and have a scheme: %s", c.Server)
+ }
+
+ if err := c.HTTPClientConfig.Validate(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (c *KumaSDConfig) Name() string {
+ return "kuma"
+}
+
+// SetDirectory joins any relative file paths with dir.
+func (c *KumaSDConfig) SetDirectory(dir string) {
+ c.HTTPClientConfig.SetDirectory(dir)
+}
+
+func (c *KumaSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
+ logger := opts.Logger
+ if logger == nil {
+ logger = log.NewNopLogger()
+ }
+
+ return NewKumaHTTPDiscovery(c, logger)
+}
+
+func convertKumaV1MonitoringAssignment(assignment *MonitoringAssignment) []model.LabelSet {
+ commonLabels := convertKumaUserLabels(assignment.Labels)
+
+ commonLabels[kumaMeshLabel] = model.LabelValue(assignment.Mesh)
+ commonLabels[kumaServiceLabel] = model.LabelValue(assignment.Service)
+
+ var targets []model.LabelSet
+
+ for _, madsTarget := range assignment.Targets {
+ targetLabels := convertKumaUserLabels(madsTarget.Labels).Merge(commonLabels)
+
+ targetLabels[kumaDataplaneLabel] = model.LabelValue(madsTarget.Name)
+ targetLabels[model.AddressLabel] = model.LabelValue(madsTarget.Address)
+ targetLabels[model.InstanceLabel] = model.LabelValue(madsTarget.Name)
+ targetLabels[model.SchemeLabel] = model.LabelValue(madsTarget.Scheme)
+ targetLabels[model.MetricsPathLabel] = model.LabelValue(madsTarget.MetricsPath)
+
+ targets = append(targets, targetLabels)
+ }
+
+ return targets
+}
+
+func convertKumaUserLabels(labels map[string]string) model.LabelSet {
+ labelSet := model.LabelSet{}
+ for key, value := range labels {
+ name := kumaUserLabelPrefix + strutil.SanitizeLabelName(key)
+ labelSet[model.LabelName(name)] = model.LabelValue(value)
+ }
+ return labelSet
+}
+
+// kumaMadsV1ResourceParser is an xds.resourceParser.
+func kumaMadsV1ResourceParser(resources []*anypb.Any, typeURL string) ([]model.LabelSet, error) {
+ if typeURL != KumaMadsV1ResourceTypeURL {
+ return nil, errors.Errorf("received invalid typeURL for Kuma MADS v1 Resource: %s", typeURL)
+ }
+
+ var targets []model.LabelSet
+
+ for _, resource := range resources {
+ assignment := &MonitoringAssignment{}
+
+ if err := anypb.UnmarshalTo(resource, assignment, protoUnmarshalOptions); err != nil {
+ return nil, err
+ }
+
+ targets = append(targets, convertKumaV1MonitoringAssignment(assignment)...)
+ }
+
+ return targets, nil
+}
+
+func NewKumaHTTPDiscovery(conf *KumaSDConfig, logger log.Logger) (discovery.Discoverer, error) {
+ // Default to "prometheus" if hostname is unavailable.
+ clientID, err := osutil.GetFQDN()
+ if err != nil {
+ level.Debug(logger).Log("msg", "error getting FQDN", "err", err)
+ clientID = "prometheus"
+ }
+
+ clientConfig := &HTTPResourceClientConfig{
+ HTTPClientConfig: conf.HTTPClientConfig,
+ ExtraQueryParams: url.Values{
+ "fetch-timeout": {conf.FetchTimeout.String()},
+ },
+ // Allow 15s of buffer over the timeout sent to the xDS server for connection overhead.
+ Timeout: time.Duration(conf.FetchTimeout) + (15 * time.Second),
+ ResourceType: KumaMadsV1ResourceType,
+ ResourceTypeURL: KumaMadsV1ResourceTypeURL,
+ Server: conf.Server,
+ ClientID: clientID,
+ }
+
+ client, err := NewHTTPResourceClient(clientConfig, ProtocolV3)
+ if err != nil {
+ return nil, fmt.Errorf("kuma_sd: %w", err)
+ }
+
+ d := &fetchDiscovery{
+ client: client,
+ logger: logger,
+ refreshInterval: time.Duration(conf.RefreshInterval),
+ source: "kuma",
+ parseResources: kumaMadsV1ResourceParser,
+ fetchFailuresCount: kumaFetchFailuresCount,
+ fetchSkipUpdateCount: kumaFetchSkipUpdateCount,
+ fetchDuration: kumaFetchDuration,
+ }
+
+ return d, nil
+}
diff --git a/vendor/github.com/prometheus/prometheus/discovery/xds/kuma_mads.pb.go b/vendor/github.com/prometheus/prometheus/discovery/xds/kuma_mads.pb.go
new file mode 100644
index 000000000..b1079bf23
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/discovery/xds/kuma_mads.pb.go
@@ -0,0 +1,398 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.14.0
+// source: observability/v1/mads.proto
+
+// gRPC-removed vendored file from Kuma.
+
+package xds
+
+import (
+ context "context"
+ v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// MADS resource type.
+//
+// Describes a group of targets on a single service that need to be monitored.
+type MonitoringAssignment struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Mesh of the dataplane.
+ //
+ // E.g., `default`
+ Mesh string `protobuf:"bytes,2,opt,name=mesh,proto3" json:"mesh,omitempty"`
+ // Identifying service the dataplane is proxying.
+ //
+ // E.g., `backend`
+ Service string `protobuf:"bytes,3,opt,name=service,proto3" json:"service,omitempty"`
+ // List of targets that need to be monitored.
+ Targets []*MonitoringAssignment_Target `protobuf:"bytes,4,rep,name=targets,proto3" json:"targets,omitempty"`
+ // Arbitrary Labels associated with every target in the assignment.
+ //
+ // E.g., `{"zone" : "us-east-1", "team": "infra", "commit_hash": "620506a88"}`.
+ Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *MonitoringAssignment) Reset() {
+ *x = MonitoringAssignment{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_observability_v1_mads_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MonitoringAssignment) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MonitoringAssignment) ProtoMessage() {}
+
+func (x *MonitoringAssignment) ProtoReflect() protoreflect.Message {
+ mi := &file_observability_v1_mads_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MonitoringAssignment.ProtoReflect.Descriptor instead.
+func (*MonitoringAssignment) Descriptor() ([]byte, []int) {
+ return file_observability_v1_mads_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *MonitoringAssignment) GetMesh() string {
+ if x != nil {
+ return x.Mesh
+ }
+ return ""
+}
+
+func (x *MonitoringAssignment) GetService() string {
+ if x != nil {
+ return x.Service
+ }
+ return ""
+}
+
+func (x *MonitoringAssignment) GetTargets() []*MonitoringAssignment_Target {
+ if x != nil {
+ return x.Targets
+ }
+ return nil
+}
+
+func (x *MonitoringAssignment) GetLabels() map[string]string {
+ if x != nil {
+ return x.Labels
+ }
+ return nil
+}
+
+// Describes a single target that needs to be monitored.
+type MonitoringAssignment_Target struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // E.g., `backend-01`
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Scheme on which to scrape the target.
+ //E.g., `http`
+ Scheme string `protobuf:"bytes,2,opt,name=scheme,proto3" json:"scheme,omitempty"`
+ // Address (preferably IP) for the service
+ // E.g., `backend.svc` or `10.1.4.32:9090`
+ Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"`
+ // Optional path to append to the address for scraping
+ //E.g., `/metrics`
+ MetricsPath string `protobuf:"bytes,4,opt,name=metrics_path,json=metricsPath,proto3" json:"metrics_path,omitempty"`
+ // Arbitrary labels associated with that particular target.
+ //
+ // E.g.,
+ // `{
+ // "commit_hash" : "620506a88",
+ // }`.
+ Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *MonitoringAssignment_Target) Reset() {
+ *x = MonitoringAssignment_Target{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_observability_v1_mads_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MonitoringAssignment_Target) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MonitoringAssignment_Target) ProtoMessage() {}
+
+func (x *MonitoringAssignment_Target) ProtoReflect() protoreflect.Message {
+ mi := &file_observability_v1_mads_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MonitoringAssignment_Target.ProtoReflect.Descriptor instead.
+func (*MonitoringAssignment_Target) Descriptor() ([]byte, []int) {
+ return file_observability_v1_mads_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *MonitoringAssignment_Target) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *MonitoringAssignment_Target) GetScheme() string {
+ if x != nil {
+ return x.Scheme
+ }
+ return ""
+}
+
+func (x *MonitoringAssignment_Target) GetAddress() string {
+ if x != nil {
+ return x.Address
+ }
+ return ""
+}
+
+func (x *MonitoringAssignment_Target) GetMetricsPath() string {
+ if x != nil {
+ return x.MetricsPath
+ }
+ return ""
+}
+
+func (x *MonitoringAssignment_Target) GetLabels() map[string]string {
+ if x != nil {
+ return x.Labels
+ }
+ return nil
+}
+
+var File_observability_v1_mads_proto protoreflect.FileDescriptor
+
+var file_observability_v1_mads_proto_rawDesc = []byte{
+ 0x0a, 0x1b, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2f,
+ 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x64, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x6b,
+ 0x75, 0x6d, 0x61, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74,
+ 0x79, 0x2e, 0x76, 0x31, 0x1a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x2f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2f, 0x76, 0x33,
+ 0x2f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17,
+ 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd2, 0x04, 0x0a, 0x14, 0x4d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74,
+ 0x12, 0x1b, 0x0a, 0x04, 0x6d, 0x65, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07,
+ 0xfa, 0x42, 0x04, 0x72, 0x02, 0x20, 0x01, 0x52, 0x04, 0x6d, 0x65, 0x73, 0x68, 0x12, 0x21, 0x0a,
+ 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07,
+ 0xfa, 0x42, 0x04, 0x72, 0x02, 0x20, 0x01, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x12, 0x4c, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x32, 0x2e, 0x6b, 0x75, 0x6d, 0x61, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61,
+ 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x54,
+ 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x4f,
+ 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37,
+ 0x2e, 0x6b, 0x75, 0x6d, 0x61, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c,
+ 0x69, 0x74, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65,
+ 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a,
+ 0x9f, 0x02, 0x0a, 0x06, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x20,
+ 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x20, 0x01,
+ 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72,
+ 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02,
+ 0x20, 0x01, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6d,
+ 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x50, 0x61, 0x74, 0x68, 0x12, 0x56,
+ 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e,
+ 0x2e, 0x6b, 0x75, 0x6d, 0x61, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c,
+ 0x69, 0x74, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x54, 0x61, 0x72, 0x67,
+ 0x65, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06,
+ 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+ 0x01, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b,
+ 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x32, 0xe6, 0x03, 0x0a,
+ 0x24, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x41, 0x73, 0x73, 0x69, 0x67,
+ 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x53, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x89, 0x01, 0x0a, 0x1a, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x4d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d,
+ 0x65, 0x6e, 0x74, 0x73, 0x12, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76,
+ 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72,
+ 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76,
+ 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30,
+ 0x01, 0x12, 0x80, 0x01, 0x0a, 0x1b, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74,
+ 0x73, 0x12, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x44,
+ 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e,
+ 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x69, 0x73,
+ 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
+ 0x28, 0x01, 0x30, 0x01, 0x12, 0xae, 0x01, 0x0a, 0x1a, 0x46, 0x65, 0x74, 0x63, 0x68, 0x4d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x12, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33,
+ 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x44,
+ 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x33, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x24, 0x22, 0x22, 0x2f, 0x76, 0x33, 0x2f, 0x64, 0x69,
+ 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x3a, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93,
+ 0x02, 0x03, 0x3a, 0x01, 0x2a, 0x42, 0x04, 0x5a, 0x02, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_observability_v1_mads_proto_rawDescOnce sync.Once
+ file_observability_v1_mads_proto_rawDescData = file_observability_v1_mads_proto_rawDesc
+)
+
+func file_observability_v1_mads_proto_rawDescGZIP() []byte {
+ file_observability_v1_mads_proto_rawDescOnce.Do(func() {
+ file_observability_v1_mads_proto_rawDescData = protoimpl.X.CompressGZIP(file_observability_v1_mads_proto_rawDescData)
+ })
+ return file_observability_v1_mads_proto_rawDescData
+}
+
+var file_observability_v1_mads_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
+var file_observability_v1_mads_proto_goTypes = []interface{}{
+ (*MonitoringAssignment)(nil), // 0: kuma.observability.v1.MonitoringAssignment
+ (*MonitoringAssignment_Target)(nil), // 1: kuma.observability.v1.MonitoringAssignment.Target
+ nil, // 2: kuma.observability.v1.MonitoringAssignment.LabelsEntry
+ nil, // 3: kuma.observability.v1.MonitoringAssignment.Target.LabelsEntry
+ (*v3.DeltaDiscoveryRequest)(nil), // 4: envoy.service.discovery.v3.DeltaDiscoveryRequest
+ (*v3.DiscoveryRequest)(nil), // 5: envoy.service.discovery.v3.DiscoveryRequest
+ (*v3.DeltaDiscoveryResponse)(nil), // 6: envoy.service.discovery.v3.DeltaDiscoveryResponse
+ (*v3.DiscoveryResponse)(nil), // 7: envoy.service.discovery.v3.DiscoveryResponse
+}
+var file_observability_v1_mads_proto_depIdxs = []int32{
+ 1, // 0: kuma.observability.v1.MonitoringAssignment.targets:type_name -> kuma.observability.v1.MonitoringAssignment.Target
+ 2, // 1: kuma.observability.v1.MonitoringAssignment.labels:type_name -> kuma.observability.v1.MonitoringAssignment.LabelsEntry
+ 3, // 2: kuma.observability.v1.MonitoringAssignment.Target.labels:type_name -> kuma.observability.v1.MonitoringAssignment.Target.LabelsEntry
+ 4, // 3: kuma.observability.v1.MonitoringAssignmentDiscoveryService.DeltaMonitoringAssignments:input_type -> envoy.service.discovery.v3.DeltaDiscoveryRequest
+ 5, // 4: kuma.observability.v1.MonitoringAssignmentDiscoveryService.StreamMonitoringAssignments:input_type -> envoy.service.discovery.v3.DiscoveryRequest
+ 5, // 5: kuma.observability.v1.MonitoringAssignmentDiscoveryService.FetchMonitoringAssignments:input_type -> envoy.service.discovery.v3.DiscoveryRequest
+ 6, // 6: kuma.observability.v1.MonitoringAssignmentDiscoveryService.DeltaMonitoringAssignments:output_type -> envoy.service.discovery.v3.DeltaDiscoveryResponse
+ 7, // 7: kuma.observability.v1.MonitoringAssignmentDiscoveryService.StreamMonitoringAssignments:output_type -> envoy.service.discovery.v3.DiscoveryResponse
+ 7, // 8: kuma.observability.v1.MonitoringAssignmentDiscoveryService.FetchMonitoringAssignments:output_type -> envoy.service.discovery.v3.DiscoveryResponse
+ 6, // [6:9] is the sub-list for method output_type
+ 3, // [3:6] is the sub-list for method input_type
+ 3, // [3:3] is the sub-list for extension type_name
+ 3, // [3:3] is the sub-list for extension extendee
+ 0, // [0:3] is the sub-list for field type_name
+}
+
+func init() { file_observability_v1_mads_proto_init() }
+func file_observability_v1_mads_proto_init() {
+ if File_observability_v1_mads_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_observability_v1_mads_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MonitoringAssignment); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_observability_v1_mads_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MonitoringAssignment_Target); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_observability_v1_mads_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 4,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_observability_v1_mads_proto_goTypes,
+ DependencyIndexes: file_observability_v1_mads_proto_depIdxs,
+ MessageInfos: file_observability_v1_mads_proto_msgTypes,
+ }.Build()
+ File_observability_v1_mads_proto = out.File
+ file_observability_v1_mads_proto_rawDesc = nil
+ file_observability_v1_mads_proto_goTypes = nil
+ file_observability_v1_mads_proto_depIdxs = nil
+}
+
+// MonitoringAssignmentDiscoveryServiceServer is the server API for MonitoringAssignmentDiscoveryService service.
+type MonitoringAssignmentDiscoveryServiceServer interface {
+ // HTTP
+ FetchMonitoringAssignments(context.Context, *v3.DiscoveryRequest) (*v3.DiscoveryResponse, error)
+}
diff --git a/vendor/github.com/prometheus/prometheus/discovery/xds/xds.go b/vendor/github.com/prometheus/prometheus/discovery/xds/xds.go
new file mode 100644
index 000000000..48bdbab02
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/discovery/xds/xds.go
@@ -0,0 +1,173 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package xds
+
+import (
+ "context"
+ "time"
+
+ v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/common/config"
+ "github.com/prometheus/common/model"
+ "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/types/known/anypb"
+
+ "github.com/prometheus/prometheus/discovery"
+ "github.com/prometheus/prometheus/discovery/targetgroup"
+)
+
+const (
+ // Constants for instrumentation.
+ namespace = "prometheus"
+)
+
+// ProtocolVersion is the xDS protocol version.
+type ProtocolVersion string
+
+const (
+ ProtocolV3 = ProtocolVersion("v3")
+)
+
+type HTTPConfig struct {
+ config.HTTPClientConfig `yaml:",inline"`
+}
+
+// SDConfig is a base config for xDS-based SD mechanisms.
+type SDConfig struct {
+ HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
+ RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
+ FetchTimeout model.Duration `yaml:"fetch_timeout,omitempty"`
+ Server string `yaml:"server,omitempty"`
+}
+
+// mustRegisterMessage registers the provided message type in the typeRegistry, and panics
+// if there is an error.
+func mustRegisterMessage(typeRegistry *protoregistry.Types, mt protoreflect.MessageType) {
+ if err := typeRegistry.RegisterMessage(mt); err != nil {
+ panic(err)
+ }
+}
+
+func init() {
+ // Register top-level SD Configs.
+ discovery.RegisterConfig(&KumaSDConfig{})
+
+ // Register metrics.
+ prometheus.MustRegister(kumaFetchDuration, kumaFetchSkipUpdateCount, kumaFetchFailuresCount)
+
+ // Register protobuf types that need to be marshalled/unmarshalled.
+ mustRegisterMessage(protoTypes, (&v3.DiscoveryRequest{}).ProtoReflect().Type())
+ mustRegisterMessage(protoTypes, (&v3.DiscoveryResponse{}).ProtoReflect().Type())
+ mustRegisterMessage(protoTypes, (&MonitoringAssignment{}).ProtoReflect().Type())
+}
+
+var (
+ protoTypes = new(protoregistry.Types)
+ protoUnmarshalOptions = proto.UnmarshalOptions{
+ DiscardUnknown: true, // Only want known fields.
+ Merge: true, // Always using new messages.
+ Resolver: protoTypes, // Only want known types.
+ }
+ protoJSONUnmarshalOptions = protojson.UnmarshalOptions{
+ DiscardUnknown: true, // Only want known fields.
+ Resolver: protoTypes, // Only want known types.
+ }
+ protoJSONMarshalOptions = protojson.MarshalOptions{
+ UseProtoNames: true,
+ Resolver: protoTypes, // Only want known types.
+ }
+)
+
+// resourceParser is a function that takes raw discovered objects and translates them into
+// targetgroup.Group Targets. On error, no updates are sent to the scrape manager and the failure count is incremented.
+type resourceParser func(resources []*anypb.Any, typeUrl string) ([]model.LabelSet, error)
+
+// fetchDiscovery implements long-polling via xDS Fetch REST-JSON.
+type fetchDiscovery struct {
+ client ResourceClient
+ source string
+
+ refreshInterval time.Duration
+
+ parseResources resourceParser
+ logger log.Logger
+
+ fetchDuration prometheus.Observer
+ fetchSkipUpdateCount prometheus.Counter
+ fetchFailuresCount prometheus.Counter
+}
+
+func (d *fetchDiscovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
+ defer d.client.Close()
+
+ ticker := time.NewTicker(d.refreshInterval)
+
+ for {
+ select {
+ case <-ctx.Done():
+ ticker.Stop()
+ return
+ default:
+ d.poll(ctx, ch)
+ <-ticker.C
+ }
+ }
+}
+
+func (d *fetchDiscovery) poll(ctx context.Context, ch chan<- []*targetgroup.Group) {
+ t0 := time.Now()
+ response, err := d.client.Fetch(ctx)
+ elapsed := time.Since(t0)
+ d.fetchDuration.Observe(elapsed.Seconds())
+
+ // Check the context before in order to exit early.
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ }
+
+ if err != nil {
+ level.Error(d.logger).Log("msg", "error in fetch", "err", err)
+ d.fetchFailuresCount.Inc()
+ return
+ }
+
+ if response == nil {
+ // No update needed.
+ d.fetchSkipUpdateCount.Inc()
+ return
+ }
+
+ parsedTargets, err := d.parseResources(response.Resources, response.TypeUrl)
+ if err != nil {
+ level.Error(d.logger).Log("msg", "error parsing resources", "err", err)
+ d.fetchFailuresCount.Inc()
+ return
+ }
+
+ level.Debug(d.logger).Log("msg", "Updated to version", "version", response.VersionInfo, "targets", len(parsedTargets))
+
+ select {
+ case <-ctx.Done():
+ return
+ case ch <- []*targetgroup.Group{{Source: d.source, Targets: parsedTargets}}:
+ }
+}
diff --git a/vendor/github.com/prometheus/prometheus/discovery/zookeeper/zookeeper.go b/vendor/github.com/prometheus/prometheus/discovery/zookeeper/zookeeper.go
index 09edf6e0e..528bdf073 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/zookeeper/zookeeper.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/zookeeper/zookeeper.go
@@ -22,10 +22,10 @@ import (
"strings"
"time"
- "github.com/go-kit/kit/log"
+ "github.com/go-kit/log"
+ "github.com/go-zookeeper/zk"
"github.com/pkg/errors"
"github.com/prometheus/common/model"
- "github.com/samuel/go-zookeeper/zk"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/targetgroup"
diff --git a/vendor/github.com/prometheus/prometheus/notifier/notifier.go b/vendor/github.com/prometheus/prometheus/notifier/notifier.go
index 585661350..7af21c565 100644
--- a/vendor/github.com/prometheus/prometheus/notifier/notifier.go
+++ b/vendor/github.com/prometheus/prometheus/notifier/notifier.go
@@ -28,8 +28,8 @@ import (
"sync"
"time"
- "github.com/go-kit/kit/log"
- "github.com/go-kit/kit/log/level"
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
"github.com/go-openapi/strfmt"
"github.com/pkg/errors"
"github.com/prometheus/alertmanager/api/v2/models"
@@ -634,7 +634,7 @@ type alertmanagerSet struct {
}
func newAlertmanagerSet(cfg *config.AlertmanagerConfig, logger log.Logger, metrics *alertMetrics) (*alertmanagerSet, error) {
- client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "alertmanager", false, false)
+ client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "alertmanager", config_util.WithHTTP2Disabled())
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/prometheus/prometheus/pkg/exemplar/exemplar.go b/vendor/github.com/prometheus/prometheus/pkg/exemplar/exemplar.go
index c6ea0db94..27ba64d4b 100644
--- a/vendor/github.com/prometheus/prometheus/pkg/exemplar/exemplar.go
+++ b/vendor/github.com/prometheus/prometheus/pkg/exemplar/exemplar.go
@@ -15,10 +15,36 @@ package exemplar
import "github.com/prometheus/prometheus/pkg/labels"
+// The combined length of the label names and values of an Exemplar's LabelSet MUST NOT exceed 128 UTF-8 characters
+// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars
+const ExemplarMaxLabelSetLength = 128
+
// Exemplar is additional information associated with a time series.
type Exemplar struct {
- Labels labels.Labels
- Value float64
+ Labels labels.Labels `json:"labels"`
+ Value float64 `json:"value"`
+ Ts int64 `json:"timestamp"`
HasTs bool
- Ts int64
+}
+
+type QueryResult struct {
+ SeriesLabels labels.Labels `json:"seriesLabels"`
+ Exemplars []Exemplar `json:"exemplars"`
+}
+
+// Equals compares if the exemplar e is the same as e2. Note that if HasTs is false for
+// both exemplars then the timestamps will be ignored for the comparison. This can come up
+// when an exemplar is exported without its own timestamp, in which case the scrape timestamp
+// is assigned to the Ts field. However we still want to treat the same exemplar, scraped without
+// an exported timestamp, as a duplicate of itself for each subsequent scrape.
+func (e Exemplar) Equals(e2 Exemplar) bool {
+ if !labels.Equal(e.Labels, e2.Labels) {
+ return false
+ }
+
+ if (e.HasTs || e2.HasTs) && e.Ts != e2.Ts {
+ return false
+ }
+
+ return e.Value == e2.Value
}
diff --git a/vendor/github.com/prometheus/prometheus/pkg/labels/matcher.go b/vendor/github.com/prometheus/prometheus/pkg/labels/matcher.go
index 88d463233..f299c40f6 100644
--- a/vendor/github.com/prometheus/prometheus/pkg/labels/matcher.go
+++ b/vendor/github.com/prometheus/prometheus/pkg/labels/matcher.go
@@ -28,17 +28,18 @@ const (
MatchNotRegexp
)
+var matchTypeToStr = [...]string{
+ MatchEqual: "=",
+ MatchNotEqual: "!=",
+ MatchRegexp: "=~",
+ MatchNotRegexp: "!~",
+}
+
func (m MatchType) String() string {
- typeToStr := map[MatchType]string{
- MatchEqual: "=",
- MatchNotEqual: "!=",
- MatchRegexp: "=~",
- MatchNotRegexp: "!~",
- }
- if str, ok := typeToStr[m]; ok {
- return str
+ if m < MatchEqual || m > MatchNotRegexp {
+ panic("unknown match type")
}
- panic("unknown match type")
+ return matchTypeToStr[m]
}
// Matcher models the matching of a label.
diff --git a/vendor/github.com/prometheus/prometheus/pkg/logging/dedupe.go b/vendor/github.com/prometheus/prometheus/pkg/logging/dedupe.go
index 1d911ca2f..d490a6afd 100644
--- a/vendor/github.com/prometheus/prometheus/pkg/logging/dedupe.go
+++ b/vendor/github.com/prometheus/prometheus/pkg/logging/dedupe.go
@@ -18,7 +18,7 @@ import (
"sync"
"time"
- "github.com/go-kit/kit/log"
+ "github.com/go-kit/log"
"github.com/go-logfmt/logfmt"
)
diff --git a/vendor/github.com/prometheus/prometheus/pkg/logging/file.go b/vendor/github.com/prometheus/prometheus/pkg/logging/file.go
index be118fad0..3c0c3e3b0 100644
--- a/vendor/github.com/prometheus/prometheus/pkg/logging/file.go
+++ b/vendor/github.com/prometheus/prometheus/pkg/logging/file.go
@@ -17,7 +17,7 @@ import (
"os"
"time"
- "github.com/go-kit/kit/log"
+ "github.com/go-kit/log"
"github.com/pkg/errors"
)
diff --git a/vendor/github.com/prometheus/prometheus/pkg/logging/ratelimit.go b/vendor/github.com/prometheus/prometheus/pkg/logging/ratelimit.go
index d3567eaa0..32d1e249e 100644
--- a/vendor/github.com/prometheus/prometheus/pkg/logging/ratelimit.go
+++ b/vendor/github.com/prometheus/prometheus/pkg/logging/ratelimit.go
@@ -14,7 +14,7 @@
package logging
import (
- "github.com/go-kit/kit/log"
+ "github.com/go-kit/log"
"golang.org/x/time/rate"
)
diff --git a/vendor/github.com/prometheus/prometheus/pkg/relabel/relabel.go b/vendor/github.com/prometheus/prometheus/pkg/relabel/relabel.go
index 0bd00fe8a..ec452f5b5 100644
--- a/vendor/github.com/prometheus/prometheus/pkg/relabel/relabel.go
+++ b/vendor/github.com/prometheus/prometheus/pkg/relabel/relabel.go
@@ -100,6 +100,9 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
if c.Regex.Regexp == nil {
c.Regex = MustNewRegexp("")
}
+ if c.Action == "" {
+ return errors.Errorf("relabel action cannot be empty")
+ }
if c.Modulus == 0 && c.Action == HashMod {
return errors.Errorf("relabel configuration for hashmod requires non-zero modulus")
}
diff --git a/vendor/github.com/prometheus/prometheus/pkg/rulefmt/rulefmt.go b/vendor/github.com/prometheus/prometheus/pkg/rulefmt/rulefmt.go
index 8267bc90e..920c7a203 100644
--- a/vendor/github.com/prometheus/prometheus/pkg/rulefmt/rulefmt.go
+++ b/vendor/github.com/prometheus/prometheus/pkg/rulefmt/rulefmt.go
@@ -223,10 +223,11 @@ func testTemplateParsing(rl *RuleNode) (errs []error) {
}
// Trying to parse templates.
- tmplData := template.AlertTemplateData(map[string]string{}, map[string]string{}, 0)
+ tmplData := template.AlertTemplateData(map[string]string{}, map[string]string{}, "", 0)
defs := []string{
"{{$labels := .Labels}}",
"{{$externalLabels := .ExternalLabels}}",
+ "{{$externalURL := .ExternalURL}}",
"{{$value := .Value}}",
}
parseTest := func(text string) error {
diff --git a/vendor/github.com/prometheus/prometheus/pkg/textparse/openmetricsparse.go b/vendor/github.com/prometheus/prometheus/pkg/textparse/openmetricsparse.go
index 6cfdd8391..565efd359 100644
--- a/vendor/github.com/prometheus/prometheus/pkg/textparse/openmetricsparse.go
+++ b/vendor/github.com/prometheus/prometheus/pkg/textparse/openmetricsparse.go
@@ -306,11 +306,10 @@ func (p *OpenMetricsParser) Next() (Entry, error) {
t2 := p.nextToken()
if t2 == tBraceOpen {
- offsets, err := p.parseLVals()
+ p.offsets, err = p.parseLVals(p.offsets)
if err != nil {
return EntryInvalid, err
}
- p.offsets = append(p.offsets, offsets...)
p.series = p.l.b[p.start:p.l.i]
t2 = p.nextToken()
}
@@ -336,6 +335,9 @@ func (p *OpenMetricsParser) Next() (Entry, error) {
if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil {
return EntryInvalid, err
}
+ if math.IsNaN(ts) || math.IsInf(ts, 0) {
+ return EntryInvalid, errors.New("invalid timestamp")
+ }
p.ts = int64(ts * 1000)
switch t3 := p.nextToken(); t3 {
case tLinebreak:
@@ -364,12 +366,12 @@ func (p *OpenMetricsParser) parseComment() error {
return err
}
+ var err error
// Parse the labels.
- offsets, err := p.parseLVals()
+ p.eOffsets, err = p.parseLVals(p.eOffsets)
if err != nil {
return err
}
- p.eOffsets = append(p.eOffsets, offsets...)
p.exemplar = p.l.b[p.start:p.l.i]
// Get the value.
@@ -392,6 +394,9 @@ func (p *OpenMetricsParser) parseComment() error {
if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil {
return err
}
+ if math.IsNaN(ts) || math.IsInf(ts, 0) {
+ return errors.New("invalid exemplar timestamp")
+ }
p.exemplarTs = int64(ts * 1000)
switch t3 := p.nextToken(); t3 {
case tLinebreak:
@@ -404,8 +409,7 @@ func (p *OpenMetricsParser) parseComment() error {
return nil
}
-func (p *OpenMetricsParser) parseLVals() ([]int, error) {
- var offsets []int
+func (p *OpenMetricsParser) parseLVals(offsets []int) ([]int, error) {
first := true
for {
t := p.nextToken()
diff --git a/vendor/github.com/prometheus/prometheus/pkg/timestamp/timestamp.go b/vendor/github.com/prometheus/prometheus/pkg/timestamp/timestamp.go
index a7f03b0ca..93458f644 100644
--- a/vendor/github.com/prometheus/prometheus/pkg/timestamp/timestamp.go
+++ b/vendor/github.com/prometheus/prometheus/pkg/timestamp/timestamp.go
@@ -13,7 +13,10 @@
package timestamp
-import "time"
+import (
+ "math"
+ "time"
+)
// FromTime returns a new millisecond timestamp from a time.
func FromTime(t time.Time) int64 {
@@ -24,3 +27,8 @@ func FromTime(t time.Time) int64 {
func Time(ts int64) time.Time {
return time.Unix(ts/1000, (ts%1000)*int64(time.Millisecond)).UTC()
}
+
+// FromFloatSeconds returns a millisecond timestamp from float seconds.
+func FromFloatSeconds(ts float64) int64 {
+ return int64(math.Round(ts * 1000))
+}
diff --git a/vendor/github.com/prometheus/prometheus/prompb/remote.pb.go b/vendor/github.com/prometheus/prometheus/prompb/remote.pb.go
index 2a5bd8c8e..b3cf44884 100644
--- a/vendor/github.com/prometheus/prometheus/prompb/remote.pb.go
+++ b/vendor/github.com/prometheus/prometheus/prompb/remote.pb.go
@@ -1011,10 +1011,7 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
- if skippy < 0 {
- return ErrInvalidLengthRemote
- }
- if (iNdEx + skippy) < 0 {
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthRemote
}
if (iNdEx + skippy) > l {
@@ -1168,10 +1165,7 @@ func (m *ReadRequest) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
- if skippy < 0 {
- return ErrInvalidLengthRemote
- }
- if (iNdEx + skippy) < 0 {
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthRemote
}
if (iNdEx + skippy) > l {
@@ -1256,10 +1250,7 @@ func (m *ReadResponse) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
- if skippy < 0 {
- return ErrInvalidLengthRemote
- }
- if (iNdEx + skippy) < 0 {
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthRemote
}
if (iNdEx + skippy) > l {
@@ -1418,10 +1409,7 @@ func (m *Query) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
- if skippy < 0 {
- return ErrInvalidLengthRemote
- }
- if (iNdEx + skippy) < 0 {
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthRemote
}
if (iNdEx + skippy) > l {
@@ -1506,10 +1494,7 @@ func (m *QueryResult) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
- if skippy < 0 {
- return ErrInvalidLengthRemote
- }
- if (iNdEx + skippy) < 0 {
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthRemote
}
if (iNdEx + skippy) > l {
@@ -1613,10 +1598,7 @@ func (m *ChunkedReadResponse) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
- if skippy < 0 {
- return ErrInvalidLengthRemote
- }
- if (iNdEx + skippy) < 0 {
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthRemote
}
if (iNdEx + skippy) > l {
diff --git a/vendor/github.com/prometheus/prometheus/prompb/types.pb.go b/vendor/github.com/prometheus/prometheus/prompb/types.pb.go
index 5e593b73d..b29170e53 100644
--- a/vendor/github.com/prometheus/prometheus/prompb/types.pb.go
+++ b/vendor/github.com/prometheus/prometheus/prompb/types.pb.go
@@ -96,7 +96,7 @@ func (x LabelMatcher_Type) String() string {
}
func (LabelMatcher_Type) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_d938547f84707355, []int{5, 0}
+ return fileDescriptor_d938547f84707355, []int{6, 0}
}
// We require this to match chunkenc.Encoding.
@@ -122,7 +122,7 @@ func (x Chunk_Encoding) String() string {
}
func (Chunk_Encoding) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_d938547f84707355, []int{7, 0}
+ return fileDescriptor_d938547f84707355, []int{8, 0}
}
type MetricMetadata struct {
@@ -199,7 +199,9 @@ func (m *MetricMetadata) GetUnit() string {
}
type Sample struct {
- Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
+ Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
+ // timestamp is in ms format, see pkg/timestamp/timestamp.go for
+ // conversion from time.Time to Prometheus timestamp.
Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
@@ -253,20 +255,89 @@ func (m *Sample) GetTimestamp() int64 {
return 0
}
-// TimeSeries represents samples and labels for a single time series.
-type TimeSeries struct {
- Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"`
- Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"`
+type Exemplar struct {
+ // Optional, can be empty.
+ Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"`
+ Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
+ // timestamp is in ms format, see pkg/timestamp/timestamp.go for
+ // conversion from time.Time to Prometheus timestamp.
+ Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
+func (m *Exemplar) Reset() { *m = Exemplar{} }
+func (m *Exemplar) String() string { return proto.CompactTextString(m) }
+func (*Exemplar) ProtoMessage() {}
+func (*Exemplar) Descriptor() ([]byte, []int) {
+ return fileDescriptor_d938547f84707355, []int{2}
+}
+func (m *Exemplar) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Exemplar) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Exemplar.Merge(m, src)
+}
+func (m *Exemplar) XXX_Size() int {
+ return m.Size()
+}
+func (m *Exemplar) XXX_DiscardUnknown() {
+ xxx_messageInfo_Exemplar.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Exemplar proto.InternalMessageInfo
+
+func (m *Exemplar) GetLabels() []Label {
+ if m != nil {
+ return m.Labels
+ }
+ return nil
+}
+
+func (m *Exemplar) GetValue() float64 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+func (m *Exemplar) GetTimestamp() int64 {
+ if m != nil {
+ return m.Timestamp
+ }
+ return 0
+}
+
+// TimeSeries represents samples and labels for a single time series.
+type TimeSeries struct {
+ // For a timeseries to be valid, and for the samples and exemplars
+ // to be ingested by the remote system properly, the labels field is required.
+ Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"`
+ Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"`
+ Exemplars []Exemplar `protobuf:"bytes,3,rep,name=exemplars,proto3" json:"exemplars"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
func (m *TimeSeries) Reset() { *m = TimeSeries{} }
func (m *TimeSeries) String() string { return proto.CompactTextString(m) }
func (*TimeSeries) ProtoMessage() {}
func (*TimeSeries) Descriptor() ([]byte, []int) {
- return fileDescriptor_d938547f84707355, []int{2}
+ return fileDescriptor_d938547f84707355, []int{3}
}
func (m *TimeSeries) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -309,6 +380,13 @@ func (m *TimeSeries) GetSamples() []Sample {
return nil
}
+func (m *TimeSeries) GetExemplars() []Exemplar {
+ if m != nil {
+ return m.Exemplars
+ }
+ return nil
+}
+
type Label struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
@@ -321,7 +399,7 @@ func (m *Label) Reset() { *m = Label{} }
func (m *Label) String() string { return proto.CompactTextString(m) }
func (*Label) ProtoMessage() {}
func (*Label) Descriptor() ([]byte, []int) {
- return fileDescriptor_d938547f84707355, []int{3}
+ return fileDescriptor_d938547f84707355, []int{4}
}
func (m *Label) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -375,7 +453,7 @@ func (m *Labels) Reset() { *m = Labels{} }
func (m *Labels) String() string { return proto.CompactTextString(m) }
func (*Labels) ProtoMessage() {}
func (*Labels) Descriptor() ([]byte, []int) {
- return fileDescriptor_d938547f84707355, []int{4}
+ return fileDescriptor_d938547f84707355, []int{5}
}
func (m *Labels) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -425,7 +503,7 @@ func (m *LabelMatcher) Reset() { *m = LabelMatcher{} }
func (m *LabelMatcher) String() string { return proto.CompactTextString(m) }
func (*LabelMatcher) ProtoMessage() {}
func (*LabelMatcher) Descriptor() ([]byte, []int) {
- return fileDescriptor_d938547f84707355, []int{5}
+ return fileDescriptor_d938547f84707355, []int{6}
}
func (m *LabelMatcher) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -492,7 +570,7 @@ func (m *ReadHints) Reset() { *m = ReadHints{} }
func (m *ReadHints) String() string { return proto.CompactTextString(m) }
func (*ReadHints) ProtoMessage() {}
func (*ReadHints) Descriptor() ([]byte, []int) {
- return fileDescriptor_d938547f84707355, []int{6}
+ return fileDescriptor_d938547f84707355, []int{7}
}
func (m *ReadHints) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -586,7 +664,7 @@ func (m *Chunk) Reset() { *m = Chunk{} }
func (m *Chunk) String() string { return proto.CompactTextString(m) }
func (*Chunk) ProtoMessage() {}
func (*Chunk) Descriptor() ([]byte, []int) {
- return fileDescriptor_d938547f84707355, []int{7}
+ return fileDescriptor_d938547f84707355, []int{8}
}
func (m *Chunk) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -658,7 +736,7 @@ func (m *ChunkedSeries) Reset() { *m = ChunkedSeries{} }
func (m *ChunkedSeries) String() string { return proto.CompactTextString(m) }
func (*ChunkedSeries) ProtoMessage() {}
func (*ChunkedSeries) Descriptor() ([]byte, []int) {
- return fileDescriptor_d938547f84707355, []int{8}
+ return fileDescriptor_d938547f84707355, []int{9}
}
func (m *ChunkedSeries) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -707,6 +785,7 @@ func init() {
proto.RegisterEnum("prometheus.Chunk_Encoding", Chunk_Encoding_name, Chunk_Encoding_value)
proto.RegisterType((*MetricMetadata)(nil), "prometheus.MetricMetadata")
proto.RegisterType((*Sample)(nil), "prometheus.Sample")
+ proto.RegisterType((*Exemplar)(nil), "prometheus.Exemplar")
proto.RegisterType((*TimeSeries)(nil), "prometheus.TimeSeries")
proto.RegisterType((*Label)(nil), "prometheus.Label")
proto.RegisterType((*Labels)(nil), "prometheus.Labels")
@@ -719,51 +798,53 @@ func init() {
func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) }
var fileDescriptor_d938547f84707355 = []byte{
- // 690 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0xcd, 0x6e, 0xda, 0x40,
- 0x10, 0xce, 0xfa, 0x17, 0x86, 0x04, 0x39, 0xab, 0x54, 0x75, 0xa3, 0x96, 0x22, 0x4b, 0x95, 0x38,
- 0x54, 0x44, 0x49, 0x4f, 0x91, 0x7a, 0x21, 0x91, 0xf3, 0xa3, 0xc6, 0xa0, 0x2c, 0xa0, 0xfe, 0x5c,
- 0xd0, 0x02, 0x1b, 0xb0, 0x8a, 0x8d, 0xe3, 0x5d, 0xaa, 0xf0, 0x20, 0xbd, 0xf5, 0x15, 0x7a, 0xe8,
- 0x5b, 0xe4, 0xd8, 0x27, 0xa8, 0xaa, 0x3c, 0x49, 0xb5, 0x6b, 0x13, 0x13, 0xa5, 0x97, 0xf6, 0x36,
- 0xf3, 0x7d, 0xdf, 0xfc, 0xec, 0xcc, 0xd8, 0x50, 0x11, 0xcb, 0x84, 0xf1, 0x66, 0x92, 0xce, 0xc5,
- 0x1c, 0x43, 0x92, 0xce, 0x23, 0x26, 0xa6, 0x6c, 0xc1, 0x77, 0x77, 0x26, 0xf3, 0xc9, 0x5c, 0xc1,
- 0x7b, 0xd2, 0xca, 0x14, 0xde, 0x37, 0x0d, 0xaa, 0x01, 0x13, 0x69, 0x38, 0x0a, 0x98, 0xa0, 0x63,
- 0x2a, 0x28, 0x3e, 0x04, 0x43, 0xe6, 0x70, 0x51, 0x1d, 0x35, 0xaa, 0x07, 0xaf, 0x9a, 0x45, 0x8e,
- 0xe6, 0x43, 0x65, 0xee, 0xf6, 0x96, 0x09, 0x23, 0x2a, 0x04, 0xbf, 0x06, 0x1c, 0x29, 0x6c, 0x70,
- 0x45, 0xa3, 0x70, 0xb6, 0x1c, 0xc4, 0x34, 0x62, 0xae, 0x56, 0x47, 0x8d, 0x32, 0x71, 0x32, 0xe6,
- 0x44, 0x11, 0x6d, 0x1a, 0x31, 0x8c, 0xc1, 0x98, 0xb2, 0x59, 0xe2, 0x1a, 0x8a, 0x57, 0xb6, 0xc4,
- 0x16, 0x71, 0x28, 0x5c, 0x33, 0xc3, 0xa4, 0xed, 0x2d, 0x01, 0x8a, 0x4a, 0xb8, 0x02, 0x76, 0xbf,
- 0xfd, 0xae, 0xdd, 0x79, 0xdf, 0x76, 0x36, 0xa4, 0x73, 0xdc, 0xe9, 0xb7, 0x7b, 0x3e, 0x71, 0x10,
- 0x2e, 0x83, 0x79, 0xda, 0xea, 0x9f, 0xfa, 0x8e, 0x86, 0xb7, 0xa0, 0x7c, 0x76, 0xde, 0xed, 0x75,
- 0x4e, 0x49, 0x2b, 0x70, 0x74, 0x8c, 0xa1, 0xaa, 0x98, 0x02, 0x33, 0x64, 0x68, 0xb7, 0x1f, 0x04,
- 0x2d, 0xf2, 0xd1, 0x31, 0x71, 0x09, 0x8c, 0xf3, 0xf6, 0x49, 0xc7, 0xb1, 0xf0, 0x26, 0x94, 0xba,
- 0xbd, 0x56, 0xcf, 0xef, 0xfa, 0x3d, 0xc7, 0xf6, 0xde, 0x82, 0xd5, 0xa5, 0x51, 0x32, 0x63, 0x78,
- 0x07, 0xcc, 0x2f, 0x74, 0xb6, 0xc8, 0xc6, 0x82, 0x48, 0xe6, 0xe0, 0xe7, 0x50, 0x16, 0x61, 0xc4,
- 0xb8, 0xa0, 0x51, 0xa2, 0xde, 0xa9, 0x93, 0x02, 0xf0, 0xae, 0x01, 0x7a, 0x61, 0xc4, 0xba, 0x2c,
- 0x0d, 0x19, 0xc7, 0x7b, 0x60, 0xcd, 0xe8, 0x90, 0xcd, 0xb8, 0x8b, 0xea, 0x7a, 0xa3, 0x72, 0xb0,
- 0xbd, 0x3e, 0xd9, 0x0b, 0xc9, 0x1c, 0x19, 0xb7, 0xbf, 0x5e, 0x6e, 0x90, 0x5c, 0x86, 0x0f, 0xc0,
- 0xe6, 0xaa, 0x38, 0x77, 0x35, 0x15, 0x81, 0xd7, 0x23, 0xb2, 0xbe, 0xf2, 0x90, 0x95, 0xd0, 0xdb,
- 0x07, 0x53, 0xa5, 0x92, 0x83, 0x54, 0xc3, 0x47, 0xd9, 0x20, 0xa5, 0x5d, 0xbc, 0x21, 0xdb, 0x48,
- 0xe6, 0x78, 0x87, 0x60, 0x5d, 0x64, 0x05, 0xff, 0xb5, 0x43, 0xef, 0x2b, 0x82, 0x4d, 0x85, 0x07,
- 0x54, 0x8c, 0xa6, 0x2c, 0xc5, 0xfb, 0x0f, 0x6e, 0xe7, 0xc5, 0xa3, 0xf8, 0x5c, 0xd7, 0x5c, 0xbb,
- 0x99, 0x55, 0xa3, 0xda, 0xdf, 0x1a, 0xd5, 0xd7, 0x1b, 0x6d, 0x80, 0xa1, 0x2e, 0xc0, 0x02, 0xcd,
- 0xbf, 0x74, 0x36, 0xb0, 0x0d, 0x7a, 0xdb, 0xbf, 0x74, 0x90, 0x04, 0x88, 0xdc, 0xba, 0x04, 0x88,
- 0xef, 0xe8, 0xde, 0x0f, 0x04, 0x65, 0xc2, 0xe8, 0xf8, 0x2c, 0x8c, 0x05, 0xc7, 0x4f, 0xc1, 0xe6,
- 0x82, 0x25, 0x83, 0x88, 0xab, 0xbe, 0x74, 0x62, 0x49, 0x37, 0xe0, 0xb2, 0xf4, 0xd5, 0x22, 0x1e,
- 0xad, 0x4a, 0x4b, 0x1b, 0x3f, 0x83, 0x12, 0x17, 0x34, 0x15, 0x52, 0xad, 0x2b, 0xb5, 0xad, 0xfc,
- 0x80, 0xe3, 0x27, 0x60, 0xb1, 0x78, 0x2c, 0x09, 0x43, 0x11, 0x26, 0x8b, 0xc7, 0x01, 0xc7, 0xbb,
- 0x50, 0x9a, 0xa4, 0xf3, 0x45, 0x12, 0xc6, 0x13, 0xd7, 0xac, 0xeb, 0x8d, 0x32, 0xb9, 0xf7, 0x71,
- 0x15, 0xb4, 0xe1, 0xd2, 0xb5, 0xea, 0xa8, 0x51, 0x22, 0xda, 0x70, 0x29, 0xb3, 0xa7, 0x34, 0x9e,
- 0x30, 0x99, 0xc4, 0xce, 0xb2, 0x2b, 0x3f, 0xe0, 0xde, 0x77, 0x04, 0xe6, 0xf1, 0x74, 0x11, 0x7f,
- 0xc6, 0x35, 0xa8, 0x44, 0x61, 0x3c, 0x90, 0x77, 0x54, 0xf4, 0x5c, 0x8e, 0xc2, 0x58, 0x1e, 0x53,
- 0xc0, 0x15, 0x4f, 0x6f, 0xee, 0xf9, 0xfc, 0xec, 0x22, 0x7a, 0x93, 0xf3, 0xcd, 0x7c, 0x09, 0xba,
- 0x5a, 0xc2, 0xee, 0xfa, 0x12, 0x54, 0x81, 0xa6, 0x1f, 0x8f, 0xe6, 0xe3, 0x30, 0x9e, 0x14, 0x1b,
- 0x90, 0x9f, 0xb3, 0x7a, 0xd5, 0x26, 0x51, 0xb6, 0x57, 0x87, 0xd2, 0x4a, 0xf5, 0xf0, 0x8b, 0xb3,
- 0x41, 0xff, 0xd0, 0x21, 0x0e, 0xf2, 0xae, 0x61, 0x4b, 0x65, 0x63, 0xe3, 0xff, 0xbd, 0xef, 0x3d,
- 0xb0, 0x46, 0x32, 0xc3, 0xea, 0xbc, 0xb7, 0x1f, 0x75, 0xba, 0x0a, 0xc8, 0x64, 0x47, 0x3b, 0xb7,
- 0x77, 0x35, 0xf4, 0xf3, 0xae, 0x86, 0x7e, 0xdf, 0xd5, 0xd0, 0x27, 0x4b, 0xaa, 0x93, 0xe1, 0xd0,
- 0x52, 0x7f, 0xb2, 0x37, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xf3, 0xb7, 0x12, 0x44, 0xfa, 0x04,
- 0x00, 0x00,
+ // 734 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0xcb, 0x6e, 0xdb, 0x46,
+ 0x14, 0xf5, 0xf0, 0x29, 0x5e, 0xd9, 0x02, 0x3d, 0x50, 0x51, 0xd6, 0x68, 0x55, 0x81, 0x40, 0x01,
+ 0x2d, 0x0a, 0x19, 0x76, 0x37, 0x35, 0xd0, 0x8d, 0x6c, 0xd0, 0x0f, 0xd4, 0x94, 0xe0, 0x91, 0x84,
+ 0x3e, 0x36, 0xc2, 0x48, 0x1a, 0x4b, 0x44, 0xc4, 0x47, 0x38, 0x54, 0x60, 0x7d, 0x48, 0x76, 0xf9,
+ 0x83, 0x20, 0x8b, 0xfc, 0x85, 0x97, 0xf9, 0x82, 0x20, 0xf0, 0x97, 0x04, 0x33, 0xa4, 0x4c, 0x29,
+ 0x4e, 0x16, 0xce, 0xee, 0xde, 0x7b, 0xce, 0xb9, 0x8f, 0xb9, 0x97, 0x84, 0x6a, 0xb6, 0x4a, 0x18,
+ 0x6f, 0x27, 0x69, 0x9c, 0xc5, 0x18, 0x92, 0x34, 0x0e, 0x59, 0x36, 0x67, 0x4b, 0x7e, 0x50, 0x9f,
+ 0xc5, 0xb3, 0x58, 0x86, 0x0f, 0x85, 0x95, 0x33, 0xdc, 0x37, 0x0a, 0xd4, 0x7c, 0x96, 0xa5, 0xc1,
+ 0xc4, 0x67, 0x19, 0x9d, 0xd2, 0x8c, 0xe2, 0x13, 0xd0, 0x44, 0x0e, 0x07, 0x35, 0x51, 0xab, 0x76,
+ 0xfc, 0x5b, 0xbb, 0xcc, 0xd1, 0xde, 0x66, 0x16, 0xee, 0x60, 0x95, 0x30, 0x22, 0x25, 0xf8, 0x77,
+ 0xc0, 0xa1, 0x8c, 0x8d, 0x6e, 0x69, 0x18, 0x2c, 0x56, 0xa3, 0x88, 0x86, 0xcc, 0x51, 0x9a, 0xa8,
+ 0x65, 0x11, 0x3b, 0x47, 0xce, 0x25, 0xd0, 0xa5, 0x21, 0xc3, 0x18, 0xb4, 0x39, 0x5b, 0x24, 0x8e,
+ 0x26, 0x71, 0x69, 0x8b, 0xd8, 0x32, 0x0a, 0x32, 0x47, 0xcf, 0x63, 0xc2, 0x76, 0x57, 0x00, 0x65,
+ 0x25, 0x5c, 0x05, 0x73, 0xd8, 0xfd, 0xbb, 0xdb, 0xfb, 0xa7, 0x6b, 0xef, 0x08, 0xe7, 0xac, 0x37,
+ 0xec, 0x0e, 0x3c, 0x62, 0x23, 0x6c, 0x81, 0x7e, 0xd1, 0x19, 0x5e, 0x78, 0xb6, 0x82, 0xf7, 0xc0,
+ 0xba, 0xbc, 0xea, 0x0f, 0x7a, 0x17, 0xa4, 0xe3, 0xdb, 0x2a, 0xc6, 0x50, 0x93, 0x48, 0x19, 0xd3,
+ 0x84, 0xb4, 0x3f, 0xf4, 0xfd, 0x0e, 0xf9, 0xcf, 0xd6, 0x71, 0x05, 0xb4, 0xab, 0xee, 0x79, 0xcf,
+ 0x36, 0xf0, 0x2e, 0x54, 0xfa, 0x83, 0xce, 0xc0, 0xeb, 0x7b, 0x03, 0xdb, 0x74, 0xff, 0x02, 0xa3,
+ 0x4f, 0xc3, 0x64, 0xc1, 0x70, 0x1d, 0xf4, 0x57, 0x74, 0xb1, 0xcc, 0x9f, 0x05, 0x91, 0xdc, 0xc1,
+ 0x3f, 0x83, 0x95, 0x05, 0x21, 0xe3, 0x19, 0x0d, 0x13, 0x39, 0xa7, 0x4a, 0xca, 0x80, 0x1b, 0x43,
+ 0xc5, 0xbb, 0x63, 0x61, 0xb2, 0xa0, 0x29, 0x3e, 0x04, 0x63, 0x41, 0xc7, 0x6c, 0xc1, 0x1d, 0xd4,
+ 0x54, 0x5b, 0xd5, 0xe3, 0xfd, 0xcd, 0x77, 0xbd, 0x16, 0xc8, 0xa9, 0x76, 0xff, 0xf1, 0xd7, 0x1d,
+ 0x52, 0xd0, 0xca, 0x82, 0xca, 0x37, 0x0b, 0xaa, 0x5f, 0x16, 0x7c, 0x8b, 0x00, 0x06, 0x41, 0xc8,
+ 0xfa, 0x2c, 0x0d, 0x18, 0x7f, 0x7e, 0xcd, 0x63, 0x30, 0xb9, 0x1c, 0x97, 0x3b, 0x8a, 0x54, 0xe0,
+ 0x4d, 0x45, 0xfe, 0x12, 0x85, 0x64, 0x4d, 0xc4, 0x7f, 0x82, 0xc5, 0x8a, 0x21, 0xb9, 0xa3, 0x4a,
+ 0x55, 0x7d, 0x53, 0xb5, 0x7e, 0x81, 0x42, 0x57, 0x92, 0xdd, 0x23, 0xd0, 0x65, 0x13, 0x62, 0xe9,
+ 0xf2, 0x50, 0x50, 0xbe, 0x74, 0x61, 0x6f, 0x8f, 0x6f, 0x15, 0xe3, 0xbb, 0x27, 0x60, 0x5c, 0xe7,
+ 0xad, 0x3e, 0x77, 0x36, 0xf7, 0x35, 0x82, 0x5d, 0x19, 0xf7, 0x69, 0x36, 0x99, 0xb3, 0x14, 0x1f,
+ 0x6d, 0xdd, 0xf9, 0x2f, 0x4f, 0xf4, 0x05, 0xaf, 0xbd, 0x71, 0xdf, 0xeb, 0x46, 0x95, 0xaf, 0x35,
+ 0xaa, 0x6e, 0x36, 0xda, 0x02, 0x4d, 0x5e, 0xab, 0x01, 0x8a, 0x77, 0x63, 0xef, 0x60, 0x13, 0xd4,
+ 0xae, 0x77, 0x63, 0x23, 0x11, 0x20, 0xe2, 0x42, 0x45, 0x80, 0x78, 0xb6, 0xea, 0xbe, 0x47, 0x60,
+ 0x11, 0x46, 0xa7, 0x97, 0x41, 0x94, 0x71, 0xfc, 0x23, 0x98, 0x3c, 0x63, 0xc9, 0x28, 0xe4, 0xb2,
+ 0x2f, 0x95, 0x18, 0xc2, 0xf5, 0xb9, 0x28, 0x7d, 0xbb, 0x8c, 0x26, 0xeb, 0xd2, 0xc2, 0xc6, 0x3f,
+ 0x41, 0x85, 0x67, 0x34, 0xcd, 0x04, 0x3b, 0xbf, 0x05, 0x53, 0xfa, 0x3e, 0xc7, 0x3f, 0x80, 0xc1,
+ 0xa2, 0xa9, 0x00, 0x34, 0x09, 0xe8, 0x2c, 0x9a, 0xfa, 0x1c, 0x1f, 0x40, 0x65, 0x96, 0xc6, 0xcb,
+ 0x24, 0x88, 0x66, 0x8e, 0xde, 0x54, 0x5b, 0x16, 0x79, 0xf4, 0x71, 0x0d, 0x94, 0xf1, 0xca, 0x31,
+ 0x9a, 0xa8, 0x55, 0x21, 0xca, 0x78, 0x25, 0xb2, 0xa7, 0x34, 0x9a, 0x31, 0x91, 0xc4, 0xcc, 0xb3,
+ 0x4b, 0xdf, 0xe7, 0xee, 0x3b, 0x04, 0xfa, 0xd9, 0x7c, 0x19, 0xbd, 0xc0, 0x0d, 0xa8, 0x86, 0x41,
+ 0x34, 0x12, 0x27, 0x58, 0xf6, 0x6c, 0x85, 0x41, 0x24, 0xce, 0xd0, 0xe7, 0x12, 0xa7, 0x77, 0x8f,
+ 0x78, 0xf1, 0x89, 0x84, 0xf4, 0xae, 0xc0, 0xdb, 0xc5, 0x12, 0x54, 0xb9, 0x84, 0x83, 0xcd, 0x25,
+ 0xc8, 0x02, 0x6d, 0x2f, 0x9a, 0xc4, 0xd3, 0x20, 0x9a, 0x95, 0x1b, 0x10, 0xbf, 0x1e, 0x39, 0xd5,
+ 0x2e, 0x91, 0xb6, 0xdb, 0x84, 0xca, 0x9a, 0xb5, 0xfd, 0x77, 0x30, 0x41, 0xfd, 0xb7, 0x47, 0x6c,
+ 0xe4, 0xbe, 0x84, 0x3d, 0x99, 0x8d, 0x4d, 0xbf, 0xf7, 0xcb, 0x38, 0x04, 0x63, 0x22, 0x32, 0xac,
+ 0x3f, 0x8c, 0xfd, 0x27, 0x9d, 0xae, 0x05, 0x39, 0xed, 0xb4, 0x7e, 0xff, 0xd0, 0x40, 0x1f, 0x1e,
+ 0x1a, 0xe8, 0xd3, 0x43, 0x03, 0xfd, 0x6f, 0x08, 0x76, 0x32, 0x1e, 0x1b, 0xf2, 0xaf, 0xfb, 0xc7,
+ 0xe7, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa3, 0x6c, 0x23, 0xa6, 0x05, 0x00, 0x00,
}
func (m *MetricMetadata) Marshal() (dAtA []byte, err error) {
@@ -857,6 +938,58 @@ func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *Exemplar) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Exemplar) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Exemplar) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Timestamp != 0 {
+ i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp))
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.Value != 0 {
+ i -= 8
+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value))))
+ i--
+ dAtA[i] = 0x11
+ }
+ if len(m.Labels) > 0 {
+ for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTypes(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
func (m *TimeSeries) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -881,6 +1014,20 @@ func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
+ if len(m.Exemplars) > 0 {
+ for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTypes(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
if len(m.Samples) > 0 {
for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- {
{
@@ -1273,6 +1420,30 @@ func (m *Sample) Size() (n int) {
return n
}
+func (m *Exemplar) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Labels) > 0 {
+ for _, e := range m.Labels {
+ l = e.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ }
+ if m.Value != 0 {
+ n += 9
+ }
+ if m.Timestamp != 0 {
+ n += 1 + sovTypes(uint64(m.Timestamp))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
func (m *TimeSeries) Size() (n int) {
if m == nil {
return 0
@@ -1291,6 +1462,12 @@ func (m *TimeSeries) Size() (n int) {
n += 1 + l + sovTypes(uint64(l))
}
}
+ if len(m.Exemplars) > 0 {
+ for _, e := range m.Exemplars {
+ l = e.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ }
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
@@ -1600,10 +1777,7 @@ func (m *MetricMetadata) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
- if skippy < 0 {
- return ErrInvalidLengthTypes
- }
- if (iNdEx + skippy) < 0 {
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -1684,10 +1858,122 @@ func (m *Sample) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
- if skippy < 0 {
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthTypes
}
- if (iNdEx + skippy) < 0 {
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Exemplar) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Exemplar: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Exemplar: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Labels = append(m.Labels, Label{})
+ if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 1 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var v uint64
+ if (iNdEx + 8) > l {
+ return io.ErrUnexpectedEOF
+ }
+ v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+ iNdEx += 8
+ m.Value = float64(math.Float64frombits(v))
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
+ }
+ m.Timestamp = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Timestamp |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -1800,16 +2086,47 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Exemplars = append(m.Exemplars, Exemplar{})
+ if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipTypes(dAtA[iNdEx:])
if err != nil {
return err
}
- if skippy < 0 {
- return ErrInvalidLengthTypes
- }
- if (iNdEx + skippy) < 0 {
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -1924,10 +2241,7 @@ func (m *Label) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
- if skippy < 0 {
- return ErrInvalidLengthTypes
- }
- if (iNdEx + skippy) < 0 {
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -2012,10 +2326,7 @@ func (m *Labels) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
- if skippy < 0 {
- return ErrInvalidLengthTypes
- }
- if (iNdEx + skippy) < 0 {
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -2149,10 +2460,7 @@ func (m *LabelMatcher) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
- if skippy < 0 {
- return ErrInvalidLengthTypes
- }
- if (iNdEx + skippy) < 0 {
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -2363,10 +2671,7 @@ func (m *ReadHints) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
- if skippy < 0 {
- return ErrInvalidLengthTypes
- }
- if (iNdEx + skippy) < 0 {
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -2508,10 +2813,7 @@ func (m *Chunk) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
- if skippy < 0 {
- return ErrInvalidLengthTypes
- }
- if (iNdEx + skippy) < 0 {
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -2630,10 +2932,7 @@ func (m *ChunkedSeries) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
- if skippy < 0 {
- return ErrInvalidLengthTypes
- }
- if (iNdEx + skippy) < 0 {
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
diff --git a/vendor/github.com/prometheus/prometheus/prompb/types.proto b/vendor/github.com/prometheus/prometheus/prompb/types.proto
index 259a0d40d..ee11f3a00 100644
--- a/vendor/github.com/prometheus/prometheus/prompb/types.proto
+++ b/vendor/github.com/prometheus/prometheus/prompb/types.proto
@@ -40,13 +40,27 @@ message MetricMetadata {
message Sample {
double value = 1;
+ // timestamp is in ms format, see pkg/timestamp/timestamp.go for
+ // conversion from time.Time to Prometheus timestamp.
int64 timestamp = 2;
}
+message Exemplar {
+ // Optional, can be empty.
+ repeated Label labels = 1 [(gogoproto.nullable) = false];
+ double value = 2;
+ // timestamp is in ms format, see pkg/timestamp/timestamp.go for
+ // conversion from time.Time to Prometheus timestamp.
+ int64 timestamp = 3;
+}
+
// TimeSeries represents samples and labels for a single time series.
message TimeSeries {
+ // For a timeseries to be valid, and for the samples and exemplars
+ // to be ingested by the remote system properly, the labels field is required.
repeated Label labels = 1 [(gogoproto.nullable) = false];
repeated Sample samples = 2 [(gogoproto.nullable) = false];
+ repeated Exemplar exemplars = 3 [(gogoproto.nullable) = false];
}
message Label {
diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go
index b08ebfee1..0304375c0 100644
--- a/vendor/github.com/prometheus/prometheus/promql/engine.go
+++ b/vendor/github.com/prometheus/prometheus/promql/engine.go
@@ -19,6 +19,7 @@ import (
"context"
"fmt"
"math"
+ "reflect"
"regexp"
"runtime"
"sort"
@@ -26,8 +27,8 @@ import (
"sync"
"time"
- "github.com/go-kit/kit/log"
- "github.com/go-kit/kit/log/level"
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
@@ -116,6 +117,8 @@ type Query interface {
Stats() *stats.QueryTimers
// Cancel signals that a running query execution should be aborted.
Cancel()
+ // String returns the original query string.
+ String() string
}
// query implements the Query interface.
@@ -140,10 +143,17 @@ type query struct {
type QueryOrigin struct{}
// Statement implements the Query interface.
+// Calling this after Exec may result in panic,
+// see https://github.com/prometheus/prometheus/issues/8949.
func (q *query) Statement() parser.Statement {
return q.stmt
}
+// String implements the Query interface.
+func (q *query) String() string {
+ return q.q
+}
+
// Stats implements the Query interface.
func (q *query) Stats() *stats.QueryTimers {
return q.stats
@@ -208,6 +218,12 @@ type EngineOpts struct {
// NoStepSubqueryIntervalFn is the default evaluation interval of
// a subquery in milliseconds if no step in range vector was specified `[30m:]`.
NoStepSubqueryIntervalFn func(rangeMillis int64) int64
+
+ // EnableAtModifier if true enables @ modifier. Disabled otherwise.
+ EnableAtModifier bool
+
+ // EnableNegativeOffset if true enables negative (-) offset values. Disabled otherwise.
+ EnableNegativeOffset bool
}
// Engine handles the lifetime of queries from beginning to end.
@@ -222,6 +238,8 @@ type Engine struct {
queryLoggerLock sync.RWMutex
lookbackDelta time.Duration
noStepSubqueryIntervalFn func(rangeMillis int64) int64
+ enableAtModifier bool
+ enableNegativeOffset bool
NodeReplacer parser.NodeReplacer
}
@@ -303,6 +321,8 @@ func NewEngine(opts EngineOpts) *Engine {
activeQueryTracker: opts.ActiveQueryTracker,
lookbackDelta: opts.LookbackDelta,
noStepSubqueryIntervalFn: opts.NoStepSubqueryIntervalFn,
+ enableAtModifier: opts.EnableAtModifier,
+ enableNegativeOffset: opts.EnableNegativeOffset,
}
}
@@ -335,7 +355,10 @@ func (ng *Engine) NewInstantQuery(q storage.Queryable, qs string, ts time.Time)
if err != nil {
return nil, err
}
- qry := ng.newQuery(q, expr, ts, ts, 0)
+ qry, err := ng.newQuery(q, expr, ts, ts, 0)
+ if err != nil {
+ return nil, err
+ }
qry.q = qs
return qry, nil
@@ -351,26 +374,86 @@ func (ng *Engine) NewRangeQuery(q storage.Queryable, qs string, start, end time.
if expr.Type() != parser.ValueTypeVector && expr.Type() != parser.ValueTypeScalar {
return nil, errors.Errorf("invalid expression type %q for range query, must be Scalar or instant Vector", parser.DocumentedType(expr.Type()))
}
- qry := ng.newQuery(q, expr, start, end, interval)
+ qry, err := ng.newQuery(q, expr, start, end, interval)
+ if err != nil {
+ return nil, err
+ }
qry.q = qs
return qry, nil
}
-func (ng *Engine) newQuery(q storage.Queryable, expr parser.Expr, start, end time.Time, interval time.Duration) *query {
+func (ng *Engine) newQuery(q storage.Queryable, expr parser.Expr, start, end time.Time, interval time.Duration) (*query, error) {
es := &parser.EvalStmt{
- Expr: expr,
+ Expr: PreprocessExpr(expr, start, end),
Start: start,
End: end,
Interval: interval,
}
+ if err := ng.validateOpts(es); err != nil {
+ return nil, err
+ }
qry := &query{
stmt: es,
ng: ng,
stats: stats.NewQueryTimers(),
queryable: q,
}
- return qry
+ return qry, nil
+}
+
+var ErrValidationAtModifierDisabled = errors.New("@ modifier is disabled")
+var ErrValidationNegativeOffsetDisabled = errors.New("negative offset is disabled")
+
+func (ng *Engine) validateOpts(expr *parser.EvalStmt) error {
+ if ng.enableAtModifier && ng.enableNegativeOffset {
+ return nil
+ }
+
+ var atModifierUsed, negativeOffsetUsed bool
+
+ var validationErr error
+ parser.Inspect(context.TODO(), expr, func(node parser.Node, path []parser.Node) error {
+ switch n := node.(type) {
+ case *parser.VectorSelector:
+ if n.Timestamp != nil || n.StartOrEnd == parser.START || n.StartOrEnd == parser.END {
+ atModifierUsed = true
+ }
+ if n.OriginalOffset < 0 {
+ negativeOffsetUsed = true
+ }
+
+ case *parser.MatrixSelector:
+ vs := n.VectorSelector.(*parser.VectorSelector)
+ if vs.Timestamp != nil || vs.StartOrEnd == parser.START || vs.StartOrEnd == parser.END {
+ atModifierUsed = true
+ }
+ if vs.OriginalOffset < 0 {
+ negativeOffsetUsed = true
+ }
+
+ case *parser.SubqueryExpr:
+ if n.Timestamp != nil || n.StartOrEnd == parser.START || n.StartOrEnd == parser.END {
+ atModifierUsed = true
+ }
+ if n.OriginalOffset < 0 {
+ negativeOffsetUsed = true
+ }
+ }
+
+ if atModifierUsed && !ng.enableAtModifier {
+ validationErr = ErrValidationAtModifierDisabled
+ return validationErr
+ }
+ if negativeOffsetUsed && !ng.enableNegativeOffset {
+ validationErr = ErrValidationNegativeOffsetDisabled
+ return validationErr
+ }
+
+ return nil
+ }, nil)
+
+ return validationErr
}
func (ng *Engine) newTestQuery(f func(context.Context) error) Query {
@@ -447,8 +530,6 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws storag
// Cancel when execution is done or an error was raised.
defer q.cancel()
- const env = "query execution"
-
evalSpanTimer, ctx := q.stats.GetSpanTimer(ctx, stats.EvalTotalTime)
defer evalSpanTimer.Finish()
@@ -478,8 +559,8 @@ func durationMilliseconds(d time.Duration) int64 {
// execEvalStmt evaluates the expression of an evaluation statement for the given time range.
func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.EvalStmt) (parser.Value, storage.Warnings, error) {
prepareSpanTimer, ctxPrepare := query.stats.GetSpanTimer(ctx, stats.QueryPreparationTime, ng.metrics.queryPrepareTime)
- mint := ng.findMinTime(s)
- querier, err := query.queryable.Querier(ctxPrepare, timestamp.FromTime(mint), timestamp.FromTime(s.End))
+ mint, maxt := ng.findMinMaxTime(s)
+ querier, err := query.queryable.Querier(ctxPrepare, mint, maxt)
if err != nil {
prepareSpanTimer.Finish()
return nil, nil, err
@@ -492,6 +573,9 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval
}
prepareSpanTimer.Finish()
+ // Modify the offset of vector and matrix selectors for the @ modifier
+ // w.r.t. the start time since only 1 evaluation will be done on them.
+ setOffsetForAtModifier(timeMilliseconds(s.Start), s.Expr)
evalSpanTimer, ctxInnerEval := query.stats.GetSpanTimer(ctx, stats.InnerEvalTime, ng.metrics.queryInnerEval)
// Instant evaluation. This is executed as a range evaluation with one step.
if s.Start == s.End && s.Interval == 0 {
@@ -580,48 +664,102 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval
return mat, warnings, nil
}
-// subqueryOffsetRange returns the sum of offsets and ranges of all subqueries in the path.
-func (ng *Engine) subqueryOffsetRange(path []parser.Node) (time.Duration, time.Duration) {
+// subqueryTimes returns the sum of offsets and ranges of all subqueries in the path.
+// If the @ modifier is used, then the offset and range is w.r.t. that timestamp
+// (i.e. the sum is reset when we have @ modifier).
+// The returned *int64 is the closest timestamp that was seen. nil for no @ modifier.
+func subqueryTimes(path []parser.Node) (time.Duration, time.Duration, *int64) {
var (
- subqOffset time.Duration
- subqRange time.Duration
+ subqOffset, subqRange time.Duration
+ ts int64 = math.MaxInt64
)
for _, node := range path {
switch n := node.(type) {
case *parser.SubqueryExpr:
- subqOffset += n.Offset
+ subqOffset += n.OriginalOffset
subqRange += n.Range
+ if n.Timestamp != nil {
+ // The @ modifier on subquery invalidates all the offset and
+ // range till now. Hence resetting it here.
+ subqOffset = n.OriginalOffset
+ subqRange = n.Range
+ ts = *n.Timestamp
+ }
}
}
- return subqOffset, subqRange
+ var tsp *int64
+ if ts != math.MaxInt64 {
+ tsp = &ts
+ }
+ return subqOffset, subqRange, tsp
}
-func (ng *Engine) findMinTime(s *parser.EvalStmt) time.Time {
- var maxOffset time.Duration
- l := sync.Mutex{}
+func (ng *Engine) findMinMaxTime(s *parser.EvalStmt) (int64, int64) {
+ var minTimestamp, maxTimestamp int64 = math.MaxInt64, math.MinInt64
+ // Whenever a MatrixSelector is evaluated, evalRange is set to the corresponding range.
+ // The evaluation of the VectorSelector inside then evaluates the given range and unsets
+ // the variable.
+ var evalRange time.Duration
parser.Inspect(context.TODO(), s, func(node parser.Node, path []parser.Node) error {
- l.Lock()
- defer l.Unlock()
- subqOffset, subqRange := ng.subqueryOffsetRange(path)
switch n := node.(type) {
case *parser.VectorSelector:
- if maxOffset < ng.lookbackDelta+subqOffset+subqRange {
- maxOffset = ng.lookbackDelta + subqOffset + subqRange
+ start, end := ng.getTimeRangesForSelector(s, n, path, evalRange)
+ if start < minTimestamp {
+ minTimestamp = start
}
- if n.Offset+ng.lookbackDelta+subqOffset+subqRange > maxOffset {
- maxOffset = n.Offset + ng.lookbackDelta + subqOffset + subqRange
+ if end > maxTimestamp {
+ maxTimestamp = end
}
+ evalRange = 0
+
case *parser.MatrixSelector:
- if maxOffset < n.Range+subqOffset+subqRange {
- maxOffset = n.Range + subqOffset + subqRange
- }
- if m := n.VectorSelector.(*parser.VectorSelector).Offset + n.Range + subqOffset + subqRange; m > maxOffset {
- maxOffset = m
- }
+ evalRange = n.Range
}
return nil
}, nil)
- return s.Start.Add(-maxOffset)
+
+ if maxTimestamp == math.MinInt64 {
+ // This happens when there was no selector. Hence no time range to select.
+ minTimestamp = 0
+ maxTimestamp = 0
+ }
+
+ return minTimestamp, maxTimestamp
+}
+
+func (ng *Engine) getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorSelector, path []parser.Node, evalRange time.Duration) (int64, int64) {
+ start, end := timestamp.FromTime(s.Start), timestamp.FromTime(s.End)
+ subqOffset, subqRange, subqTs := subqueryTimes(path)
+
+ if subqTs != nil {
+ // The timestamp on the subquery overrides the eval statement time ranges.
+ start = *subqTs
+ end = *subqTs
+ }
+
+ if n.Timestamp != nil {
+ // The timestamp on the selector overrides everything.
+ start = *n.Timestamp
+ end = *n.Timestamp
+ } else {
+ offsetMilliseconds := durationMilliseconds(subqOffset)
+ start = start - offsetMilliseconds - durationMilliseconds(subqRange)
+ end = end - offsetMilliseconds
+ }
+
+ if evalRange == 0 {
+ start = start - durationMilliseconds(ng.lookbackDelta)
+ } else {
+ // For all matrix queries we want to ensure that we have (end-start) + range selected
+ // this way we have `range` data before the start time
+ start = start - durationMilliseconds(evalRange)
+ }
+
+ offsetMilliseconds := durationMilliseconds(n.OriginalOffset)
+ start = start - offsetMilliseconds
+ end = end - offsetMilliseconds
+
+ return start, end
}
func (ng *Engine) populateSeries(ctx context.Context, querier storage.Querier, s *parser.EvalStmt) error {
@@ -639,40 +777,18 @@ func (ng *Engine) populateSeries(ctx context.Context, querier storage.Querier, s
if n.UnexpandedSeriesSet != nil {
return nil
}
+ start, end := ng.getTimeRangesForSelector(s, n, path, evalRange)
hints := &storage.SelectHints{
- Start: timestamp.FromTime(s.Start),
- End: timestamp.FromTime(s.End),
+ Start: start,
+ End: end,
Step: durationMilliseconds(s.Interval),
+ Range: durationMilliseconds(evalRange),
+ Func: extractFuncFromPath(path),
}
-
- // We need to make sure we select the timerange selected by the subquery.
- // The subqueryOffsetRange function gives the sum of range and the
- // sum of offset.
- // TODO(bwplotka): Add support for better hints when subquerying. See: https://github.com/prometheus/prometheus/issues/7630.
- subqOffset, subqRange := ng.subqueryOffsetRange(path)
- offsetMilliseconds := durationMilliseconds(subqOffset)
- hints.Start = hints.Start - offsetMilliseconds - durationMilliseconds(subqRange)
- hints.End = hints.End - offsetMilliseconds
-
- if evalRange == 0 {
- hints.Start = hints.Start - durationMilliseconds(ng.lookbackDelta)
- } else {
- hints.Range = durationMilliseconds(evalRange)
- // For all matrix queries we want to ensure that we have (end-start) + range selected
- // this way we have `range` data before the start time
- hints.Start = hints.Start - durationMilliseconds(evalRange)
- evalRange = 0
- }
-
- hints.Func = extractFuncFromPath(path)
+ evalRange = 0
hints.By, hints.Grouping = extractGroupsFromPath(path)
- if n.Offset > 0 {
- offsetMilliseconds := durationMilliseconds(n.Offset)
- hints.Start = hints.Start - offsetMilliseconds
- hints.End = hints.End - offsetMilliseconds
- }
-
n.UnexpandedSeriesSet = querier.Select(false, hints, n.LabelMatchers...)
+
case *parser.MatrixSelector:
evalRange = n.Range
}
@@ -812,6 +928,12 @@ func (ev *evaluator) Eval(expr parser.Expr) (v parser.Value, ws storage.Warnings
return v, ws, nil
}
+// EvalSeriesHelper stores extra information about a series.
+type EvalSeriesHelper struct {
+ // The grouping key used by aggregation.
+ groupingKey uint64
+}
+
// EvalNodeHelper stores extra information and caches for evaluating a single node across steps.
type EvalNodeHelper struct {
// Evaluation timestamp.
@@ -872,10 +994,12 @@ func (enh *EvalNodeHelper) signatureFunc(on bool, names ...string) func(labels.L
}
// rangeEval evaluates the given expressions, and then for each step calls
-// the given function with the values computed for each expression at that
-// step. The return value is the combination into time series of all the
+// the given funcCall with the values computed for each expression at that
+// step. The return value is the combination into time series of all the
// function call results.
-func (ev *evaluator) rangeEval(f func([]parser.Value, *EvalNodeHelper) (Vector, storage.Warnings), exprs ...parser.Expr) (Matrix, storage.Warnings) {
+// The prepSeries function (if provided) can be used to prepare the helper
+// for each series, then passed to each call funcCall.
+func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper), funcCall func([]parser.Value, [][]EvalSeriesHelper, *EvalNodeHelper) (Vector, storage.Warnings), exprs ...parser.Expr) (Matrix, storage.Warnings) {
numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1
matrixes := make([]Matrix, len(exprs))
origMatrixes := make([]Matrix, len(exprs))
@@ -911,6 +1035,30 @@ func (ev *evaluator) rangeEval(f func([]parser.Value, *EvalNodeHelper) (Vector,
enh := &EvalNodeHelper{Out: make(Vector, 0, biggestLen)}
seriess := make(map[uint64]Series, biggestLen) // Output series by series hash.
tempNumSamples := ev.currentSamples
+
+ var (
+ seriesHelpers [][]EvalSeriesHelper
+ bufHelpers [][]EvalSeriesHelper // Buffer updated on each step
+ )
+
+ // If the series preparation function is provided, we should run it for
+ // every single series in the matrix.
+ if prepSeries != nil {
+ seriesHelpers = make([][]EvalSeriesHelper, len(exprs))
+ bufHelpers = make([][]EvalSeriesHelper, len(exprs))
+
+ for i := range exprs {
+ seriesHelpers[i] = make([]EvalSeriesHelper, len(matrixes[i]))
+ bufHelpers[i] = make([]EvalSeriesHelper, len(matrixes[i]))
+
+ for si, series := range matrixes[i] {
+ h := seriesHelpers[i][si]
+ prepSeries(series.Metric, &h)
+ seriesHelpers[i][si] = h
+ }
+ }
+ }
+
for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval {
if err := contextDone(ev.ctx, "expression evaluation"); err != nil {
ev.error(err)
@@ -920,11 +1068,20 @@ func (ev *evaluator) rangeEval(f func([]parser.Value, *EvalNodeHelper) (Vector,
// Gather input vectors for this timestamp.
for i := range exprs {
vectors[i] = vectors[i][:0]
+
+ if prepSeries != nil {
+ bufHelpers[i] = bufHelpers[i][:0]
+ }
+
for si, series := range matrixes[i] {
for _, point := range series.Points {
if point.T == ts {
if ev.currentSamples < ev.maxSamples {
vectors[i] = append(vectors[i], Sample{Metric: series.Metric, Point: point})
+ if prepSeries != nil {
+ bufHelpers[i] = append(bufHelpers[i], seriesHelpers[i][si])
+ }
+
// Move input vectors forward so we don't have to re-scan the same
// past points at the next step.
matrixes[i][si].Points = series.Points[1:]
@@ -938,9 +1095,10 @@ func (ev *evaluator) rangeEval(f func([]parser.Value, *EvalNodeHelper) (Vector,
}
args[i] = vectors[i]
}
+
// Make the function call.
enh.Ts = ts
- result, ws := f(args, enh)
+ result, ws := funcCall(args, bufHelpers, enh)
if result.ContainsSameLabelset() {
ev.errorf("vector cannot contain metrics with the same labelset")
}
@@ -1001,21 +1159,30 @@ func (ev *evaluator) rangeEval(f func([]parser.Value, *EvalNodeHelper) (Vector,
// evalSubquery evaluates given SubqueryExpr and returns an equivalent
// evaluated MatrixSelector in its place. Note that the Name and LabelMatchers are not set.
-func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSelector, storage.Warnings) {
+func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSelector, int, storage.Warnings) {
val, ws := ev.eval(subq)
mat := val.(Matrix)
vs := &parser.VectorSelector{
- Offset: subq.Offset,
- Series: make([]storage.Series, 0, len(mat)),
+ OriginalOffset: subq.OriginalOffset,
+ Offset: subq.Offset,
+ Series: make([]storage.Series, 0, len(mat)),
+ Timestamp: subq.Timestamp,
+ }
+ if subq.Timestamp != nil {
+ // The offset of subquery is not modified in case of @ modifier.
+ // Hence we take care of that here for the result.
+ vs.Offset = subq.OriginalOffset + time.Duration(ev.startTimestamp-*subq.Timestamp)*time.Millisecond
}
ms := &parser.MatrixSelector{
Range: subq.Range,
VectorSelector: vs,
}
+ totalSamples := 0
for _, s := range mat {
+ totalSamples += len(s.Points)
vs.Series = append(vs.Series, NewStorageSeries(s))
}
- return ms, ws
+ return ms, totalSamples, ws
}
// eval evaluates the given expression as the given AST expression node requires.
@@ -1027,33 +1194,53 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
}
numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1
+ // Create a new span to help investigate inner evaluation performances.
+ span, _ := opentracing.StartSpanFromContext(ev.ctx, stats.InnerEvalTime.SpanOperation()+" eval "+reflect.TypeOf(expr).String())
+ defer span.Finish()
+
switch e := expr.(type) {
case *parser.AggregateExpr:
+ // Grouping labels must be sorted (expected both by generateGroupingKey() and aggregation()).
+ sortedGrouping := e.Grouping
+ sort.Strings(sortedGrouping)
+
+ // Prepare a function to initialise series helpers with the grouping key.
+ buf := make([]byte, 0, 1024)
+ initSeries := func(series labels.Labels, h *EvalSeriesHelper) {
+ h.groupingKey, buf = generateGroupingKey(series, sortedGrouping, e.Without, buf)
+ }
+
unwrapParenExpr(&e.Param)
- if s, ok := e.Param.(*parser.StringLiteral); ok {
- return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
- return ev.aggregation(e.Op, e.Grouping, e.Without, s.Val, v[0].(Vector), enh), nil
+ if s, ok := unwrapStepInvariantExpr(e.Param).(*parser.StringLiteral); ok {
+ return ev.rangeEval(initSeries, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
+ return ev.aggregation(e.Op, sortedGrouping, e.Without, s.Val, v[0].(Vector), sh[0], enh), nil
}, e.Expr)
}
- return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
+
+ return ev.rangeEval(initSeries, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
var param float64
if e.Param != nil {
param = v[0].(Vector)[0].V
}
- return ev.aggregation(e.Op, e.Grouping, e.Without, param, v[1].(Vector), enh), nil
+ return ev.aggregation(e.Op, sortedGrouping, e.Without, param, v[1].(Vector), sh[1], enh), nil
}, e.Param, e.Expr)
case *parser.Call:
call := FunctionCalls[e.Func.Name]
-
if e.Func.Name == "timestamp" {
// Matrix evaluation always returns the evaluation time,
// so this function needs special handling when given
// a vector selector.
unwrapParenExpr(&e.Args[0])
- vs, ok := e.Args[0].(*parser.VectorSelector)
+ arg := unwrapStepInvariantExpr(e.Args[0])
+ vs, ok := arg.(*parser.VectorSelector)
if ok {
- return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
+ return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
+ if vs.Timestamp != nil {
+ // This is a special case only for "timestamp" since the offset
+ // needs to be adjusted for every point.
+ vs.Offset = time.Duration(enh.Ts-*vs.Timestamp) * time.Millisecond
+ }
val, ws := ev.vectorSelector(vs, enh.Ts)
return call([]parser.Value{val}, e.Args, enh), ws
})
@@ -1068,7 +1255,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
)
for i := range e.Args {
unwrapParenExpr(&e.Args[i])
- a := e.Args[i]
+ a := unwrapStepInvariantExpr(e.Args[i])
if _, ok := a.(*parser.MatrixSelector); ok {
matrixArgIndex = i
matrixArg = true
@@ -1079,15 +1266,20 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
matrixArgIndex = i
matrixArg = true
// Replacing parser.SubqueryExpr with parser.MatrixSelector.
- val, ws := ev.evalSubquery(subq)
+ val, totalSamples, ws := ev.evalSubquery(subq)
e.Args[i] = val
warnings = append(warnings, ws...)
+ defer func() {
+ // subquery result takes space in the memory. Get rid of that at the end.
+ val.VectorSelector.(*parser.VectorSelector).Series = nil
+ ev.currentSamples -= totalSamples
+ }()
break
}
}
if !matrixArg {
// Does not have a matrix argument.
- return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
+ return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
return call(v, e.Args, enh), warnings
}, e.Args...)
}
@@ -1106,7 +1298,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
}
}
- sel := e.Args[matrixArgIndex].(*parser.MatrixSelector)
+ sel := unwrapStepInvariantExpr(e.Args[matrixArgIndex]).(*parser.MatrixSelector)
selVS := sel.VectorSelector.(*parser.VectorSelector)
ws, err := checkAndExpandSeriesSet(ev.ctx, sel)
@@ -1132,11 +1324,16 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
ev.currentSamples -= len(points)
points = points[:0]
it.Reset(s.Iterator())
+ metric := selVS.Series[i].Labels()
+ // The last_over_time function acts like offset; thus, it
+ // should keep the metric name. For all the other range
+ // vector functions, the only change needed is to drop the
+ // metric name in the output.
+ if e.Func.Name != "last_over_time" {
+ metric = dropMetricName(metric)
+ }
ss := Series{
- // For all range vector functions, the only change to the
- // output labels is dropping the metric name so just do
- // it once here.
- Metric: dropMetricName(selVS.Series[i].Labels()),
+ Metric: metric,
Points: getPointSlice(numSteps),
}
inMatrix[0].Metric = selVS.Series[i].Labels()
@@ -1169,7 +1366,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
it.ReduceDelta(stepRange)
}
if len(ss.Points) > 0 {
- if ev.currentSamples < ev.maxSamples {
+ if ev.currentSamples+len(ss.Points) <= ev.maxSamples {
mat = append(mat, ss)
ev.currentSamples += len(ss.Points)
} else {
@@ -1249,53 +1446,56 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
case *parser.BinaryExpr:
switch lt, rt := e.LHS.Type(), e.RHS.Type(); {
case lt == parser.ValueTypeScalar && rt == parser.ValueTypeScalar:
- return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
+ return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
val := scalarBinop(e.Op, v[0].(Vector)[0].Point.V, v[1].(Vector)[0].Point.V)
return append(enh.Out, Sample{Point: Point{V: val}}), nil
}, e.LHS, e.RHS)
case lt == parser.ValueTypeVector && rt == parser.ValueTypeVector:
switch e.Op {
case parser.LAND:
- return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
+ return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
return ev.VectorAnd(v[0].(Vector), v[1].(Vector), e.VectorMatching, enh), nil
}, e.LHS, e.RHS)
case parser.LOR:
- return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
+ return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
return ev.VectorOr(v[0].(Vector), v[1].(Vector), e.VectorMatching, enh), nil
}, e.LHS, e.RHS)
case parser.LUNLESS:
- return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
+ return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
return ev.VectorUnless(v[0].(Vector), v[1].(Vector), e.VectorMatching, enh), nil
}, e.LHS, e.RHS)
default:
- return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
+ return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
return ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, enh), nil
}, e.LHS, e.RHS)
}
case lt == parser.ValueTypeVector && rt == parser.ValueTypeScalar:
- return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
+ return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
return ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].Point.V}, false, e.ReturnBool, enh), nil
}, e.LHS, e.RHS)
case lt == parser.ValueTypeScalar && rt == parser.ValueTypeVector:
- return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
+ return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
return ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].Point.V}, true, e.ReturnBool, enh), nil
}, e.LHS, e.RHS)
}
case *parser.NumberLiteral:
- return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
+ return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
return append(enh.Out, Sample{Point: Point{V: e.Val}}), nil
})
+ case *parser.StringLiteral:
+ return String{V: e.Val, T: ev.startTimestamp}, nil
+
case *parser.VectorSelector:
ws, err := checkAndExpandSeriesSet(ev.ctx, e)
if err != nil {
ev.error(errWithWarnings{errors.Wrap(err, "expanding series"), ws})
}
mat := make(Matrix, 0, len(e.Series))
- it := storage.NewBuffer(durationMilliseconds(ev.lookbackDelta))
+ it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta))
for i, s := range e.Series {
it.Reset(s.Iterator())
ss := Series{
@@ -1355,11 +1555,65 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
newEv.startTimestamp += newEv.interval
}
+ if newEv.startTimestamp != ev.startTimestamp {
+ // Adjust the offset of selectors based on the new
+ // start time of the evaluator since the calculation
+ // of the offset with @ happens w.r.t. the start time.
+ setOffsetForAtModifier(newEv.startTimestamp, e.Expr)
+ }
+
res, ws := newEv.eval(e.Expr)
ev.currentSamples = newEv.currentSamples
return res, ws
- case *parser.StringLiteral:
- return String{V: e.Val, T: ev.startTimestamp}, nil
+ case *parser.StepInvariantExpr:
+ switch ce := e.Expr.(type) {
+ case *parser.StringLiteral, *parser.NumberLiteral:
+ return ev.eval(ce)
+ }
+
+ newEv := &evaluator{
+ startTimestamp: ev.startTimestamp,
+ endTimestamp: ev.startTimestamp, // Always a single evaluation.
+ interval: ev.interval,
+ ctx: ev.ctx,
+ currentSamples: ev.currentSamples,
+ maxSamples: ev.maxSamples,
+ logger: ev.logger,
+ lookbackDelta: ev.lookbackDelta,
+ noStepSubqueryIntervalFn: ev.noStepSubqueryIntervalFn,
+ }
+ res, ws := newEv.eval(e.Expr)
+ ev.currentSamples = newEv.currentSamples
+ switch e.Expr.(type) {
+ case *parser.MatrixSelector, *parser.SubqueryExpr:
+ // We do not duplicate results for range selectors since result is a matrix
+ // with their unique timestamps which does not depend on the step.
+ return res, ws
+ }
+
+ // For every evaluation while the value remains same, the timestamp for that
+ // value would change for different eval times. Hence we duplicate the result
+ // with changed timestamps.
+ mat, ok := res.(Matrix)
+ if !ok {
+ panic(errors.Errorf("unexpected result in StepInvariantExpr evaluation: %T", expr))
+ }
+ for i := range mat {
+ if len(mat[i].Points) != 1 {
+ panic(errors.Errorf("unexpected number of samples"))
+ }
+ for ts := ev.startTimestamp + ev.interval; ts <= ev.endTimestamp; ts = ts + ev.interval {
+ mat[i].Points = append(mat[i].Points, Point{
+ T: ts,
+ V: mat[i].Points[0].V,
+ })
+ ev.currentSamples++
+ if ev.currentSamples > ev.maxSamples {
+ ev.error(ErrTooManySamples(env))
+ }
+ }
+ }
+ return res, ws
}
panic(errors.Errorf("unhandled expression of type: %T", expr))
@@ -1372,7 +1626,7 @@ func (ev *evaluator) vectorSelector(node *parser.VectorSelector, ts int64) (Vect
ev.error(errWithWarnings{errors.Wrap(err, "expanding series"), ws})
}
vec := make(Vector, 0, len(node.Series))
- it := storage.NewBuffer(durationMilliseconds(ev.lookbackDelta))
+ it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta))
for i, s := range node.Series {
it.Reset(s.Iterator())
@@ -1382,18 +1636,19 @@ func (ev *evaluator) vectorSelector(node *parser.VectorSelector, ts int64) (Vect
Metric: node.Series[i].Labels(),
Point: Point{V: v, T: t},
})
+
ev.currentSamples++
+ if ev.currentSamples > ev.maxSamples {
+ ev.error(ErrTooManySamples(env))
+ }
}
- if ev.currentSamples >= ev.maxSamples {
- ev.error(ErrTooManySamples(env))
- }
}
return vec, ws
}
// vectorSelectorSingle evaluates a instant vector for the iterator of one time series.
-func (ev *evaluator) vectorSelectorSingle(it *storage.BufferedSeriesIterator, node *parser.VectorSelector, ts int64) (int64, float64, bool) {
+func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, node *parser.VectorSelector, ts int64) (int64, float64, bool) {
refTime := ts - durationMilliseconds(node.Offset)
var t int64
var v float64
@@ -1410,7 +1665,7 @@ func (ev *evaluator) vectorSelectorSingle(it *storage.BufferedSeriesIterator, no
}
if !ok || t > refTime {
- t, v, ok = it.PeekBack(1)
+ t, v, ok = it.PeekPrev()
if !ok || t < refTime-durationMilliseconds(node.GetLookbackDelta(ev.lookbackDelta)) {
return 0, 0, false
}
@@ -1432,7 +1687,7 @@ func getPointSlice(sz int) []Point {
}
func putPointSlice(p []Point) {
- //lint:ignore SA6002 relax staticcheck verification.
+ //nolint:staticcheck // Ignore SA6002 relax staticcheck verification.
pointPool.Put(p[:0])
}
@@ -1520,8 +1775,8 @@ func (ev *evaluator) matrixIterSlice(it *storage.BufferedSeriesIterator, mint, m
if ev.currentSamples >= ev.maxSamples {
ev.error(ErrTooManySamples(env))
}
- out = append(out, Point{T: t, V: v})
ev.currentSamples++
+ out = append(out, Point{T: t, V: v})
}
}
// The seeked sample might also be in the range.
@@ -1891,8 +2146,9 @@ type groupedAggregation struct {
reverseHeap vectorByReverseValueHeap
}
-// aggregation evaluates an aggregation operation on a Vector.
-func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without bool, param interface{}, vec Vector, enh *EvalNodeHelper) Vector {
+// aggregation evaluates an aggregation operation on a Vector. The provided grouping labels
+// must be sorted.
+func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without bool, param interface{}, vec Vector, seriesHelper []EvalSeriesHelper, enh *EvalNodeHelper) Vector {
result := map[uint64]*groupedAggregation{}
var k int64
@@ -1911,35 +2167,43 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
q = param.(float64)
}
var valueLabel string
+ var recomputeGroupingKey bool
if op == parser.COUNT_VALUES {
valueLabel = param.(string)
if !model.LabelName(valueLabel).IsValid() {
ev.errorf("invalid label name %q", valueLabel)
}
if !without {
+ // We're changing the grouping labels so we have to ensure they're still sorted
+ // and we have to flag to recompute the grouping key. Considering the count_values()
+ // operator is less frequently used than other aggregations, we're fine having to
+ // re-compute the grouping key on each step for this case.
grouping = append(grouping, valueLabel)
+ sort.Strings(grouping)
+ recomputeGroupingKey = true
}
}
- sort.Strings(grouping)
lb := labels.NewBuilder(nil)
- buf := make([]byte, 0, 1024)
- for _, s := range vec {
+ var buf []byte
+ for si, s := range vec {
metric := s.Metric
if op == parser.COUNT_VALUES {
lb.Reset(metric)
lb.Set(valueLabel, strconv.FormatFloat(s.V, 'f', -1, 64))
metric = lb.Labels()
+
+ // We've changed the metric so we have to recompute the grouping key.
+ recomputeGroupingKey = true
}
- var (
- groupingKey uint64
- )
- if without {
- groupingKey, buf = metric.HashWithoutLabels(buf, grouping...)
+ // We can use the pre-computed grouping key unless grouping labels have changed.
+ var groupingKey uint64
+ if !recomputeGroupingKey {
+ groupingKey = seriesHelper[si].groupingKey
} else {
- groupingKey, buf = metric.HashForLabels(buf, grouping...)
+ groupingKey, buf = generateGroupingKey(metric, grouping, without, buf)
}
group, ok := result[groupingKey]
@@ -1953,16 +2217,7 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
lb.Del(labels.MetricName)
m = lb.Labels()
} else {
- m = make(labels.Labels, 0, len(grouping))
- for _, l := range metric {
- for _, n := range grouping {
- if l.Name == n {
- m = append(m, l)
- break
- }
- }
- }
- sort.Sort(m)
+ m = metric.WithLabels(grouping...)
}
result[groupingKey] = &groupedAggregation{
labels: m,
@@ -2101,7 +2356,7 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
continue // Bypass default append.
case parser.BOTTOMK:
- // The heap keeps the lowest value on top, so reverse it.
+ // The heap keeps the highest value on top, so reverse it.
sort.Sort(sort.Reverse(aggr.reverseHeap))
for _, v := range aggr.reverseHeap {
enh.Out = append(enh.Out, Sample{
@@ -2126,6 +2381,21 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
return enh.Out
}
+// generateGroupingKey builds and returns the grouping key for the given metric
+// and grouping labels.
+func generateGroupingKey(metric labels.Labels, grouping []string, without bool, buf []byte) (uint64, []byte) {
+ if without {
+ return metric.HashWithoutLabels(buf, grouping...)
+ }
+
+ if len(grouping) == 0 {
+ // No need to generate any hash if there are no grouping labels.
+ return 0, buf
+ }
+
+ return metric.HashForLabels(buf, grouping...)
+}
+
// btos returns 1 if b is true, 0 otherwise.
func btos(b bool) float64 {
if b {
@@ -2164,3 +2434,158 @@ func unwrapParenExpr(e *parser.Expr) {
}
}
}
+
+func unwrapStepInvariantExpr(e parser.Expr) parser.Expr {
+ if p, ok := e.(*parser.StepInvariantExpr); ok {
+ return p.Expr
+ }
+ return e
+}
+
+// PreprocessExpr wraps all possible step invariant parts of the given expression with
+// StepInvariantExpr. It also resolves the preprocessors.
+func PreprocessExpr(expr parser.Expr, start, end time.Time) parser.Expr {
+ isStepInvariant := preprocessExprHelper(expr, start, end)
+ if isStepInvariant {
+ return newStepInvariantExpr(expr)
+ }
+ return expr
+}
+
+// preprocessExprHelper wraps the child nodes of the expression
+// with a StepInvariantExpr wherever it's step invariant. The returned boolean is true if the
+// passed expression qualifies to be wrapped by StepInvariantExpr.
+// It also resolves the preprocessors.
+func preprocessExprHelper(expr parser.Expr, start, end time.Time) bool {
+ switch n := expr.(type) {
+ case *parser.VectorSelector:
+ if n.StartOrEnd == parser.START {
+ n.Timestamp = makeInt64Pointer(timestamp.FromTime(start))
+ } else if n.StartOrEnd == parser.END {
+ n.Timestamp = makeInt64Pointer(timestamp.FromTime(end))
+ }
+ return n.Timestamp != nil
+
+ case *parser.AggregateExpr:
+ return preprocessExprHelper(n.Expr, start, end)
+
+ case *parser.BinaryExpr:
+ isInvariant1, isInvariant2 := preprocessExprHelper(n.LHS, start, end), preprocessExprHelper(n.RHS, start, end)
+ if isInvariant1 && isInvariant2 {
+ return true
+ }
+
+ if isInvariant1 {
+ n.LHS = newStepInvariantExpr(n.LHS)
+ }
+ if isInvariant2 {
+ n.RHS = newStepInvariantExpr(n.RHS)
+ }
+
+ return false
+
+ case *parser.Call:
+ _, ok := AtModifierUnsafeFunctions[n.Func.Name]
+ isStepInvariant := !ok
+ isStepInvariantSlice := make([]bool, len(n.Args))
+ for i := range n.Args {
+ isStepInvariantSlice[i] = preprocessExprHelper(n.Args[i], start, end)
+ isStepInvariant = isStepInvariant && isStepInvariantSlice[i]
+ }
+
+ if isStepInvariant {
+
+ // The function and all arguments are step invariant.
+ return true
+ }
+
+ for i, isi := range isStepInvariantSlice {
+ if isi {
+ n.Args[i] = newStepInvariantExpr(n.Args[i])
+ }
+ }
+ return false
+
+ case *parser.MatrixSelector:
+ return preprocessExprHelper(n.VectorSelector, start, end)
+
+ case *parser.SubqueryExpr:
+ // Since we adjust offset for the @ modifier evaluation,
+ // it gets tricky to adjust it for every subquery step.
+ // Hence we wrap the inside of subquery irrespective of
+ // @ on subquery (given it is also step invariant) so that
+ // it is evaluated only once w.r.t. the start time of subquery.
+ isInvariant := preprocessExprHelper(n.Expr, start, end)
+ if isInvariant {
+ n.Expr = newStepInvariantExpr(n.Expr)
+ }
+ if n.StartOrEnd == parser.START {
+ n.Timestamp = makeInt64Pointer(timestamp.FromTime(start))
+ } else if n.StartOrEnd == parser.END {
+ n.Timestamp = makeInt64Pointer(timestamp.FromTime(end))
+ }
+ return n.Timestamp != nil
+
+ case *parser.ParenExpr:
+ return preprocessExprHelper(n.Expr, start, end)
+
+ case *parser.UnaryExpr:
+ return preprocessExprHelper(n.Expr, start, end)
+
+ case *parser.StringLiteral, *parser.NumberLiteral:
+ return true
+ }
+
+ panic(fmt.Sprintf("found unexpected node %#v", expr))
+}
+
+func newStepInvariantExpr(expr parser.Expr) parser.Expr {
+ if e, ok := expr.(*parser.ParenExpr); ok {
+ // Wrapping the inside of () makes it easy to unwrap the paren later.
+ // But this effectively unwraps the paren.
+ return newStepInvariantExpr(e.Expr)
+
+ }
+ return &parser.StepInvariantExpr{Expr: expr}
+}
+
+// setOffsetForAtModifier modifies the offset of vector and matrix selector
+// and subquery in the tree to accommodate the timestamp of @ modifier.
+// The offset is adjusted w.r.t. the given evaluation time.
+func setOffsetForAtModifier(evalTime int64, expr parser.Expr) {
+ getOffset := func(ts *int64, originalOffset time.Duration, path []parser.Node) time.Duration {
+ if ts == nil {
+ return originalOffset
+ }
+
+ subqOffset, _, subqTs := subqueryTimes(path)
+ if subqTs != nil {
+ subqOffset += time.Duration(evalTime-*subqTs) * time.Millisecond
+ }
+
+ offsetForTs := time.Duration(evalTime-*ts) * time.Millisecond
+ offsetDiff := offsetForTs - subqOffset
+ return originalOffset + offsetDiff
+ }
+
+ parser.Inspect(context.TODO(), &parser.EvalStmt{Expr: expr}, func(node parser.Node, path []parser.Node) error {
+ switch n := node.(type) {
+ case *parser.VectorSelector:
+ n.Offset = getOffset(n.Timestamp, n.OriginalOffset, path)
+
+ case *parser.MatrixSelector:
+ vs := n.VectorSelector.(*parser.VectorSelector)
+ vs.Offset = getOffset(vs.Timestamp, vs.OriginalOffset, path)
+
+ case *parser.SubqueryExpr:
+ n.Offset = getOffset(n.Timestamp, n.OriginalOffset, path)
+ }
+ return nil
+ }, nil)
+}
+
+func makeInt64Pointer(val int64) *int64 {
+ valp := new(int64)
+ *valp = val
+ return valp
+}
diff --git a/vendor/github.com/prometheus/prometheus/promql/functions.go b/vendor/github.com/prometheus/prometheus/promql/functions.go
index 0499e8f15..d72b4caf6 100644
--- a/vendor/github.com/prometheus/prometheus/promql/functions.go
+++ b/vendor/github.com/prometheus/prometheus/promql/functions.go
@@ -59,7 +59,6 @@ func funcTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper)
func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, isCounter bool, isRate bool) Vector {
ms := args[0].(*parser.MatrixSelector)
vs := ms.VectorSelector.(*parser.VectorSelector)
-
var (
samples = vals[0].(Matrix)[0]
rangeStart = enh.Ts - durationMilliseconds(ms.Range+vs.Offset)
@@ -71,17 +70,17 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod
if len(samples.Points) < 2 {
return enh.Out
}
- var (
- counterCorrection float64
- lastValue float64
- )
- for _, sample := range samples.Points {
- if isCounter && sample.V < lastValue {
- counterCorrection += lastValue
+
+ resultValue := samples.Points[len(samples.Points)-1].V - samples.Points[0].V
+ if isCounter {
+ var lastValue float64
+ for _, sample := range samples.Points {
+ if sample.V < lastValue {
+ resultValue += lastValue
+ }
+ lastValue = sample.V
}
- lastValue = sample.V
}
- resultValue := lastValue - samples.Points[0].V + counterCorrection
// Duration between first/last samples and boundary of range.
durationToStart := float64(samples.Points[0].T-rangeStart) / 1000
@@ -279,6 +278,23 @@ func funcSortDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel
return Vector(byValueSorter)
}
+// === clamp(Vector parser.ValueTypeVector, min, max Scalar) Vector ===
+func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+ vec := vals[0].(Vector)
+ min := vals[1].(Vector)[0].Point.V
+ max := vals[2].(Vector)[0].Point.V
+ if max < min {
+ return enh.Out
+ }
+ for _, el := range vec {
+ enh.Out = append(enh.Out, Sample{
+ Metric: enh.DropMetricName(el.Metric),
+ Point: Point{V: math.Max(min, math.Min(max, el.V))},
+ })
+ }
+ return enh.Out
+}
+
// === clamp_max(Vector parser.ValueTypeVector, max Scalar) Vector ===
func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
vec := vals[0].(Vector)
@@ -384,7 +400,16 @@ func funcCountOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNo
})
}
-// === floor(Vector parser.ValueTypeVector) Vector ===
+// === last_over_time(Matrix parser.ValueTypeMatrix) Vector ===
+func funcLastOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+ el := vals[0].(Matrix)[0]
+
+ return append(enh.Out, Sample{
+ Metric: el.Metric,
+ Point: Point{V: el.Points[len(el.Points)-1].V},
+ })
+}
+
// === max_over_time(Matrix parser.ValueTypeMatrix) Vector ===
func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
return aggrOverTime(vals, enh, func(values []Point) float64 {
@@ -488,6 +513,13 @@ func funcAbsentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalN
})
}
+// === present_over_time(Matrix parser.ValueTypeMatrix) Vector ===
+func funcPresentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+ return aggrOverTime(vals, enh, func(values []Point) float64 {
+ return 1
+ })
+}
+
func simpleFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float64) Vector {
for _, el := range vals[0].(Vector) {
enh.Out = append(enh.Out, Sample{
@@ -538,6 +570,18 @@ func funcLog10(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper
return simpleFunc(vals, enh, math.Log10)
}
+// === sgn(Vector parser.ValueTypeVector) Vector ===
+func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+ return simpleFunc(vals, enh, func(v float64) float64 {
+ if v < 0 {
+ return -1
+ } else if v > 0 {
+ return 1
+ }
+ return v
+ })
+}
+
// === timestamp(Vector parser.ValueTypeVector) Vector ===
func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
vec := vals[0].(Vector)
@@ -598,7 +642,6 @@ func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper
func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
samples := vals[0].(Matrix)[0]
duration := vals[1].(Vector)[0].V
-
// No sense in trying to predict anything without at least two points.
// Drop this Vector element.
if len(samples.Points) < 2 {
@@ -701,10 +744,10 @@ func funcChanges(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelp
func funcLabelReplace(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
var (
vector = vals[0].(Vector)
- dst = args[1].(*parser.StringLiteral).Val
- repl = args[2].(*parser.StringLiteral).Val
- src = args[3].(*parser.StringLiteral).Val
- regexStr = args[4].(*parser.StringLiteral).Val
+ dst = stringFromArg(args[1])
+ repl = stringFromArg(args[2])
+ src = stringFromArg(args[3])
+ regexStr = stringFromArg(args[4])
)
if enh.regex == nil {
@@ -764,8 +807,8 @@ func funcVector(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelpe
func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
var (
vector = vals[0].(Vector)
- dst = args[1].(*parser.StringLiteral).Val
- sep = args[2].(*parser.StringLiteral).Val
+ dst = stringFromArg(args[1])
+ sep = stringFromArg(args[2])
srcLabels = make([]string, len(args)-3)
)
@@ -774,7 +817,7 @@ func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHe
}
for i := 3; i < len(args); i++ {
- src := args[i].(*parser.StringLiteral).Val
+ src := stringFromArg(args[i])
if !model.LabelName(src).IsValid() {
panic(errors.Errorf("invalid source label name in label_join(): %s", src))
}
@@ -895,6 +938,7 @@ var FunctionCalls = map[string]FunctionCall{
"avg_over_time": funcAvgOverTime,
"ceil": funcCeil,
"changes": funcChanges,
+ "clamp": funcClamp,
"clamp_max": funcClampMax,
"clamp_min": funcClampMin,
"count_over_time": funcCountOverTime,
@@ -916,16 +960,19 @@ var FunctionCalls = map[string]FunctionCall{
"ln": funcLn,
"log10": funcLog10,
"log2": funcLog2,
+ "last_over_time": funcLastOverTime,
"max_over_time": funcMaxOverTime,
"min_over_time": funcMinOverTime,
"minute": funcMinute,
"month": funcMonth,
"predict_linear": funcPredictLinear,
+ "present_over_time": funcPresentOverTime,
"quantile_over_time": funcQuantileOverTime,
"rate": funcRate,
"resets": funcResets,
"round": funcRound,
"scalar": funcScalar,
+ "sgn": funcSgn,
"sort": funcSort,
"sort_desc": funcSortDesc,
"sqrt": funcSqrt,
@@ -938,6 +985,21 @@ var FunctionCalls = map[string]FunctionCall{
"year": funcYear,
}
+// AtModifierUnsafeFunctions are the functions whose result
+// can vary if evaluation time is changed when the arguments are
+// step invariant. It also includes functions that use the timestamps
+// of the passed instant vector argument to calculate a result since
+// that can also change with change in eval time.
+var AtModifierUnsafeFunctions = map[string]struct{}{
+ // Step invariant functions.
+ "days_in_month": {}, "day_of_month": {}, "day_of_week": {},
+ "hour": {}, "minute": {}, "month": {}, "year": {},
+ "predict_linear": {}, "time": {},
+ // Uses timestamp of the argument for the result,
+ // hence unsafe to use with @ modifier.
+ "timestamp": {},
+}
+
type vectorByValueHeap Vector
func (s vectorByValueHeap) Len() int {
@@ -1028,3 +1090,7 @@ func createLabelsForAbsentFunction(expr parser.Expr) labels.Labels {
}
return m
}
+
+func stringFromArg(e parser.Expr) string {
+ return unwrapStepInvariantExpr(e).(*parser.StringLiteral).Val
+}
diff --git a/vendor/github.com/prometheus/prometheus/promql/fuzz.go b/vendor/github.com/prometheus/prometheus/promql/fuzz.go
index eeb1fede8..b34fbbc66 100644
--- a/vendor/github.com/prometheus/prometheus/promql/fuzz.go
+++ b/vendor/github.com/prometheus/prometheus/promql/fuzz.go
@@ -12,6 +12,7 @@
// limitations under the License.
// Only build when go-fuzz is in use
+//go:build gofuzz
// +build gofuzz
package promql
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/ast.go b/vendor/github.com/prometheus/prometheus/promql/parser/ast.go
index 6ddc1a1d9..8fe3ec301 100644
--- a/vendor/github.com/prometheus/prometheus/promql/parser/ast.go
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/ast.go
@@ -15,7 +15,6 @@ package parser
import (
"context"
- "sync"
"time"
"github.com/pkg/errors"
@@ -126,10 +125,18 @@ type MatrixSelector struct {
// SubqueryExpr represents a subquery.
type SubqueryExpr struct {
- Expr Expr
- Range time.Duration
- Offset time.Duration
- Step time.Duration
+ Expr Expr
+ Range time.Duration
+ // OriginalOffset is the actual offset that was set in the query.
+ // This never changes.
+ OriginalOffset time.Duration
+ // Offset is the offset used during the query execution
+ // which is calculated using the original offset, at modifier time,
+ // eval time, and subquery offsets in the AST tree.
+ Offset time.Duration
+ Timestamp *int64
+ StartOrEnd ItemType // Set when @ is used with start() or end()
+ Step time.Duration
EndPos Pos
}
@@ -163,10 +170,29 @@ type UnaryExpr struct {
StartPos Pos
}
+// StepInvariantExpr represents a query which evaluates to the same result
+// irrespective of the evaluation time given the raw samples from TSDB remain unchanged.
+// Currently this is only used for engine optimisations and the parser does not produce this.
+type StepInvariantExpr struct {
+ Expr Expr
+}
+
+func (e *StepInvariantExpr) String() string { return e.Expr.String() }
+
+func (e *StepInvariantExpr) PositionRange() PositionRange { return e.Expr.PositionRange() }
+
// VectorSelector represents a Vector selection.
type VectorSelector struct {
- Name string
+ Name string
+ // OriginalOffset is the actual offset that was set in the query.
+ // This never changes.
+ OriginalOffset time.Duration
+ // Offset is the offset used during the query execution
+ // which is calculated using the original offset, at modifier time,
+ // eval time, and subquery offsets in the AST tree.
Offset time.Duration
+ Timestamp *int64
+ StartOrEnd ItemType // Set when @ is used with start() or end()
LabelMatchers []*labels.Matcher
LookbackDelta time.Duration
@@ -213,17 +239,19 @@ func (e *BinaryExpr) Type() ValueType {
}
return ValueTypeVector
}
+func (e *StepInvariantExpr) Type() ValueType { return e.Expr.Type() }
-func (*AggregateExpr) PromQLExpr() {}
-func (*BinaryExpr) PromQLExpr() {}
-func (*Call) PromQLExpr() {}
-func (*MatrixSelector) PromQLExpr() {}
-func (*SubqueryExpr) PromQLExpr() {}
-func (*NumberLiteral) PromQLExpr() {}
-func (*ParenExpr) PromQLExpr() {}
-func (*StringLiteral) PromQLExpr() {}
-func (*UnaryExpr) PromQLExpr() {}
-func (*VectorSelector) PromQLExpr() {}
+func (*AggregateExpr) PromQLExpr() {}
+func (*BinaryExpr) PromQLExpr() {}
+func (*Call) PromQLExpr() {}
+func (*MatrixSelector) PromQLExpr() {}
+func (*SubqueryExpr) PromQLExpr() {}
+func (*NumberLiteral) PromQLExpr() {}
+func (*ParenExpr) PromQLExpr() {}
+func (*StringLiteral) PromQLExpr() {}
+func (*UnaryExpr) PromQLExpr() {}
+func (*VectorSelector) PromQLExpr() {}
+func (*StepInvariantExpr) PromQLExpr() {}
// VectorMatchCardinality describes the cardinality relationship
// of two Vectors in a binary operation.
@@ -305,26 +333,12 @@ func Walk(ctx context.Context, v Visitor, s *EvalStmt, node Node, path []Node, n
}
path = append(path, node)
- // We parallelize the execution of children
- wg := &sync.WaitGroup{}
- children := Children(node)
- errs := make([]error, len(children))
- for i, e := range children {
- wg.Add(1)
- go func(i int, e Node) {
- defer wg.Done()
- if childNode, childErr := Walk(ctx, v, s, e, append([]Node{}, path...), nr); err != nil {
- errs[i] = childErr
- } else {
- SetChild(node, i, childNode)
- }
- }(i, e)
- }
- wg.Wait()
- // If there was an error we return the first one
- for _, err := range errs {
- if err != nil {
+ // TODO: parallel execution of children
+ for i, e := range Children(node) {
+ if childNode, err := Walk(ctx, v, s, e, path, nr); err != nil {
return node, err
+ } else {
+ SetChild(node, i, childNode)
}
}
@@ -332,6 +346,18 @@ func Walk(ctx context.Context, v Visitor, s *EvalStmt, node Node, path []Node, n
return node, err
}
+func ExtractSelectors(expr Expr) [][]*labels.Matcher {
+ var selectors [][]*labels.Matcher
+ Inspect(context.TODO(), &EvalStmt{Expr: expr}, func(node Node, _ []Node) error {
+ vs, ok := node.(*VectorSelector)
+ if ok {
+ selectors = append(selectors, vs.LabelMatchers)
+ }
+ return nil
+ }, nil)
+ return selectors
+}
+
type inspector func(Node, []Node) error
func (f inspector) Visit(node Node, path []Node) (Visitor, error) {
@@ -389,9 +415,9 @@ func SetChild(node Node, i int, child Node) {
case *UnaryExpr:
n.Expr = child.(Expr)
case *MatrixSelector:
+ case *StepInvariantExpr:
+ n.Expr = child.(Expr)
case *NumberLiteral, *StringLiteral, *VectorSelector:
- default:
- panic(errors.Errorf("promql.Children: unhandled node type %T", node))
}
}
@@ -437,6 +463,8 @@ func Children(node Node) []Node {
return []Node{n.Expr}
case *MatrixSelector:
return []Node{n.VectorSelector}
+ case *StepInvariantExpr:
+ return []Node{n.Expr}
case *NumberLiteral, *StringLiteral, *VectorSelector:
// nothing to do
return []Node{}
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/functions.go b/vendor/github.com/prometheus/prometheus/promql/parser/functions.go
index 4516829e5..da5d279f3 100644
--- a/vendor/github.com/prometheus/prometheus/promql/parser/functions.go
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/functions.go
@@ -39,6 +39,11 @@ var Functions = map[string]*Function{
ArgTypes: []ValueType{ValueTypeMatrix},
ReturnType: ValueTypeVector,
},
+ "present_over_time": {
+ Name: "present_over_time",
+ ArgTypes: []ValueType{ValueTypeMatrix},
+ ReturnType: ValueTypeVector,
+ },
"avg_over_time": {
Name: "avg_over_time",
ArgTypes: []ValueType{ValueTypeMatrix},
@@ -54,6 +59,11 @@ var Functions = map[string]*Function{
ArgTypes: []ValueType{ValueTypeMatrix},
ReturnType: ValueTypeVector,
},
+ "clamp": {
+ Name: "clamp",
+ ArgTypes: []ValueType{ValueTypeVector, ValueTypeScalar, ValueTypeScalar},
+ ReturnType: ValueTypeVector,
+ },
"clamp_max": {
Name: "clamp_max",
ArgTypes: []ValueType{ValueTypeVector, ValueTypeScalar},
@@ -149,6 +159,11 @@ var Functions = map[string]*Function{
Variadic: -1,
ReturnType: ValueTypeVector,
},
+ "last_over_time": {
+ Name: "last_over_time",
+ ArgTypes: []ValueType{ValueTypeMatrix},
+ ReturnType: ValueTypeVector,
+ },
"ln": {
Name: "ln",
ArgTypes: []ValueType{ValueTypeVector},
@@ -217,6 +232,11 @@ var Functions = map[string]*Function{
ArgTypes: []ValueType{ValueTypeVector},
ReturnType: ValueTypeScalar,
},
+ "sgn": {
+ Name: "sgn",
+ ArgTypes: []ValueType{ValueTypeVector},
+ ReturnType: ValueTypeVector,
+ },
"sort": {
Name: "sort",
ArgTypes: []ValueType{ValueTypeVector},
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y
index f0bdc320f..75f147ee4 100644
--- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y
@@ -83,6 +83,7 @@ NEQ
NEQ_REGEX
POW
SUB
+AT
%token operatorsEnd
// Aggregators.
@@ -115,6 +116,13 @@ ON
WITHOUT
%token keywordsEnd
+// Preprocessors.
+%token preprocessorStart
+%token
+START
+END
+%token preprocessorEnd
+
// Start symbols for the generated parser.
%token startSymbolsStart
@@ -130,15 +138,15 @@ START_METRIC_SELECTOR
%type label_match_list
%type label_matcher
-%type aggregate_op grouping_label match_op maybe_label metric_identifier unary_op
+%type aggregate_op grouping_label match_op maybe_label metric_identifier unary_op at_modifier_preprocessors
%type label_set label_set_list metric
%type